author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-03-21 17:19:04 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-03-21 17:19:04 +0000
commit     310edf444908b09ea6d00c03baceb7925f3bb7a2
tree       7064577c7fa7a851e2e930beb606ea8237b0bbd2
parent     Releasing debian version 1.44.3-2.
Merging upstream version 1.45.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors')
-rw-r--r--  collectors/COLLECTORS.md  1194
-rw-r--r--  collectors/Makefile.am  42
-rw-r--r--  collectors/README.md  62
-rw-r--r--  collectors/REFERENCE.md  149
-rw-r--r--  collectors/all.h  445
-rw-r--r--  collectors/apps.plugin/Makefile.am  12
-rw-r--r--  collectors/apps.plugin/README.md  402
-rw-r--r--  collectors/apps.plugin/apps_groups.conf  428
-rw-r--r--  collectors/apps.plugin/apps_plugin.c  5422
-rw-r--r--  collectors/apps.plugin/integrations/applications.md  114
-rw-r--r--  collectors/apps.plugin/integrations/user_groups.md  114
-rw-r--r--  collectors/apps.plugin/integrations/users.md  114
-rw-r--r--  collectors/apps.plugin/metadata.yaml  554
-rw-r--r--  collectors/cgroups.plugin/Makefile.am  23
-rw-r--r--  collectors/cgroups.plugin/README.md  302
-rw-r--r--  collectors/cgroups.plugin/cgroup-charts.c  1526
-rw-r--r--  collectors/cgroups.plugin/cgroup-discovery.c  1245
-rw-r--r--  collectors/cgroups.plugin/cgroup-internals.h  514
-rwxr-xr-x  collectors/cgroups.plugin/cgroup-name.sh.in  706
-rwxr-xr-x  collectors/cgroups.plugin/cgroup-network-helper.sh.in  376
-rw-r--r--  collectors/cgroups.plugin/cgroup-network.c  743
-rw-r--r--  collectors/cgroups.plugin/cgroup-top.c  520
-rw-r--r--  collectors/cgroups.plugin/integrations/containers.md  169
-rw-r--r--  collectors/cgroups.plugin/integrations/kubernetes_containers.md  183
-rw-r--r--  collectors/cgroups.plugin/integrations/libvirt_containers.md  169
-rw-r--r--  collectors/cgroups.plugin/integrations/lxc_containers.md  169
-rw-r--r--  collectors/cgroups.plugin/integrations/ovirt_containers.md  169
-rw-r--r--  collectors/cgroups.plugin/integrations/proxmox_containers.md  169
-rw-r--r--  collectors/cgroups.plugin/integrations/systemd_services.md  112
-rw-r--r--  collectors/cgroups.plugin/integrations/virtual_machines.md  169
-rw-r--r--  collectors/cgroups.plugin/metadata.yaml  1022
-rw-r--r--  collectors/cgroups.plugin/sys_fs_cgroup.c  1729
-rw-r--r--  collectors/cgroups.plugin/sys_fs_cgroup.h  48
-rw-r--r--  collectors/cgroups.plugin/tests/test_cgroups_plugin.c  130
-rw-r--r--  collectors/cgroups.plugin/tests/test_cgroups_plugin.h  16
-rw-r--r--  collectors/cgroups.plugin/tests/test_doubles.c  157
-rw-r--r--  collectors/charts.d.plugin/Makefile.am  49
-rw-r--r--  collectors/charts.d.plugin/README.md  190
-rw-r--r--  collectors/charts.d.plugin/ap/Makefile.inc  13
l---------  collectors/charts.d.plugin/ap/README.md  1
-rw-r--r--  collectors/charts.d.plugin/ap/ap.chart.sh  179
-rw-r--r--  collectors/charts.d.plugin/ap/ap.conf  23
-rw-r--r--  collectors/charts.d.plugin/ap/integrations/access_points.md  174
-rw-r--r--  collectors/charts.d.plugin/ap/metadata.yaml  146
-rw-r--r--  collectors/charts.d.plugin/apcupsd/Makefile.inc  13
l---------  collectors/charts.d.plugin/apcupsd/README.md  1
-rw-r--r--  collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh  305
-rw-r--r--  collectors/charts.d.plugin/apcupsd/apcupsd.conf  25
-rw-r--r--  collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md  203
-rw-r--r--  collectors/charts.d.plugin/apcupsd/metadata.yaml  256
-rw-r--r--  collectors/charts.d.plugin/charts.d.conf  47
-rwxr-xr-x  collectors/charts.d.plugin/charts.d.dryrun-helper.sh  72
-rwxr-xr-x  collectors/charts.d.plugin/charts.d.plugin.in  809
-rw-r--r--  collectors/charts.d.plugin/example/Makefile.inc  13
-rw-r--r--  collectors/charts.d.plugin/example/README.md  14
-rw-r--r--  collectors/charts.d.plugin/example/example.chart.sh  123
-rw-r--r--  collectors/charts.d.plugin/example/example.conf  21
-rw-r--r--  collectors/charts.d.plugin/libreswan/Makefile.inc  13
l---------  collectors/charts.d.plugin/libreswan/README.md  1
-rw-r--r--  collectors/charts.d.plugin/libreswan/integrations/libreswan.md  194
-rw-r--r--  collectors/charts.d.plugin/libreswan/libreswan.chart.sh  187
-rw-r--r--  collectors/charts.d.plugin/libreswan/libreswan.conf  29
-rw-r--r--  collectors/charts.d.plugin/libreswan/metadata.yaml  146
-rw-r--r--  collectors/charts.d.plugin/loopsleepms.sh.inc  227
-rw-r--r--  collectors/charts.d.plugin/opensips/Makefile.inc  13
l---------  collectors/charts.d.plugin/opensips/README.md  1
-rw-r--r--  collectors/charts.d.plugin/opensips/integrations/opensips.md  192
-rw-r--r--  collectors/charts.d.plugin/opensips/metadata.yaml  270
-rw-r--r--  collectors/charts.d.plugin/opensips/opensips.chart.sh  325
-rw-r--r--  collectors/charts.d.plugin/opensips/opensips.conf  21
-rw-r--r--  collectors/charts.d.plugin/sensors/Makefile.inc  13
l---------  collectors/charts.d.plugin/sensors/README.md  1
-rw-r--r--  collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md  201
-rw-r--r--  collectors/charts.d.plugin/sensors/metadata.yaml  182
-rw-r--r--  collectors/charts.d.plugin/sensors/sensors.chart.sh  250
-rw-r--r--  collectors/charts.d.plugin/sensors/sensors.conf  32
-rw-r--r--  collectors/checks.plugin/README.md  12
-rw-r--r--  collectors/cups.plugin/Makefile.am  8
l---------  collectors/cups.plugin/README.md  1
-rw-r--r--  collectors/cups.plugin/cups_plugin.c  437
-rw-r--r--  collectors/cups.plugin/integrations/cups.md  141
-rw-r--r--  collectors/cups.plugin/metadata.yaml  131
-rw-r--r--  collectors/debugfs.plugin/Makefile.am  9
-rw-r--r--  collectors/debugfs.plugin/README.md  65
-rw-r--r--  collectors/debugfs.plugin/debugfs_extfrag.c  123
-rw-r--r--  collectors/debugfs.plugin/debugfs_plugin.c  250
-rw-r--r--  collectors/debugfs.plugin/debugfs_plugin.h  17
-rw-r--r--  collectors/debugfs.plugin/debugfs_zswap.c  437
-rw-r--r--  collectors/debugfs.plugin/integrations/linux_zswap.md  138
-rw-r--r--  collectors/debugfs.plugin/integrations/power_capping.md  132
-rw-r--r--  collectors/debugfs.plugin/integrations/system_memory_fragmentation.md  136
-rw-r--r--  collectors/debugfs.plugin/metadata.yaml  395
-rw-r--r--  collectors/debugfs.plugin/sys_devices_virtual_powercap.c  217
-rw-r--r--  collectors/diskspace.plugin/Makefile.am  8
l---------  collectors/diskspace.plugin/README.md  1
-rw-r--r--  collectors/diskspace.plugin/integrations/disk_space.md  140
-rw-r--r--  collectors/diskspace.plugin/metadata.yaml  139
-rw-r--r--  collectors/diskspace.plugin/plugin_diskspace.c  945
-rw-r--r--  collectors/ebpf.plugin/Makefile.am  42
-rw-r--r--  collectors/ebpf.plugin/README.md  1071
-rw-r--r--  collectors/ebpf.plugin/ebpf.c  4126
-rw-r--r--  collectors/ebpf.plugin/ebpf.d.conf  77
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/cachestat.conf  42
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/dcstat.conf  40
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/disk.conf  12
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/ebpf_kernel_reject_list.txt  1
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/fd.conf  27
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/filesystem.conf  23
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/functions.conf  3
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/hardirq.conf  11
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/mdflush.conf  11
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/mount.conf  23
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/network.conf  66
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/oomkill.conf  11
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/process.conf  31
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/shm.conf  42
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/softirq.conf  11
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/swap.conf  34
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/sync.conf  43
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/vfs.conf  35
-rw-r--r--  collectors/ebpf.plugin/ebpf.h  393
-rw-r--r--  collectors/ebpf.plugin/ebpf_apps.c  1485
-rw-r--r--  collectors/ebpf.plugin/ebpf_apps.h  264
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.c  1591
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.h  95
-rw-r--r--  collectors/ebpf.plugin/ebpf_cgroup.c  392
-rw-r--r--  collectors/ebpf.plugin/ebpf_cgroup.h  71
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.c  1420
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.h  87
-rw-r--r--  collectors/ebpf.plugin/ebpf_disk.c  940
-rw-r--r--  collectors/ebpf.plugin/ebpf_disk.h  75
-rw-r--r--  collectors/ebpf.plugin/ebpf_fd.c  1431
-rw-r--r--  collectors/ebpf.plugin/ebpf_fd.h  92
-rw-r--r--  collectors/ebpf.plugin/ebpf_filesystem.c  1029
-rw-r--r--  collectors/ebpf.plugin/ebpf_filesystem.h  62
-rw-r--r--  collectors/ebpf.plugin/ebpf_functions.c  1093
-rw-r--r--  collectors/ebpf.plugin/ebpf_functions.h  44
-rw-r--r--  collectors/ebpf.plugin/ebpf_hardirq.c  686
-rw-r--r--  collectors/ebpf.plugin/ebpf_hardirq.h  80
-rw-r--r--  collectors/ebpf.plugin/ebpf_mdflush.c  456
-rw-r--r--  collectors/ebpf.plugin/ebpf_mdflush.h  48
-rw-r--r--  collectors/ebpf.plugin/ebpf_mount.c  517
-rw-r--r--  collectors/ebpf.plugin/ebpf_mount.h  43
-rw-r--r--  collectors/ebpf.plugin/ebpf_oomkill.c  565
-rw-r--r--  collectors/ebpf.plugin/ebpf_oomkill.h  34
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.c  1369
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.h  110
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.c  1327
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.h  64
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.c  2895
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.h  348
-rw-r--r--  collectors/ebpf.plugin/ebpf_softirq.c  286
-rw-r--r--  collectors/ebpf.plugin/ebpf_softirq.h  36
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.c  1030
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.h  52
-rw-r--r--  collectors/ebpf.plugin/ebpf_sync.c  739
-rw-r--r--  collectors/ebpf.plugin/ebpf_sync.h  60
-rw-r--r--  collectors/ebpf.plugin/ebpf_unittest.c  83
-rw-r--r--  collectors/ebpf.plugin/ebpf_unittest.h  10
-rw-r--r--  collectors/ebpf.plugin/ebpf_vfs.c  2522
-rw-r--r--  collectors/ebpf.plugin/ebpf_vfs.h  178
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_cachestat.md  179
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_dcstat.md  177
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_disk.md  137
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md  177
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_filesystem.md  163
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_hardirq.md  137
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_mdflush.md  132
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_mount.md  140
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_oomkill.md  160
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_process.md  111
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_processes.md  187
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_shm.md  185
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_socket.md  201
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_softirq.md  137
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_swap.md  170
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_sync.md  157
-rw-r--r--  collectors/ebpf.plugin/integrations/ebpf_vfs.md  212
-rw-r--r--  collectors/ebpf.plugin/metadata.yaml  3320
-rw-r--r--  collectors/freebsd.plugin/Makefile.am  8
-rw-r--r--  collectors/freebsd.plugin/README.md  16
-rw-r--r--  collectors/freebsd.plugin/freebsd_devstat.c  759
-rw-r--r--  collectors/freebsd.plugin/freebsd_getifaddrs.c  599
-rw-r--r--  collectors/freebsd.plugin/freebsd_getmntinfo.c  297
-rw-r--r--  collectors/freebsd.plugin/freebsd_ipfw.c  359
-rw-r--r--  collectors/freebsd.plugin/freebsd_kstat_zfs.c  304
-rw-r--r--  collectors/freebsd.plugin/freebsd_sysctl.c  3093
-rw-r--r--  collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md  111
-rw-r--r--  collectors/freebsd.plugin/integrations/dev.cpu.temperature.md  120
-rw-r--r--  collectors/freebsd.plugin/integrations/devstat.md  155
-rw-r--r--  collectors/freebsd.plugin/integrations/getifaddrs.md  161
-rw-r--r--  collectors/freebsd.plugin/integrations/getmntinfo.md  131
-rw-r--r--  collectors/freebsd.plugin/integrations/hw.intrcnt.md  121
-rw-r--r--  collectors/freebsd.plugin/integrations/ipfw.md  126
-rw-r--r--  collectors/freebsd.plugin/integrations/kern.cp_time.md  139
-rw-r--r--  collectors/freebsd.plugin/integrations/kern.ipc.msq.md  122
-rw-r--r--  collectors/freebsd.plugin/integrations/kern.ipc.sem.md  127
-rw-r--r--  collectors/freebsd.plugin/integrations/kern.ipc.shm.md  121
-rw-r--r--  collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md  124
-rw-r--r--  collectors/freebsd.plugin/integrations/net.inet.ip.stats.md  126
-rw-r--r--  collectors/freebsd.plugin/integrations/net.inet.tcp.states.md  125
-rw-r--r--  collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md  142
-rw-r--r--  collectors/freebsd.plugin/integrations/net.inet.udp.stats.md  128
-rw-r--r--  collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md  132
-rw-r--r--  collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md  126
-rw-r--r--  collectors/freebsd.plugin/integrations/net.isr.md  140
-rw-r--r--  collectors/freebsd.plugin/integrations/system.ram.md  129
-rw-r--r--  collectors/freebsd.plugin/integrations/uptime.md  120
-rw-r--r--  collectors/freebsd.plugin/integrations/vm.loadavg.md  128
-rw-r--r--  collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md  120
-rw-r--r--  collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md  120
-rw-r--r--  collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md  121
-rw-r--r--  collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md  120
-rw-r--r--  collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md  125
-rw-r--r--  collectors/freebsd.plugin/integrations/vm.swap_info.md  125
-rw-r--r--  collectors/freebsd.plugin/integrations/vm.vmtotal.md  129
-rw-r--r--  collectors/freebsd.plugin/integrations/zfs.md  152
-rw-r--r--  collectors/freebsd.plugin/metadata.yaml  3398
-rw-r--r--  collectors/freebsd.plugin/plugin_freebsd.c  136
-rw-r--r--  collectors/freebsd.plugin/plugin_freebsd.h  55
-rw-r--r--  collectors/freeipmi.plugin/Makefile.am  8
l---------  collectors/freeipmi.plugin/README.md  1
-rw-r--r--  collectors/freeipmi.plugin/freeipmi_plugin.c  2094
-rw-r--r--  collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md  275
-rw-r--r--  collectors/freeipmi.plugin/metadata.yaml  347
-rw-r--r--  collectors/idlejitter.plugin/Makefile.am  8
l---------  collectors/idlejitter.plugin/README.md  1
-rw-r--r--  collectors/idlejitter.plugin/integrations/idle_os_jitter.md  118
-rw-r--r--  collectors/idlejitter.plugin/metadata.yaml  90
-rw-r--r--  collectors/idlejitter.plugin/plugin_idlejitter.c  93
-rw-r--r--  collectors/ioping.plugin/Makefile.am  24
l---------  collectors/ioping.plugin/README.md  1
-rw-r--r--  collectors/ioping.plugin/integrations/ioping.md  133
-rw-r--r--  collectors/ioping.plugin/ioping.conf  40
-rwxr-xr-x  collectors/ioping.plugin/ioping.plugin.in  272
-rw-r--r--  collectors/ioping.plugin/metadata.yaml  101
-rw-r--r--  collectors/log2journal/Makefile.am  17
-rw-r--r--  collectors/log2journal/README.md  912
-rw-r--r--  collectors/log2journal/log2journal-help.c  377
-rw-r--r--  collectors/log2journal/log2journal-inject.c  49
-rw-r--r--  collectors/log2journal/log2journal-json.c  630
-rw-r--r--  collectors/log2journal/log2journal-logfmt.c  226
-rw-r--r--  collectors/log2journal/log2journal-params.c  404
-rw-r--r--  collectors/log2journal/log2journal-pattern.c  54
-rw-r--r--  collectors/log2journal/log2journal-pcre2.c  139
-rw-r--r--  collectors/log2journal/log2journal-rename.c  21
-rw-r--r--  collectors/log2journal/log2journal-replace.c  111
-rw-r--r--  collectors/log2journal/log2journal-rewrite.c  51
-rw-r--r--  collectors/log2journal/log2journal-yaml.c  964
-rw-r--r--  collectors/log2journal/log2journal.c  569
-rw-r--r--  collectors/log2journal/log2journal.d/default.yaml  15
-rw-r--r--  collectors/log2journal/log2journal.d/nginx-combined.yaml  91
-rw-r--r--  collectors/log2journal/log2journal.d/nginx-json.yaml  164
-rw-r--r--  collectors/log2journal/log2journal.h  501
-rw-r--r--  collectors/log2journal/tests.d/default.output  20
-rw-r--r--  collectors/log2journal/tests.d/full.output  77
-rw-r--r--  collectors/log2journal/tests.d/full.yaml  76
-rw-r--r--  collectors/log2journal/tests.d/json-exclude.output  153
-rw-r--r--  collectors/log2journal/tests.d/json-include.output  54
-rw-r--r--  collectors/log2journal/tests.d/json.log  3
-rw-r--r--  collectors/log2journal/tests.d/json.output  294
-rw-r--r--  collectors/log2journal/tests.d/logfmt.log  5
-rw-r--r--  collectors/log2journal/tests.d/logfmt.output  37
-rw-r--r--  collectors/log2journal/tests.d/logfmt.yaml  34
-rw-r--r--  collectors/log2journal/tests.d/nginx-combined.log  14
-rw-r--r--  collectors/log2journal/tests.d/nginx-combined.output  210
-rw-r--r--  collectors/log2journal/tests.d/nginx-json.log  9
-rw-r--r--  collectors/log2journal/tests.d/nginx-json.output  296
-rwxr-xr-x  collectors/log2journal/tests.sh  148
-rw-r--r--  collectors/macos.plugin/Makefile.am  8
l---------  collectors/macos.plugin/README.md  1
-rw-r--r--  collectors/macos.plugin/integrations/macos.md  286
-rw-r--r--  collectors/macos.plugin/macos_fw.c  648
-rw-r--r--  collectors/macos.plugin/macos_mach_smi.c  227
-rw-r--r--  collectors/macos.plugin/macos_sysctl.c  1424
-rw-r--r--  collectors/macos.plugin/metadata.yaml  727
-rw-r--r--  collectors/macos.plugin/plugin_macos.c  81
-rw-r--r--  collectors/macos.plugin/plugin_macos.h  12
-rw-r--r--  collectors/nfacct.plugin/Makefile.am  8
l---------  collectors/nfacct.plugin/README.md  1
-rw-r--r--  collectors/nfacct.plugin/integrations/netfilter.md  132
-rw-r--r--  collectors/nfacct.plugin/metadata.yaml  133
-rw-r--r--  collectors/nfacct.plugin/plugin_nfacct.c  879
-rw-r--r--  collectors/perf.plugin/Makefile.am  8
l---------  collectors/perf.plugin/README.md  1
-rw-r--r--  collectors/perf.plugin/integrations/cpu_performance.md  192
-rw-r--r--  collectors/perf.plugin/metadata.yaml  252
-rw-r--r--  collectors/perf.plugin/perf_plugin.c  1342
-rw-r--r--  collectors/plugins.d/Makefile.am  12
-rw-r--r--  collectors/plugins.d/README.md  680
-rw-r--r--  collectors/plugins.d/gperf-config.txt  58
-rw-r--r--  collectors/plugins.d/gperf-hashtable.h  177
-rw-r--r--  collectors/plugins.d/local_listeners.c  400
-rw-r--r--  collectors/plugins.d/plugins_d.c  362
-rw-r--r--  collectors/plugins.d/plugins_d.h  67
-rw-r--r--  collectors/plugins.d/pluginsd_parser.c  3208
-rw-r--r--  collectors/plugins.d/pluginsd_parser.h  245
-rw-r--r--  collectors/proc.plugin/Makefile.am  8
-rw-r--r--  collectors/proc.plugin/README.md  639
-rw-r--r--  collectors/proc.plugin/integrations/amd_gpu.md  110
-rw-r--r--  collectors/proc.plugin/integrations/btrfs.md  137
-rw-r--r--  collectors/proc.plugin/integrations/conntrack.md  105
-rw-r--r--  collectors/proc.plugin/integrations/disk_statistics.md  149
-rw-r--r--  collectors/proc.plugin/integrations/entropy.md  133
-rw-r--r--  collectors/proc.plugin/integrations/infiniband.md  99
-rw-r--r--  collectors/proc.plugin/integrations/inter_process_communication.md  120
-rw-r--r--  collectors/proc.plugin/integrations/interrupts.md  141
-rw-r--r--  collectors/proc.plugin/integrations/ip_virtual_server.md  97
-rw-r--r--  collectors/proc.plugin/integrations/ipv6_socket_statistics.md  99
-rw-r--r--  collectors/proc.plugin/integrations/kernel_same-page_merging.md  103
-rw-r--r--  collectors/proc.plugin/integrations/md_raid.md  125
-rw-r--r--  collectors/proc.plugin/integrations/memory_modules_dimms.md  146
-rw-r--r--  collectors/proc.plugin/integrations/memory_statistics.md  138
-rw-r--r--  collectors/proc.plugin/integrations/memory_usage.md  135
-rw-r--r--  collectors/proc.plugin/integrations/network_interfaces.md  137
-rw-r--r--  collectors/proc.plugin/integrations/network_statistics.md  161
-rw-r--r--  collectors/proc.plugin/integrations/nfs_client.md  99
-rw-r--r--  collectors/proc.plugin/integrations/nfs_server.md  104
-rw-r--r--  collectors/proc.plugin/integrations/non-uniform_memory_access.md  111
-rw-r--r--  collectors/proc.plugin/integrations/page_types.md  113
-rw-r--r--  collectors/proc.plugin/integrations/power_supply.md  107
-rw-r--r--  collectors/proc.plugin/integrations/pressure_stall_information.md  129
-rw-r--r--  collectors/proc.plugin/integrations/sctp_statistics.md  99
-rw-r--r--  collectors/proc.plugin/integrations/socket_statistics.md  109
-rw-r--r--  collectors/proc.plugin/integrations/softirq_statistics.md  133
-rw-r--r--  collectors/proc.plugin/integrations/softnet_statistics.md  135
-rw-r--r--  collectors/proc.plugin/integrations/synproxy.md  97
-rw-r--r--  collectors/proc.plugin/integrations/system_load_average.md  128
-rw-r--r--  collectors/proc.plugin/integrations/system_statistics.md  169
-rw-r--r--  collectors/proc.plugin/integrations/system_uptime.md  108
-rw-r--r--  collectors/proc.plugin/integrations/wireless_network_interfaces.md  100
-rw-r--r--  collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md  125
-rw-r--r--  collectors/proc.plugin/integrations/zfs_pools.md  105
-rw-r--r--  collectors/proc.plugin/integrations/zram.md  106
-rw-r--r--  collectors/proc.plugin/ipc.c  554
-rw-r--r--  collectors/proc.plugin/metadata.yaml  5299
-rw-r--r--  collectors/proc.plugin/plugin_proc.c  249
-rw-r--r--  collectors/proc.plugin/plugin_proc.h  71
-rw-r--r--  collectors/proc.plugin/proc_diskstats.c  2515
-rw-r--r--  collectors/proc.plugin/proc_interrupts.c  245
-rw-r--r--  collectors/proc.plugin/proc_loadavg.c  126
-rw-r--r--  collectors/proc.plugin/proc_mdstat.c  640
-rw-r--r--  collectors/proc.plugin/proc_meminfo.c  849
-rw-r--r--  collectors/proc.plugin/proc_net_dev.c  1956
-rw-r--r--  collectors/proc.plugin/proc_net_ip_vs_stats.c  123
-rw-r--r--  collectors/proc.plugin/proc_net_netstat.c  3087
-rw-r--r--  collectors/proc.plugin/proc_net_rpc_nfs.c  439
-rw-r--r--  collectors/proc.plugin/proc_net_rpc_nfsd.c  763
-rw-r--r--  collectors/proc.plugin/proc_net_sctp_snmp.c  367
-rw-r--r--  collectors/proc.plugin/proc_net_sockstat.c  529
-rw-r--r--  collectors/proc.plugin/proc_net_sockstat6.c  278
-rw-r--r--  collectors/proc.plugin/proc_net_softnet_stat.c  149
-rw-r--r--  collectors/proc.plugin/proc_net_stat_conntrack.c  345
-rw-r--r--  collectors/proc.plugin/proc_net_stat_synproxy.c  153
-rw-r--r--  collectors/proc.plugin/proc_net_wireless.c  433
-rw-r--r--  collectors/proc.plugin/proc_pagetypeinfo.c  336
-rw-r--r--  collectors/proc.plugin/proc_pressure.c  257
-rw-r--r--  collectors/proc.plugin/proc_pressure.h  44
-rw-r--r--  collectors/proc.plugin/proc_self_mountinfo.c  471
-rw-r--r--  collectors/proc.plugin/proc_self_mountinfo.h  61
-rw-r--r--  collectors/proc.plugin/proc_softirqs.c  243
-rw-r--r--  collectors/proc.plugin/proc_spl_kstat_zfs.c  435
-rw-r--r--  collectors/proc.plugin/proc_stat.c  1070
-rw-r--r--  collectors/proc.plugin/proc_sys_fs_file_nr.c  81
-rw-r--r--  collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c  47
-rw-r--r--  collectors/proc.plugin/proc_uptime.c  42
-rw-r--r--  collectors/proc.plugin/proc_vmstat.c  810
-rw-r--r--  collectors/proc.plugin/sys_block_zram.c  285
-rw-r--r--  collectors/proc.plugin/sys_class_drm.c  1183
-rw-r--r--  collectors/proc.plugin/sys_class_infiniband.c  705
-rw-r--r--  collectors/proc.plugin/sys_class_power_supply.c  414
-rw-r--r--  collectors/proc.plugin/sys_devices_pci_aer.c  340
-rw-r--r--  collectors/proc.plugin/sys_devices_system_edac_mc.c  298
-rw-r--r--  collectors/proc.plugin/sys_devices_system_node.c  165
-rw-r--r--  collectors/proc.plugin/sys_fs_btrfs.c  1155
-rw-r--r--  collectors/proc.plugin/sys_kernel_mm_ksm.c  194
-rw-r--r--  collectors/proc.plugin/zfs_common.c  960
-rw-r--r--  collectors/proc.plugin/zfs_common.h  115
-rw-r--r--  collectors/profile.plugin/Makefile.am  8
-rw-r--r--  collectors/profile.plugin/README.md  34
-rw-r--r--  collectors/profile.plugin/plugin_profile.cc  228
-rw-r--r--  collectors/python.d.plugin/Makefile.am  229
-rw-r--r--  collectors/python.d.plugin/README.md  77
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/Makefile.inc  13
l---------  collectors/python.d.plugin/adaptec_raid/README.md  1
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py  247
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf  53
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md  204
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/metadata.yaml  167
-rw-r--r--  collectors/python.d.plugin/alarms/Makefile.inc  13
l---------  collectors/python.d.plugin/alarms/README.md  1
-rw-r--r--  collectors/python.d.plugin/alarms/alarms.chart.py  95
-rw-r--r--  collectors/python.d.plugin/alarms/alarms.conf  60
-rw-r--r--  collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md  201
-rw-r--r--  collectors/python.d.plugin/alarms/metadata.yaml  177
-rw-r--r--  collectors/python.d.plugin/am2320/Makefile.inc  8
l---------  collectors/python.d.plugin/am2320/README.md  1
-rw-r--r--  collectors/python.d.plugin/am2320/am2320.chart.py  68
-rw-r--r--  collectors/python.d.plugin/am2320/am2320.conf  68
-rw-r--r--  collectors/python.d.plugin/am2320/integrations/am2320.md  181
-rw-r--r--  collectors/python.d.plugin/am2320/metadata.yaml  135
-rw-r--r--  collectors/python.d.plugin/anomalies/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/anomalies/README.md  248
-rw-r--r--  collectors/python.d.plugin/anomalies/anomalies.chart.py  425
-rw-r--r--  collectors/python.d.plugin/anomalies/anomalies.conf  184
-rw-r--r--  collectors/python.d.plugin/anomalies/metadata.yaml  87
-rw-r--r--  collectors/python.d.plugin/beanstalk/Makefile.inc  13
l---------  collectors/python.d.plugin/beanstalk/README.md  1
-rw-r--r--  collectors/python.d.plugin/beanstalk/beanstalk.chart.py  252
-rw-r--r--  collectors/python.d.plugin/beanstalk/beanstalk.conf  78
-rw-r--r--  collectors/python.d.plugin/beanstalk/integrations/beanstalk.md  219
-rw-r--r--  collectors/python.d.plugin/beanstalk/metadata.yaml  263
-rw-r--r--  collectors/python.d.plugin/bind_rndc/Makefile.inc  13
l---------  collectors/python.d.plugin/bind_rndc/README.md  1
-rw-r--r--  collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py  252
-rw-r--r--  collectors/python.d.plugin/bind_rndc/bind_rndc.conf  108
-rw-r--r--  collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md  215
-rw-r--r--  collectors/python.d.plugin/bind_rndc/metadata.yaml  191
-rw-r--r--  collectors/python.d.plugin/boinc/Makefile.inc  13
l---------  collectors/python.d.plugin/boinc/README.md  1
-rw-r--r--  collectors/python.d.plugin/boinc/boinc.chart.py  168
-rw-r--r--  collectors/python.d.plugin/boinc/boinc.conf  66
-rw-r--r--  collectors/python.d.plugin/boinc/integrations/boinc.md  204
-rw-r--r--  collectors/python.d.plugin/boinc/metadata.yaml  198
-rw-r--r--  collectors/python.d.plugin/ceph/Makefile.inc  13
l---------  collectors/python.d.plugin/ceph/README.md  1
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.chart.py  374
-rw-r--r--  collectors/python.d.plugin/ceph/ceph.conf  75
-rw-r--r--  collectors/python.d.plugin/ceph/integrations/ceph.md  194
-rw-r--r--  collectors/python.d.plugin/ceph/metadata.yaml  223
-rw-r--r--  collectors/python.d.plugin/changefinder/Makefile.inc  13
l---------  collectors/python.d.plugin/changefinder/README.md  1
-rw-r--r--  collectors/python.d.plugin/changefinder/changefinder.chart.py  185
-rw-r--r--  collectors/python.d.plugin/changefinder/changefinder.conf  74
-rw-r--r--  collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md  217
-rw-r--r--  collectors/python.d.plugin/changefinder/metadata.yaml  212
-rw-r--r--  collectors/python.d.plugin/dovecot/Makefile.inc  13
l---------  collectors/python.d.plugin/dovecot/README.md  1
-rw-r--r--  collectors/python.d.plugin/dovecot/dovecot.chart.py  143
-rw-r--r--  collectors/python.d.plugin/dovecot/dovecot.conf  98
-rw-r--r--  collectors/python.d.plugin/dovecot/integrations/dovecot.md  197
-rw-r--r--  collectors/python.d.plugin/dovecot/metadata.yaml  207
-rw-r--r--  collectors/python.d.plugin/example/Makefile.inc  13
l---------  collectors/python.d.plugin/example/README.md  1
-rw-r--r--  collectors/python.d.plugin/example/example.chart.py  51
-rw-r--r--  collectors/python.d.plugin/example/example.conf  87
-rw-r--r--  collectors/python.d.plugin/example/integrations/example_collector.md  171
-rw-r--r--  collectors/python.d.plugin/example/metadata.yaml  138
-rw-r--r--  collectors/python.d.plugin/exim/Makefile.inc  13
l---------  collectors/python.d.plugin/exim/README.md  1
-rw-r--r--  collectors/python.d.plugin/exim/exim.chart.py  39
-rw-r--r--  collectors/python.d.plugin/exim/exim.conf  91
-rw-r--r--  collectors/python.d.plugin/exim/integrations/exim.md  181
-rw-r--r--  collectors/python.d.plugin/exim/metadata.yaml  132
-rw-r--r--  collectors/python.d.plugin/fail2ban/Makefile.inc  13
l---------  collectors/python.d.plugin/fail2ban/README.md  1
-rw-r--r--  collectors/python.d.plugin/fail2ban/fail2ban.chart.py  217
-rw-r--r--  collectors/python.d.plugin/fail2ban/fail2ban.conf  68
-rw-r--r--  collectors/python.d.plugin/fail2ban/integrations/fail2ban.md  209
-rw-r--r--  collectors/python.d.plugin/fail2ban/metadata.yaml  200
-rw-r--r--  collectors/python.d.plugin/gearman/Makefile.inc  13
l---------  collectors/python.d.plugin/gearman/README.md  1
-rw-r--r--  collectors/python.d.plugin/gearman/gearman.chart.py  243
-rw-r--r--  collectors/python.d.plugin/gearman/gearman.conf  75
-rw-r--r--  collectors/python.d.plugin/gearman/integrations/gearman.md  210
-rw-r--r--  collectors/python.d.plugin/gearman/metadata.yaml  168
-rw-r--r--  collectors/python.d.plugin/go_expvar/Makefile.inc  13
l---------  collectors/python.d.plugin/go_expvar/README.md  1
-rw-r--r--  collectors/python.d.plugin/go_expvar/go_expvar.chart.py  253
-rw-r--r--  collectors/python.d.plugin/go_expvar/go_expvar.conf  108
-rw-r--r--  collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md  335
-rw-r--r--  collectors/python.d.plugin/go_expvar/metadata.yaml  329
-rw-r--r--  collectors/python.d.plugin/haproxy/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/haproxy/README.md  90
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.chart.py  368
-rw-r--r--  collectors/python.d.plugin/haproxy/haproxy.conf  83
-rw-r--r--  collectors/python.d.plugin/haproxy/metadata.yaml  322
-rw-r--r--  collectors/python.d.plugin/hddtemp/Makefile.inc  13
l---------  collectors/python.d.plugin/hddtemp/README.md  1
-rw-r--r--  collectors/python.d.plugin/hddtemp/hddtemp.chart.py  99
-rw-r--r--  collectors/python.d.plugin/hddtemp/hddtemp.conf  95
-rw-r--r--  collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md  217
-rw-r--r--  collectors/python.d.plugin/hddtemp/metadata.yaml  163
-rw-r--r--  collectors/python.d.plugin/hpssa/Makefile.inc  13
l---------  collectors/python.d.plugin/hpssa/README.md  1
-rw-r--r--  collectors/python.d.plugin/hpssa/hpssa.chart.py  396
-rw-r--r--  collectors/python.d.plugin/hpssa/hpssa.conf  61
-rw-r--r--  collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md  205
-rw-r--r--  collectors/python.d.plugin/hpssa/metadata.yaml  185
-rw-r--r--  collectors/python.d.plugin/icecast/Makefile.inc  13
l---------  collectors/python.d.plugin/icecast/README.md  1
-rw-r--r--  collectors/python.d.plugin/icecast/icecast.chart.py  94
-rw-r--r--  collectors/python.d.plugin/icecast/icecast.conf  81
-rw-r--r--  collectors/python.d.plugin/icecast/integrations/icecast.md  166
-rw-r--r--  collectors/python.d.plugin/icecast/metadata.yaml  127
-rw-r--r--  collectors/python.d.plugin/ipfs/Makefile.inc  13
l---------  collectors/python.d.plugin/ipfs/README.md  1
-rw-r--r--  collectors/python.d.plugin/ipfs/integrations/ipfs.md  203
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.chart.py  149
-rw-r--r--  collectors/python.d.plugin/ipfs/ipfs.conf  82
-rw-r--r--  collectors/python.d.plugin/ipfs/metadata.yaml  172
-rw-r--r--  collectors/python.d.plugin/litespeed/Makefile.inc  13
l---------  collectors/python.d.plugin/litespeed/README.md  1
-rw-r--r--  collectors/python.d.plugin/litespeed/integrations/litespeed.md  170
-rw-r--r--  collectors/python.d.plugin/litespeed/litespeed.chart.py  188
-rw-r--r--  collectors/python.d.plugin/litespeed/litespeed.conf  72
-rw-r--r--  collectors/python.d.plugin/litespeed/metadata.yaml  168
-rw-r--r--  collectors/python.d.plugin/megacli/Makefile.inc  13
l---------  collectors/python.d.plugin/megacli/README.md  1
-rw-r--r--  collectors/python.d.plugin/megacli/integrations/megacli.md  220
-rw-r--r--  collectors/python.d.plugin/megacli/megacli.chart.py  278
-rw-r--r--  collectors/python.d.plugin/megacli/megacli.conf  60
-rw-r--r--  collectors/python.d.plugin/megacli/metadata.yaml  193
-rw-r--r--  collectors/python.d.plugin/memcached/Makefile.inc  13
l---------  collectors/python.d.plugin/memcached/README.md  1
-rw-r--r--  collectors/python.d.plugin/memcached/integrations/memcached.md  215
-rw-r--r--  collectors/python.d.plugin/memcached/memcached.chart.py  197
-rw-r--r--  collectors/python.d.plugin/memcached/memcached.conf  90
-rw-r--r--  collectors/python.d.plugin/memcached/metadata.yaml  247
-rw-r--r--  collectors/python.d.plugin/monit/Makefile.inc  13
l---------  collectors/python.d.plugin/monit/README.md  1
-rw-r--r--  collectors/python.d.plugin/monit/integrations/monit.md  214
-rw-r--r--  collectors/python.d.plugin/monit/metadata.yaml  217
-rw-r--r--  collectors/python.d.plugin/monit/monit.chart.py  360
-rw-r--r--  collectors/python.d.plugin/monit/monit.conf  86
-rw-r--r--  collectors/python.d.plugin/nsd/Makefile.inc  13
l---------  collectors/python.d.plugin/nsd/README.md  1
-rw-r--r--  collectors/python.d.plugin/nsd/integrations/name_server_daemon.md  199
-rw-r--r--  collectors/python.d.plugin/nsd/metadata.yaml  201
-rw-r--r--  collectors/python.d.plugin/nsd/nsd.chart.py  105
-rw-r--r--  collectors/python.d.plugin/nsd/nsd.conf  91
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/README.md  157
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/metadata.yaml  166
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py  651
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf  68
-rw-r--r--  collectors/python.d.plugin/openldap/Makefile.inc  13
l---------  collectors/python.d.plugin/openldap/README.md  1
-rw-r--r--  collectors/python.d.plugin/openldap/integrations/openldap.md  215
-rw-r--r--  collectors/python.d.plugin/openldap/metadata.yaml  225
-rw-r--r--  collectors/python.d.plugin/openldap/openldap.chart.py  216
-rw-r--r--  collectors/python.d.plugin/openldap/openldap.conf  75
-rw-r--r--  collectors/python.d.plugin/oracledb/Makefile.inc  13
l---------  collectors/python.d.plugin/oracledb/README.md  1
-rw-r--r--  collectors/python.d.plugin/oracledb/integrations/oracle_db.md  226
-rw-r--r--  collectors/python.d.plugin/oracledb/metadata.yaml  309
-rw-r--r--  collectors/python.d.plugin/oracledb/oracledb.chart.py  846
-rw-r--r--  collectors/python.d.plugin/oracledb/oracledb.conf  88
-rw-r--r--  collectors/python.d.plugin/pandas/Makefile.inc  13
l---------  collectors/python.d.plugin/pandas/README.md  1
-rw-r--r--  collectors/python.d.plugin/pandas/integrations/pandas.md  365
-rw-r--r--  collectors/python.d.plugin/pandas/metadata.yaml  308
-rw-r--r--  collectors/python.d.plugin/pandas/pandas.chart.py  99
-rw-r--r--  collectors/python.d.plugin/pandas/pandas.conf  211
-rw-r--r--  collectors/python.d.plugin/postfix/Makefile.inc  13
l---------  collectors/python.d.plugin/postfix/README.md  1
-rw-r--r--  collectors/python.d.plugin/postfix/integrations/postfix.md  151
-rw-r--r--  collectors/python.d.plugin/postfix/metadata.yaml  124
-rw-r--r--  collectors/python.d.plugin/postfix/postfix.chart.py  52
-rw-r--r--  collectors/python.d.plugin/postfix/postfix.conf  72
-rw-r--r--  collectors/python.d.plugin/puppet/Makefile.inc  13
l---------  collectors/python.d.plugin/puppet/README.md  1
-rw-r--r--  collectors/python.d.plugin/puppet/integrations/puppet.md  215
-rw-r--r--  collectors/python.d.plugin/puppet/metadata.yaml  185
-rw-r--r--  collectors/python.d.plugin/puppet/puppet.chart.py  121
-rw-r--r--  collectors/python.d.plugin/puppet/puppet.conf  94
-rw-r--r--  collectors/python.d.plugin/python.d.conf  78
-rw-r--r--  collectors/python.d.plugin/python.d.plugin.in  946
-rw-r--r--  collectors/python.d.plugin/python_modules/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py  91
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py  82
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py  163
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py  261
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py  336
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py  188
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/charts.py  431
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/collection.py  117
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/loaders.py  46
-rw-r--r--  collectors/python.d.plugin/python_modules/bases/loggers.py  198
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/__init__.py  316
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/composer.py  140
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/constructor.py  676
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py  86
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/dumper.py  63
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/emitter.py  1141
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/error.py  76
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/events.py  87
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/loader.py  41
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/nodes.py  50
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/parser.py  590
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/reader.py  191
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/representer.py  485
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/resolver.py  225
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/scanner.py  1458
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/serializer.py  112
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml2/tokens.py  105
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/__init__.py  313
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/composer.py  140
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/constructor.py  687
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py  86
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/dumper.py  63
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/emitter.py  1138
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/error.py  76
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/events.py  87
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/loader.py  41
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/nodes.py  50
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/parser.py  590
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/reader.py  193
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/representer.py  375
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/resolver.py  225
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/scanner.py  1449
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/serializer.py  112
-rw-r--r--  collectors/python.d.plugin/python_modules/pyyaml3/tokens.py  105
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/boinc_client.py  515
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/filelock.py  451
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/lm_sensors.py  327
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/mcrcon.py  74
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/monotonic.py  201
-rw-r--r--  collectors/python.d.plugin/python_modules/third_party/ordereddict.py  110
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/__init__.py  98
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/_collections.py  320
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/connection.py  374
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/connectionpool.py  900
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py  591
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py  344
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py  297
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py  113
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py  458
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py  808
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py  189
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/exceptions.py  247
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/fields.py  179
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/filepost.py  95
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py  5
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py  0
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py  54
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py  260
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/six.py  852
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py  20
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py  156
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/poolmanager.py  441
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/request.py  149
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/response.py  623
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/__init__.py  55
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/connection.py  131
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/request.py  119
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/response.py  82
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/retry.py  402
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/selectors.py  588
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py  338
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/timeout.py  243
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/url.py  231
-rw-r--r--  collectors/python.d.plugin/python_modules/urllib3/util/wait.py  41
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/Makefile.inc  13
l---------  collectors/python.d.plugin/rethinkdbs/README.md  1
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md  190
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/metadata.yaml  188
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py  247
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf  76
-rw-r--r--  collectors/python.d.plugin/retroshare/Makefile.inc  13
l---------  collectors/python.d.plugin/retroshare/README.md  1
-rw-r--r--  collectors/python.d.plugin/retroshare/integrations/retroshare.md  191
-rw-r--r--  collectors/python.d.plugin/retroshare/metadata.yaml  144
-rw-r--r--  collectors/python.d.plugin/retroshare/retroshare.chart.py  78
-rw-r--r--  collectors/python.d.plugin/retroshare/retroshare.conf  72
-rw-r--r--  collectors/python.d.plugin/riakkv/Makefile.inc  13
l---------  collectors/python.d.plugin/riakkv/README.md  1
-rw-r--r--  collectors/python.d.plugin/riakkv/integrations/riakkv.md  220
-rw-r--r--  collectors/python.d.plugin/riakkv/metadata.yaml  358
-rw-r--r--  collectors/python.d.plugin/riakkv/riakkv.chart.py  334
-rw-r--r--  collectors/python.d.plugin/riakkv/riakkv.conf  68
-rw-r--r--  collectors/python.d.plugin/samba/Makefile.inc  13
l---------  collectors/python.d.plugin/samba/README.md  1
-rw-r--r--  collectors/python.d.plugin/samba/integrations/samba.md  221
-rw-r--r--  collectors/python.d.plugin/samba/metadata.yaml  205
-rw-r--r--  collectors/python.d.plugin/samba/samba.chart.py  144
-rw-r--r--  collectors/python.d.plugin/samba/samba.conf  60
-rw-r--r--  collectors/python.d.plugin/sensors/Makefile.inc  13
l---------  collectors/python.d.plugin/sensors/README.md  1
-rw-r--r--  collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md  187
-rw-r--r--  collectors/python.d.plugin/sensors/metadata.yaml  184
-rw-r--r--  collectors/python.d.plugin/sensors/sensors.chart.py  179
-rw-r--r--  collectors/python.d.plugin/sensors/sensors.conf  61
-rw-r--r--  collectors/python.d.plugin/smartd_log/Makefile.inc  13
l---------  collectors/python.d.plugin/smartd_log/README.md  1
-rw-r--r--  collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md  223
-rw-r--r--  collectors/python.d.plugin/smartd_log/metadata.yaml  429
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.chart.py  790
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.conf  76
-rw-r--r--  collectors/python.d.plugin/spigotmc/Makefile.inc  13
l---------  collectors/python.d.plugin/spigotmc/README.md  1
-rw-r--r--  collectors/python.d.plugin/spigotmc/integrations/spigotmc.md  216
-rw-r--r--  collectors/python.d.plugin/spigotmc/metadata.yaml  176
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.chart.py  184
-rw-r--r--  collectors/python.d.plugin/spigotmc/spigotmc.conf  66
-rw-r--r--  collectors/python.d.plugin/squid/Makefile.inc  13
l---------  collectors/python.d.plugin/squid/README.md  1
-rw-r--r--  collectors/python.d.plugin/squid/integrations/squid.md  199
-rw-r--r--  collectors/python.d.plugin/squid/metadata.yaml  174
-rw-r--r--  collectors/python.d.plugin/squid/squid.chart.py  123
-rw-r--r--  collectors/python.d.plugin/squid/squid.conf  167
-rw-r--r--  collectors/python.d.plugin/tomcat/Makefile.inc  13
l---------  collectors/python.d.plugin/tomcat/README.md  1
-rw-r--r--  collectors/python.d.plugin/tomcat/integrations/tomcat.md  203
-rw-r--r--  collectors/python.d.plugin/tomcat/metadata.yaml  200
-rw-r--r--  collectors/python.d.plugin/tomcat/tomcat.chart.py  199
-rw-r--r--  collectors/python.d.plugin/tomcat/tomcat.conf  89
-rw-r--r--  collectors/python.d.plugin/tor/Makefile.inc  13
l---------  collectors/python.d.plugin/tor/README.md  1
-rw-r--r--  collectors/python.d.plugin/tor/integrations/tor.md  197
-rw-r--r--  collectors/python.d.plugin/tor/metadata.yaml  143
-rw-r--r--  collectors/python.d.plugin/tor/tor.chart.py  109
-rw-r--r--  collectors/python.d.plugin/tor/tor.conf  81
-rw-r--r--  collectors/python.d.plugin/traefik/Makefile.inc  13
-rw-r--r--  collectors/python.d.plugin/traefik/README.md  98
-rw-r--r--  collectors/python.d.plugin/traefik/metadata.yaml  125
-rw-r--r--  collectors/python.d.plugin/traefik/traefik.chart.py  198
-rw-r--r--  collectors/python.d.plugin/traefik/traefik.conf  77
-rw-r--r--  collectors/python.d.plugin/uwsgi/Makefile.inc  13
l---------  collectors/python.d.plugin/uwsgi/README.md  1
-rw-r--r--  collectors/python.d.plugin/uwsgi/integrations/uwsgi.md  219
-rw-r--r--  collectors/python.d.plugin/uwsgi/metadata.yaml  201
-rw-r--r--  collectors/python.d.plugin/uwsgi/uwsgi.chart.py  177
-rw-r--r--  collectors/python.d.plugin/uwsgi/uwsgi.conf  92
-rw-r--r--  collectors/python.d.plugin/varnish/Makefile.inc  13
l---------  collectors/python.d.plugin/varnish/README.md  1
-rw-r--r--  collectors/python.d.plugin/varnish/integrations/varnish.md  213
-rw-r--r--  collectors/python.d.plugin/varnish/metadata.yaml  253
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.chart.py  385
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.conf  66
-rw-r--r--  collectors/python.d.plugin/w1sensor/Makefile.inc  13
l---------  collectors/python.d.plugin/w1sensor/README.md  1
-rw-r--r--  collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md  167
-rw-r--r--  collectors/python.d.plugin/w1sensor/metadata.yaml  119
-rw-r--r--  collectors/python.d.plugin/w1sensor/w1sensor.chart.py  97
-rw-r--r--  collectors/python.d.plugin/w1sensor/w1sensor.conf  72
-rw-r--r--  collectors/python.d.plugin/zscores/Makefile.inc  12
l---------  collectors/python.d.plugin/zscores/README.md  1
-rw-r--r--  collectors/python.d.plugin/zscores/integrations/python.d_zscores.md  195
-rw-r--r--  collectors/python.d.plugin/zscores/metadata.yaml  187
-rw-r--r--  collectors/python.d.plugin/zscores/zscores.chart.py  146
-rw-r--r--  collectors/python.d.plugin/zscores/zscores.conf  108
-rw-r--r--  collectors/slabinfo.plugin/Makefile.am  14
l---------  collectors/slabinfo.plugin/README.md  1
-rw-r--r--  collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md  131
-rw-r--r--  collectors/slabinfo.plugin/metadata.yaml  108
-rw-r--r--  collectors/slabinfo.plugin/slabinfo.c  393
-rw-r--r--  collectors/statsd.plugin/Makefile.am  23
-rw-r--r--  collectors/statsd.plugin/README.md  1052
-rw-r--r--  collectors/statsd.plugin/asterisk.conf  208
-rw-r--r--  collectors/statsd.plugin/asterisk.md  62
-rw-r--r--  collectors/statsd.plugin/example.conf  64
-rw-r--r--  collectors/statsd.plugin/k6.conf  110
-rw-r--r--  collectors/statsd.plugin/k6.md  77
-rw-r--r--  collectors/statsd.plugin/statsd.c  2893
-rw-r--r--  collectors/systemd-journal.plugin/Makefile.am  16
-rw-r--r--  collectors/systemd-journal.plugin/README.md  472
-rw-r--r--  collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md  126
-rw-r--r--  collectors/systemd-journal.plugin/forward_secure_sealing.md  80
-rw-r--r--  collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md  150
-rw-r--r--  collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md  250
-rw-r--r--  collectors/systemd-journal.plugin/systemd-internals.h  162
-rw-r--r--  collectors/systemd-journal.plugin/systemd-journal-annotations.c  719
-rw-r--r--  collectors/systemd-journal.plugin/systemd-journal-files.c  857
-rw-r--r--  collectors/systemd-journal.plugin/systemd-journal-fstat.c  74
-rwxr-xr-x  collectors/systemd-journal.plugin/systemd-journal-self-signed-certs.sh  267
-rw-r--r--  collectors/systemd-journal.plugin/systemd-journal-watcher.c  379
-rw-r--r--  collectors/systemd-journal.plugin/systemd-journal.c  2139
-rw-r--r--  collectors/systemd-journal.plugin/systemd-main.c  112
-rw-r--r--  collectors/systemd-journal.plugin/systemd-units.c  1965
-rw-r--r--  collectors/tc.plugin/Makefile.am  20
l---------  collectors/tc.plugin/README.md  1
-rw-r--r--  collectors/tc.plugin/integrations/tc_qos_classes.md  171
-rw-r--r--  collectors/tc.plugin/metadata.yaml  148
-rw-r--r--  collectors/tc.plugin/plugin_tc.c  1183
-rwxr-xr-x  collectors/tc.plugin/tc-qos-helper.sh.in  356
-rw-r--r--  collectors/timex.plugin/Makefile.am  8
l---------  collectors/timex.plugin/README.md  1
-rw-r--r--  collectors/timex.plugin/integrations/timex.md  143
-rw-r--r--  collectors/timex.plugin/metadata.yaml  112
-rw-r--r--  collectors/timex.plugin/plugin_timex.c  176
-rw-r--r--  collectors/xenstat.plugin/Makefile.am  8
l---------  collectors/xenstat.plugin/README.md  1
-rw-r--r--  collectors/xenstat.plugin/integrations/xen_xcp-ng.md  176
-rw-r--r--  collectors/xenstat.plugin/metadata.yaml  195
-rw-r--r--  collectors/xenstat.plugin/xenstat_plugin.c  1066
791 files changed, 0 insertions, 209774 deletions
diff --git a/collectors/COLLECTORS.md b/collectors/COLLECTORS.md
deleted file mode 100644
index 9a3499593..000000000
--- a/collectors/COLLECTORS.md
+++ /dev/null
@@ -1,1194 +0,0 @@
-# Monitor anything with Netdata
-
-Netdata uses collectors to help you gather metrics from your favorite applications and services and view them in
-real-time, interactive charts. The following list includes collectors for both external services/applications and
-internal system metrics.
-
-Learn more about [how collectors work](https://github.com/netdata/netdata/blob/master/collectors/README.md), and then
-learn how to [enable or configure](https://github.com/netdata/netdata/blob/master/collectors/REFERENCE.md#enable-and-disable-a-specific-collection-module)
-any of the collectors below; the process is the same for every collection module.
-
-Some collectors have both Go and Python versions as we continue our effort to migrate all collectors to Go. In these
-cases, _Netdata always prioritizes the Go version_, and we highly recommend you use the Go versions for the best
-experience.
-
-If you want to use a Python version of a collector, you need to
-explicitly [disable the Go version](https://github.com/netdata/netdata/blob/master/collectors/REFERENCE.md#enable-and-disable-a-specific-collection-module),
-and enable the Python version. Netdata then skips the Go version and attempts to load the Python version and its
-accompanying configuration file.
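
A minimal sketch of that switch, assuming a stock install with the `edit-config` helper in `/etc/netdata` and a hypothetical `example` module name:

```bash
# Hypothetical walk-through: prefer the Python version of the "example" module.
cd /etc/netdata            # your Netdata config directory may differ

# Disable the Go version: under the "modules:" map in go.d.conf, set
#   example: no
sudo ./edit-config go.d.conf

# Enable the Python version: at the top level of python.d.conf, set
#   example: yes
sudo ./edit-config python.d.conf

# Restart the agent so both orchestrators re-read their configuration.
sudo systemctl restart netdata
```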
-
-## Add your application to Netdata
-
-If you don't see the app/service you'd like to monitor in this list:
-
-- If your application has a Prometheus endpoint, Netdata can monitor it! Look at our
- [generic Prometheus collector](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/README.md) (see the configuration sketch after this list).
-
-- If your application is instrumented to expose [StatsD](https://blog.netdata.cloud/introduction-to-statsd/) metrics,
- see our [generic StatsD collector](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/README.md).
-
-- If you have data in CSV, JSON, XML or other popular formats, you may be able to use our
- [generic structured data (Pandas) collector](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/pandas/README.md).
-
-- Check out our [GitHub issues](https://github.com/netdata/netdata/issues). Use the search bar to look for previous
- discussions about that collector—we may be looking for assistance from users such as yourself!
-
-- If you don't see the collector there, you can make
- a [feature request](https://github.com/netdata/netdata/issues/new/choose) on GitHub.
-
-- If you have basic software development skills, you can add your own plugin
- in [Go](https://github.com/netdata/go.d.plugin/blob/master/README.md#how-to-develop-a-collector)
- or [Python](https://github.com/netdata/netdata/blob/master/docs/guides/python-collector.md).
-
-## Available Data Collection Integrations
-<!-- AUTOGENERATED PART BY integrations/gen_doc_collector_page.py SCRIPT, DO NOT EDIT MANUALLY -->
-### APM
-
-- [Alamos FE2 server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/alamos_fe2_server.md)
-
-- [Apache Airflow](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/apache_airflow.md)
-
-- [Apache Flink](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/apache_flink.md)
-
-- [Audisto](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/audisto.md)
-
-- [Dependency-Track](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dependency-track.md)
-
-- [Go applications (EXPVAR)](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md)
-
-- [Google Pagespeed](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/google_pagespeed.md)
-
-- [IBM AIX systems Njmon](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ibm_aix_systems_njmon.md)
-
-- [JMX](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/jmx.md)
-
-- [Java Spring-boot 2 applications](https://github.com/netdata/go.d.plugin/blob/master/modules/springboot2/integrations/java_spring-boot_2_applications.md)
-
-- [NRPE daemon](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nrpe_daemon.md)
-
-- [Sentry](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sentry.md)
-
-- [Sysload](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sysload.md)
-
-- [VSCode](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/vscode.md)
-
-- [YOURLS URL Shortener](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/yourls_url_shortener.md)
-
-- [bpftrace variables](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/bpftrace_variables.md)
-
-- [gpsd](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/gpsd.md)
-
-- [jolokia](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/jolokia.md)
-
-- [phpDaemon](https://github.com/netdata/go.d.plugin/blob/master/modules/phpdaemon/integrations/phpdaemon.md)
-
-### Authentication and Authorization
-
-- [Fail2ban](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md)
-
-- [FreeRADIUS](https://github.com/netdata/go.d.plugin/blob/master/modules/freeradius/integrations/freeradius.md)
-
-- [HashiCorp Vault secrets](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hashicorp_vault_secrets.md)
-
-- [LDAP](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ldap.md)
-
-- [OpenLDAP (community)](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openldap_community.md)
-
-- [OpenLDAP](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/openldap/integrations/openldap.md)
-
-- [RADIUS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/radius.md)
-
-- [SSH](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ssh.md)
-
-- [TACACS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tacacs.md)
-
-### Blockchain Servers
-
-- [Chia](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/chia.md)
-
-- [Crypto exchanges](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/crypto_exchanges.md)
-
-- [Cryptowatch](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cryptowatch.md)
-
-- [Energi Core Wallet](https://github.com/netdata/go.d.plugin/blob/master/modules/energid/integrations/energi_core_wallet.md)
-
-- [Go-ethereum](https://github.com/netdata/go.d.plugin/blob/master/modules/geth/integrations/go-ethereum.md)
-
-- [Helium miner (validator)](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/helium_miner_validator.md)
-
-- [IOTA full node](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/iota_full_node.md)
-
-- [Sia](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sia.md)
-
-### CICD Platforms
-
-- [Concourse](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/concourse.md)
-
-- [GitLab Runner](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/gitlab_runner.md)
-
-- [Jenkins](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/jenkins.md)
-
-- [Puppet](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/puppet/integrations/puppet.md)
-
-### Cloud Provider Managed
-
-- [AWS EC2 Compute instances](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_ec2_compute_instances.md)
-
-- [AWS EC2 Spot Instance](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_ec2_spot_instance.md)
-
-- [AWS ECS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_ecs.md)
-
-- [AWS Health events](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_health_events.md)
-
-- [AWS Quota](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_quota.md)
-
-- [AWS S3 buckets](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_s3_buckets.md)
-
-- [AWS SQS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_sqs.md)
-
-- [AWS instance health](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_instance_health.md)
-
-- [Akamai Global Traffic Management](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/akamai_global_traffic_management.md)
-
-- [Akamai Cloudmonitor](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/akami_cloudmonitor.md)
-
-- [Alibaba Cloud](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/alibaba_cloud.md)
-
-- [ArvanCloud CDN](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/arvancloud_cdn.md)
-
-- [Azure AD App passwords](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/azure_ad_app_passwords.md)
-
-- [Azure Elastic Pool SQL](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/azure_elastic_pool_sql.md)
-
-- [Azure Resources](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/azure_resources.md)
-
-- [Azure SQL](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/azure_sql.md)
-
-- [Azure Service Bus](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/azure_service_bus.md)
-
-- [Azure application](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/azure_application.md)
-
-- [BigQuery](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/bigquery.md)
-
-- [CloudWatch](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cloudwatch.md)
-
-- [Dell EMC ECS cluster](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dell_emc_ecs_cluster.md)
-
-- [DigitalOcean](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/digitalocean.md)
-
-- [GCP GCE](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/gcp_gce.md)
-
-- [GCP Quota](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/gcp_quota.md)
-
-- [Google Cloud Platform](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/google_cloud_platform.md)
-
-- [Google Stackdriver](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/google_stackdriver.md)
-
-- [Linode](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/linode.md)
-
-- [Lustre metadata](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/lustre_metadata.md)
-
-- [Nextcloud servers](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nextcloud_servers.md)
-
-- [OpenStack](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openstack.md)
-
-- [Zerto](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/zerto.md)
-
-### Containers and VMs
-
-- [Containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/containers.md)
-
-- [Docker Engine](https://github.com/netdata/go.d.plugin/blob/master/modules/docker_engine/integrations/docker_engine.md)
-
-- [Docker Hub repository](https://github.com/netdata/go.d.plugin/blob/master/modules/dockerhub/integrations/docker_hub_repository.md)
-
-- [Docker](https://github.com/netdata/go.d.plugin/blob/master/modules/docker/integrations/docker.md)
-
-- [LXC Containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/lxc_containers.md)
-
-- [Libvirt Containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/libvirt_containers.md)
-
-- [NSX-T](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nsx-t.md)
-
-- [Podman](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/podman.md)
-
-- [Proxmox Containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/proxmox_containers.md)
-
-- [Proxmox VE](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/proxmox_ve.md)
-
-- [VMware vCenter Server](https://github.com/netdata/go.d.plugin/blob/master/modules/vsphere/integrations/vmware_vcenter_server.md)
-
-- [Virtual Machines](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/virtual_machines.md)
-
-- [Xen XCP-ng](https://github.com/netdata/netdata/blob/master/collectors/xenstat.plugin/integrations/xen_xcp-ng.md)
-
-- [cAdvisor](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cadvisor.md)
-
-- [oVirt Containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/ovirt_containers.md)
-
-- [vCenter Server Appliance](https://github.com/netdata/go.d.plugin/blob/master/modules/vcsa/integrations/vcenter_server_appliance.md)
-
-### Databases
-
-- [4D Server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/4d_server.md)
-
-- [AWS RDS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aws_rds.md)
-
-- [Cassandra](https://github.com/netdata/go.d.plugin/blob/master/modules/cassandra/integrations/cassandra.md)
-
-- [ClickHouse](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/clickhouse.md)
-
-- [ClusterControl CMON](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/clustercontrol_cmon.md)
-
-- [CockroachDB](https://github.com/netdata/go.d.plugin/blob/master/modules/cockroachdb/integrations/cockroachdb.md)
-
-- [CouchDB](https://github.com/netdata/go.d.plugin/blob/master/modules/couchdb/integrations/couchdb.md)
-
-- [Couchbase](https://github.com/netdata/go.d.plugin/blob/master/modules/couchbase/integrations/couchbase.md)
-
-- [HANA](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hana.md)
-
-- [Hasura GraphQL Server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hasura_graphql_server.md)
-
-- [InfluxDB](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/influxdb.md)
-
-- [Machbase](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/machbase.md)
-
-- [MariaDB](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/integrations/mariadb.md)
-
-- [Memcached (community)](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/memcached_community.md)
-
-- [Memcached](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/memcached/integrations/memcached.md)
-
-- [MongoDB](https://github.com/netdata/go.d.plugin/blob/master/modules/mongodb/integrations/mongodb.md)
-
-- [MySQL](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/integrations/mysql.md)
-
-- [ODBC](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/odbc.md)
-
-- [Oracle DB (community)](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/oracle_db_community.md)
-
-- [Oracle DB](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/oracledb/integrations/oracle_db.md)
-
-- [Patroni](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/patroni.md)
-
-- [Percona MySQL](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/integrations/percona_mysql.md)
-
-- [PgBouncer](https://github.com/netdata/go.d.plugin/blob/master/modules/pgbouncer/integrations/pgbouncer.md)
-
-- [Pgpool-II](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/pgpool-ii.md)
-
-- [Pika](https://github.com/netdata/go.d.plugin/blob/master/modules/pika/integrations/pika.md)
-
-- [PostgreSQL](https://github.com/netdata/go.d.plugin/blob/master/modules/postgres/integrations/postgresql.md)
-
-- [ProxySQL](https://github.com/netdata/go.d.plugin/blob/master/modules/proxysql/integrations/proxysql.md)
-
-- [Redis](https://github.com/netdata/go.d.plugin/blob/master/modules/redis/integrations/redis.md)
-
-- [RethinkDB](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md)
-
-- [RiakKV](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/riakkv/integrations/riakkv.md)
-
-- [SQL Database agnostic](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sql_database_agnostic.md)
-
-- [Vertica](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/vertica.md)
-
-- [Warp10](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/warp10.md)
-
-- [pgBackRest](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/pgbackrest.md)
-
-### Distributed Computing Systems
-
-- [BOINC](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/boinc/integrations/boinc.md)
-
-- [Gearman](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/gearman/integrations/gearman.md)
-
-### DNS and DHCP Servers
-
-- [Akamai Edge DNS Traffic](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/akamai_edge_dns_traffic.md)
-
-- [CoreDNS](https://github.com/netdata/go.d.plugin/blob/master/modules/coredns/integrations/coredns.md)
-
-- [DNS query](https://github.com/netdata/go.d.plugin/blob/master/modules/dnsquery/integrations/dns_query.md)
-
-- [DNSBL](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dnsbl.md)
-
-- [DNSdist](https://github.com/netdata/go.d.plugin/blob/master/modules/dnsdist/integrations/dnsdist.md)
-
-- [Dnsmasq DHCP](https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md)
-
-- [Dnsmasq](https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq/integrations/dnsmasq.md)
-
-- [ISC Bind (RNDC)](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md)
-
-- [ISC DHCP](https://github.com/netdata/go.d.plugin/blob/master/modules/isc_dhcpd/integrations/isc_dhcp.md)
-
-- [Name Server Daemon](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md)
-
-- [NextDNS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nextdns.md)
-
-- [Pi-hole](https://github.com/netdata/go.d.plugin/blob/master/modules/pihole/integrations/pi-hole.md)
-
-- [PowerDNS Authoritative Server](https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns/integrations/powerdns_authoritative_server.md)
-
-- [PowerDNS Recursor](https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns_recursor/integrations/powerdns_recursor.md)
-
-- [Unbound](https://github.com/netdata/go.d.plugin/blob/master/modules/unbound/integrations/unbound.md)
-
-### eBPF
-
-- [eBPF Cachestat](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_cachestat.md)
-
-- [eBPF DCstat](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_dcstat.md)
-
-- [eBPF Disk](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_disk.md)
-
-- [eBPF Filedescriptor](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md)
-
-- [eBPF Filesystem](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_filesystem.md)
-
-- [eBPF Hardirq](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_hardirq.md)
-
-- [eBPF MDflush](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_mdflush.md)
-
-- [eBPF Mount](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_mount.md)
-
-- [eBPF OOMkill](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_oomkill.md)
-
-- [eBPF Process](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_process.md)
-
-- [eBPF Processes](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_processes.md)
-
-- [eBPF SHM](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_shm.md)
-
-- [eBPF SWAP](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_swap.md)
-
-- [eBPF Socket](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_socket.md)
-
-- [eBPF SoftIRQ](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_softirq.md)
-
-- [eBPF Sync](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_sync.md)
-
-- [eBPF VFS](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/integrations/ebpf_vfs.md)
-
-### FreeBSD
-
-- [FreeBSD NFS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/freebsd_nfs.md)
-
-- [FreeBSD RCTL-RACCT](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/freebsd_rctl-racct.md)
-
-- [dev.cpu.0.freq](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md)
-
-- [dev.cpu.temperature](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md)
-
-- [devstat](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/devstat.md)
-
-- [getifaddrs](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/getifaddrs.md)
-
-- [getmntinfo](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/getmntinfo.md)
-
-- [hw.intrcnt](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/hw.intrcnt.md)
-
-- [ipfw](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/ipfw.md)
-
-- [kern.cp_time](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/kern.cp_time.md)
-
-- [kern.ipc.msq](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/kern.ipc.msq.md)
-
-- [kern.ipc.sem](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/kern.ipc.sem.md)
-
-- [kern.ipc.shm](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/kern.ipc.shm.md)
-
-- [net.inet.icmp.stats](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md)
-
-- [net.inet.ip.stats](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md)
-
-- [net.inet.tcp.states](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md)
-
-- [net.inet.tcp.stats](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md)
-
-- [net.inet.udp.stats](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md)
-
-- [net.inet6.icmp6.stats](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md)
-
-- [net.inet6.ip6.stats](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md)
-
-- [net.isr](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/net.isr.md)
-
-- [system.ram](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/system.ram.md)
-
-- [uptime](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/uptime.md)
-
-- [vm.loadavg](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.loadavg.md)
-
-- [vm.stats.sys.v_intr](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md)
-
-- [vm.stats.sys.v_soft](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md)
-
-- [vm.stats.sys.v_swtch](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md)
-
-- [vm.stats.vm.v_pgfaults](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md)
-
-- [vm.stats.vm.v_swappgs](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md)
-
-- [vm.swap_info](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.swap_info.md)
-
-- [vm.vmtotal](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/vm.vmtotal.md)
-
-- [zfs](https://github.com/netdata/netdata/blob/master/collectors/freebsd.plugin/integrations/zfs.md)
-
-### FTP Servers
-
-- [ProFTPD](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/proftpd.md)
-
-### Gaming
-
-- [BungeeCord](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/bungeecord.md)
-
-- [CS:GO](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cs:go.md)
-
-- [Minecraft](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/minecraft.md)
-
-- [OpenRCT2](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openrct2.md)
-
-- [SpigotMC](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md)
-
-- [Steam](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/steam.md)
-
-### Generic Data Collection
-
-- [Custom Exporter](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/custom_exporter.md)
-
-- [Excel spreadsheet](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/excel_spreadsheet.md)
-
-- [Generic Command Line Output](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/generic_command_line_output.md)
-
-- [JetBrains Floating License Server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/jetbrains_floating_license_server.md)
-
-- [OpenWeatherMap](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openweathermap.md)
-
-- [Pandas](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/pandas/integrations/pandas.md)
-
-- [Prometheus endpoint](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/prometheus_endpoint.md)
-
-- [SNMP devices](https://github.com/netdata/go.d.plugin/blob/master/modules/snmp/integrations/snmp_devices.md)
-
-- [Shell command](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/shell_command.md)
-
-- [Tankerkoenig API](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tankerkoenig_api.md)
-
-- [TwinCAT ADS Web Service](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/twincat_ads_web_service.md)
-
-### Hardware Devices and Sensors
-
-- [1-Wire Sensors](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md)
-
-- [AM2320](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/am2320/integrations/am2320.md)
-
-- [AMD CPU & GPU](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/amd_cpu_&_gpu.md)
-
-- [AMD GPU](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/amd_gpu.md)
-
-- [ARM HWCPipe](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/arm_hwcpipe.md)
-
-- [CUPS](https://github.com/netdata/netdata/blob/master/collectors/cups.plugin/integrations/cups.md)
-
-- [HDD temperature](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md)
-
-- [HP iLO](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hp_ilo.md)
-
-- [IBM CryptoExpress (CEX) cards](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md)
-
-- [IBM Z Hardware Management Console](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ibm_z_hardware_management_console.md)
-
-- [IPMI (By SoundCloud)](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ipmi_by_soundcloud.md)
-
-- [Intelligent Platform Management Interface (IPMI)](https://github.com/netdata/netdata/blob/master/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md)
-
-- [Linux Sensors (lm-sensors)](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md)
-
-- [Linux Sensors (sysfs)](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md)
-
-- [NVML](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nvml.md)
-
-- [Nvidia GPU](https://github.com/netdata/go.d.plugin/blob/master/modules/nvidia_smi/integrations/nvidia_gpu.md)
-
-- [Raritan PDU](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/raritan_pdu.md)
-
-- [S.M.A.R.T.](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md)
-
-- [ServerTech](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/servertech.md)
-
-- [Siemens S7 PLC](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/siemens_s7_plc.md)
-
-- [T-Rex NVIDIA GPU Miner](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md)
-
-### IoT Devices
-
-- [Airthings Waveplus air sensor](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/airthings_waveplus_air_sensor.md)
-
-- [Bobcat Miner 300](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/bobcat_miner_300.md)
-
-- [Christ Elektronik CLM5IP power panel](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md)
-
-- [CraftBeerPi](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/craftbeerpi.md)
-
-- [Dutch Electricity Smart Meter](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dutch_electricity_smart_meter.md)
-
-- [Elgato Key Light devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/elgato_key_light_devices..md)
-
-- [Energomera smart power meters](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/energomera_smart_power_meters.md)
-
-- [Helium hotspot](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/helium_hotspot.md)
-
-- [Homebridge](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/homebridge.md)
-
-- [Homey](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/homey.md)
-
-- [Jarvis Standing Desk](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/jarvis_standing_desk.md)
-
-- [MP707 USB thermometer](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mp707_usb_thermometer.md)
-
-- [Modbus protocol](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/modbus_protocol.md)
-
-- [Monnit Sensors MQTT](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/monnit_sensors_mqtt.md)
-
-- [Nature Remo E lite devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nature_remo_e_lite_devices.md)
-
-- [Netatmo sensors](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/netatmo_sensors.md)
-
-- [OpenHAB](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openhab.md)
-
-- [Personal Weather Station](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/personal_weather_station.md)
-
-- [Philips Hue](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/philips_hue.md)
-
-- [Pimoroni Enviro+](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/pimoroni_enviro+.md)
-
-- [Powerpal devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/powerpal_devices.md)
-
-- [Radio Thermostat](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/radio_thermostat.md)
-
-- [SMA Inverters](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sma_inverters.md)
-
-- [Salicru EQX inverter](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/salicru_eqx_inverter.md)
-
-- [Sense Energy](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sense_energy.md)
-
-- [Shelly humidity sensor](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/shelly_humidity_sensor.md)
-
-- [Smart meters SML](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/smart_meters_sml.md)
-
-- [Solar logging stick](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/solar_logging_stick.md)
-
-- [SolarEdge inverters](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/solaredge_inverters.md)
-
-- [Solis Ginlong 5G inverters](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/solis_ginlong_5g_inverters.md)
-
-- [Sunspec Solar Energy](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sunspec_solar_energy.md)
-
-- [TP-Link P110](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tp-link_p110.md)
-
-- [Tado smart heating solution](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tado_smart_heating_solution.md)
-
-- [Tesla Powerwall](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tesla_powerwall.md)
-
-- [Tesla Wall Connector](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tesla_wall_connector.md)
-
-- [Tesla vehicle](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/tesla_vehicle.md)
-
-- [Xiaomi Mi Flora](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/xiaomi_mi_flora.md)
-
-- [iqAir AirVisual air quality monitors](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md)
-
-### Kubernetes
-
-- [Cilium Agent](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cilium_agent.md)
-
-- [Cilium Operator](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cilium_operator.md)
-
-- [Cilium Proxy](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cilium_proxy.md)
-
-- [Kubelet](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubelet/integrations/kubelet.md)
-
-- [Kubeproxy](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubeproxy/integrations/kubeproxy.md)
-
-- [Kubernetes Cluster Cloud Cost](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md)
-
-- [Kubernetes Cluster State](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_state/integrations/kubernetes_cluster_state.md)
-
-- [Kubernetes Containers](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/kubernetes_containers.md)
-
-- [Rancher](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/rancher.md)
-
-### Linux Systems
-
-- [CPU performance](https://github.com/netdata/netdata/blob/master/collectors/perf.plugin/integrations/cpu_performance.md)
-
-- [Disk space](https://github.com/netdata/netdata/blob/master/collectors/diskspace.plugin/integrations/disk_space.md)
-
-- [Files and directories](https://github.com/netdata/go.d.plugin/blob/master/modules/filecheck/integrations/files_and_directories.md)
-
-- [OpenRC](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openrc.md)
-
-#### CPU
-
-- [Interrupts](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/interrupts.md)
-
-- [SoftIRQ statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/softirq_statistics.md)
-
-#### Disk
-
-- [Disk Statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/disk_statistics.md)
-
-- [MD RAID](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/md_raid.md)
-
-##### BTRFS
-
-- [BTRFS](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/btrfs.md)
-
-##### NFS
-
-- [NFS Client](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/nfs_client.md)
-
-- [NFS Server](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/nfs_server.md)
-
-##### ZFS
-
-- [ZFS Adaptive Replacement Cache](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md)
-
-- [ZFS Pools](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/zfs_pools.md)
-
-#### Firewall
-
-- [Conntrack](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/conntrack.md)
-
-- [Netfilter](https://github.com/netdata/netdata/blob/master/collectors/nfacct.plugin/integrations/netfilter.md)
-
-- [Synproxy](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/synproxy.md)
-
-- [nftables](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nftables.md)
-
-#### IPC
-
-- [Inter Process Communication](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/inter_process_communication.md)
-
-#### Kernel
-
-- [Linux kernel SLAB allocator statistics](https://github.com/netdata/netdata/blob/master/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md)
-
-- [Power Capping](https://github.com/netdata/netdata/blob/master/collectors/debugfs.plugin/integrations/power_capping.md)
-
-#### Memory
-
-- [Kernel Same-Page Merging](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/kernel_same-page_merging.md)
-
-- [Linux ZSwap](https://github.com/netdata/netdata/blob/master/collectors/debugfs.plugin/integrations/linux_zswap.md)
-
-- [Memory Statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/memory_statistics.md)
-
-- [Memory Usage](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/memory_usage.md)
-
-- [Memory modules (DIMMs)](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/memory_modules_dimms.md)
-
-- [Non-Uniform Memory Access](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/non-uniform_memory_access.md)
-
-- [Page types](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/page_types.md)
-
-- [System Memory Fragmentation](https://github.com/netdata/netdata/blob/master/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md)
-
-- [ZRAM](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/zram.md)
-
-#### Network
-
-- [Access Points](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/ap/integrations/access_points.md)
-
-- [IP Virtual Server](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/ip_virtual_server.md)
-
-- [IPv6 Socket Statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/ipv6_socket_statistics.md)
-
-- [InfiniBand](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/infiniband.md)
-
-- [Network interfaces](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/network_interfaces.md)
-
-- [Network statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/network_statistics.md)
-
-- [SCTP Statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/sctp_statistics.md)
-
-- [Socket statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/socket_statistics.md)
-
-- [Softnet Statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/softnet_statistics.md)
-
-- [Wireless network interfaces](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/wireless_network_interfaces.md)
-
-- [tc QoS classes](https://github.com/netdata/netdata/blob/master/collectors/tc.plugin/integrations/tc_qos_classes.md)
-
-#### Power Supply
-
-- [Power Supply](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/power_supply.md)
-
-#### Pressure
-
-- [Pressure Stall Information](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/pressure_stall_information.md)
-
-#### System
-
-- [Entropy](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/entropy.md)
-
-- [System Load Average](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/system_load_average.md)
-
-- [System Uptime](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/system_uptime.md)
-
-- [System statistics](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/integrations/system_statistics.md)
-
-### Logs Servers
-
-- [AuthLog](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/authlog.md)
-
-- [Fluentd](https://github.com/netdata/go.d.plugin/blob/master/modules/fluentd/integrations/fluentd.md)
-
-- [Graylog Server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/graylog_server.md)
-
-- [Logstash](https://github.com/netdata/go.d.plugin/blob/master/modules/logstash/integrations/logstash.md)
-
-- [journald](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/journald.md)
-
-- [loki](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/loki.md)
-
-- [mtail](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mtail.md)
-
-### macOS Systems
-
-- [Apple Time Machine](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/apple_time_machine.md)
-
-- [macOS](https://github.com/netdata/netdata/blob/master/collectors/macos.plugin/integrations/macos.md)
-
-### Mail Servers
-
-- [DMARC](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dmarc.md)
-
-- [Dovecot](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/dovecot/integrations/dovecot.md)
-
-- [Exim](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/exim/integrations/exim.md)
-
-- [Halon](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/halon.md)
-
-- [Maildir](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/maildir.md)
-
-- [Postfix](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/postfix/integrations/postfix.md)
-
-### Media Services
-
-- [Discourse](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/discourse.md)
-
-- [Icecast](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/icecast/integrations/icecast.md)
-
-- [OBS Studio](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/obs_studio.md)
-
-- [RetroShare](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/retroshare/integrations/retroshare.md)
-
-- [SABnzbd](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sabnzbd.md)
-
-- [Stream](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/stream.md)
-
-- [Twitch](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/twitch.md)
-
-- [Zulip](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/zulip.md)
-
-### Message Brokers
-
-- [ActiveMQ](https://github.com/netdata/go.d.plugin/blob/master/modules/activemq/integrations/activemq.md)
-
-- [Apache Pulsar](https://github.com/netdata/go.d.plugin/blob/master/modules/pulsar/integrations/apache_pulsar.md)
-
-- [Beanstalk](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md)
-
-- [IBM MQ](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ibm_mq.md)
-
-- [Kafka Connect](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/kafka_connect.md)
-
-- [Kafka ZooKeeper](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/kafka_zookeeper.md)
-
-- [Kafka](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/kafka.md)
-
-- [MQTT Blackbox](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mqtt_blackbox.md)
-
-- [RabbitMQ](https://github.com/netdata/go.d.plugin/blob/master/modules/rabbitmq/integrations/rabbitmq.md)
-
-- [Redis Queue](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/redis_queue.md)
-
-- [VerneMQ](https://github.com/netdata/go.d.plugin/blob/master/modules/vernemq/integrations/vernemq.md)
-
-- [XMPP Server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/xmpp_server.md)
-
-- [mosquitto](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mosquitto.md)
-
-### Networking Stack and Network Interfaces
-
-- [8430FT modem](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/8430ft_modem.md)
-
-- [A10 ACOS network devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/a10_acos_network_devices.md)
-
-- [Andrews & Arnold line status](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/andrews_&_arnold_line_status.md)
-
-- [Aruba devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/aruba_devices.md)
-
-- [Bird Routing Daemon](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/bird_routing_daemon.md)
-
-- [Checkpoint device](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/checkpoint_device.md)
-
-- [Cisco ACI](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cisco_aci.md)
-
-- [Citrix NetScaler](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/citrix_netscaler.md)
-
-- [DDWRT Routers](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ddwrt_routers.md)
-
-- [FRRouting](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/frrouting.md)
-
-- [Fortigate firewall](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/fortigate_firewall.md)
-
-- [Freifunk network](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/freifunk_network.md)
-
-- [Fritzbox network devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/fritzbox_network_devices.md)
-
-- [Hitron CGN series CPE](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hitron_cgn_series_cpe.md)
-
-- [Hitron CODA Cable Modem](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hitron_coda_cable_modem.md)
-
-- [Huawei devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/huawei_devices.md)
-
-- [Keepalived](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/keepalived.md)
-
-- [Meraki dashboard](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/meraki_dashboard.md)
-
-- [MikroTik devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mikrotik_devices.md)
-
-- [MikroTik RouterOS devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mikrotik_routeros_devices.md)
-
-- [NetFlow](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/netflow.md)
-
-- [NetMeter](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/netmeter.md)
-
-- [Open vSwitch](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/open_vswitch.md)
-
-- [OpenROADM devices](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openroadm_devices.md)
-
-- [RIPE Atlas](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ripe_atlas.md)
-
-- [SONiC NOS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sonic_nos.md)
-
-- [SmartRG 808AC Cable Modem](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/smartrg_808ac_cable_modem.md)
-
-- [Starlink (SpaceX)](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/starlink_spacex.md)
-
-- [Traceroute](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/traceroute.md)
-
-- [Ubiquiti UFiber OLT](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ubiquiti_ufiber_olt.md)
-
-- [Zyxel GS1200-8](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/zyxel_gs1200-8.md)
-
-### Incident Management
-
-- [OTRS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/otrs.md)
-
-- [StatusPage](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/statuspage.md)
-
-### Observability
-
-- [Collectd](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/collectd.md)
-
-- [Dynatrace](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dynatrace.md)
-
-- [Grafana](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/grafana.md)
-
-- [Hubble](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hubble.md)
-
-- [Naemon](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/naemon.md)
-
-- [Nagios](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/nagios.md)
-
-- [New Relic](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/new_relic.md)
-
-### Other
-
-- [Example collector](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/example/integrations/example_collector.md)
-
-- [GitHub API rate limit](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/github_api_rate_limit.md)
-
-- [GitHub repository](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/github_repository.md)
-
-- [Netdata Agent alarms](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md)
-
-- [python.d changefinder](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md)
-
-- [python.d zscores](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md)
-
-### Processes and System Services
-
-- [Applications](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/integrations/applications.md)
-
-- [Supervisor](https://github.com/netdata/go.d.plugin/blob/master/modules/supervisord/integrations/supervisor.md)
-
-- [User Groups](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/integrations/user_groups.md)
-
-- [Users](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/integrations/users.md)
-
-### Provisioning Systems
-
-- [BOSH](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/bosh.md)
-
-- [Cloud Foundry Firehose](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cloud_foundry_firehose.md)
-
-- [Cloud Foundry](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cloud_foundry.md)
-
-- [Spacelift](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/spacelift.md)
-
-### Search Engines
-
-- [Elasticsearch](https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/integrations/elasticsearch.md)
-
-- [Meilisearch](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/meilisearch.md)
-
-- [OpenSearch](https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/integrations/opensearch.md)
-
-- [Solr](https://github.com/netdata/go.d.plugin/blob/master/modules/solr/integrations/solr.md)
-
-- [Sphinx](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/sphinx.md)
-
-### Security Systems
-
-- [Certificate Transparency](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/certificate_transparency.md)
-
-- [ClamAV daemon](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/clamav_daemon.md)
-
-- [Clamscan results](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/clamscan_results.md)
-
-- [Crowdsec](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/crowdsec.md)
-
-- [Honeypot](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/honeypot.md)
-
-- [Lynis audit reports](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/lynis_audit_reports.md)
-
-- [OpenVAS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/openvas.md)
-
-- [SSL Certificate](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ssl_certificate.md)
-
-- [Suricata](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/suricata.md)
-
-- [Vault PKI](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/vault_pki.md)
-
-### Service Discovery / Registry
-
-- [Consul](https://github.com/netdata/go.d.plugin/blob/master/modules/consul/integrations/consul.md)
-
-- [Kafka Consumer Lag](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/kafka_consumer_lag.md)
-
-- [ZooKeeper](https://github.com/netdata/go.d.plugin/blob/master/modules/zookeeper/integrations/zookeeper.md)
-
-- [etcd](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/etcd.md)
-
-### Storage, Mount Points and Filesystems
-
-- [AdaptecRAID](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md)
-
-- [Altaro Backup](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/altaro_backup.md)
-
-- [Borg backup](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/borg_backup.md)
-
-- [CVMFS clients](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cvmfs_clients.md)
-
-- [Ceph](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/ceph/integrations/ceph.md)
-
-- [Dell EMC Isilon cluster](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dell_emc_isilon_cluster.md)
-
-- [Dell EMC ScaleIO](https://github.com/netdata/go.d.plugin/blob/master/modules/scaleio/integrations/dell_emc_scaleio.md)
-
-- [Dell EMC XtremIO cluster](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dell_emc_xtremio_cluster.md)
-
-- [Dell PowerMax](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/dell_powermax.md)
-
-- [EOS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/eos.md)
-
-- [Generic storage enclosure tool](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/generic_storage_enclosure_tool.md)
-
-- [HDSentinel](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hdsentinel.md)
-
-- [HP Smart Storage Arrays](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md)
-
-- [Hadoop Distributed File System (HDFS)](https://github.com/netdata/go.d.plugin/blob/master/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md)
-
-- [IBM Spectrum Virtualize](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ibm_spectrum_virtualize.md)
-
-- [IBM Spectrum](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/ibm_spectrum.md)
-
-- [IPFS](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/ipfs/integrations/ipfs.md)
-
-- [Lagerist Disk latency](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/lagerist_disk_latency.md)
-
-- [MegaCLI](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/megacli/integrations/megacli.md)
-
-- [MogileFS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mogilefs.md)
-
-- [NVMe devices](https://github.com/netdata/go.d.plugin/blob/master/modules/nvme/integrations/nvme_devices.md)
-
-- [NetApp Solidfire](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/netapp_solidfire.md)
-
-- [Netapp ONTAP API](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/netapp_ontap_api.md)
-
-- [Samba](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/samba/integrations/samba.md)
-
-- [Starwind VSAN VSphere Edition](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md)
-
-- [Storidge](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/storidge.md)
-
-- [Synology ActiveBackup](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/synology_activebackup.md)
-
-### Synthetic Checks
-
-- [Blackbox](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/blackbox.md)
-
-- [Domain expiration date](https://github.com/netdata/go.d.plugin/blob/master/modules/whoisquery/integrations/domain_expiration_date.md)
-
-- [HTTP Endpoints](https://github.com/netdata/go.d.plugin/blob/master/modules/httpcheck/integrations/http_endpoints.md)
-
-- [IOPing](https://github.com/netdata/netdata/blob/master/collectors/ioping.plugin/integrations/ioping.md)
-
-- [Idle OS Jitter](https://github.com/netdata/netdata/blob/master/collectors/idlejitter.plugin/integrations/idle_os_jitter.md)
-
-- [Monit](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/monit/integrations/monit.md)
-
-- [Ping](https://github.com/netdata/go.d.plugin/blob/master/modules/ping/integrations/ping.md)
-
-- [Pingdom](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/pingdom.md)
-
-- [Site 24x7](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/site_24x7.md)
-
-- [TCP Endpoints](https://github.com/netdata/go.d.plugin/blob/master/modules/portcheck/integrations/tcp_endpoints.md)
-
-- [Uptimerobot](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/uptimerobot.md)
-
-- [X.509 certificate](https://github.com/netdata/go.d.plugin/blob/master/modules/x509check/integrations/x.509_certificate.md)
-
-### System Clock and NTP
-
-- [Chrony](https://github.com/netdata/go.d.plugin/blob/master/modules/chrony/integrations/chrony.md)
-
-- [NTPd](https://github.com/netdata/go.d.plugin/blob/master/modules/ntpd/integrations/ntpd.md)
-
-- [Timex](https://github.com/netdata/netdata/blob/master/collectors/timex.plugin/integrations/timex.md)
-
-### Systemd
-
-- [Systemd Services](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/integrations/systemd_services.md)
-
-- [Systemd Units](https://github.com/netdata/go.d.plugin/blob/master/modules/systemdunits/integrations/systemd_units.md)
-
-- [systemd-logind users](https://github.com/netdata/go.d.plugin/blob/master/modules/logind/integrations/systemd-logind_users.md)
-
-### Task Queues
-
-- [Celery](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/celery.md)
-
-- [Mesos](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/mesos.md)
-
-- [Slurm](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/slurm.md)
-
-### Telephony Servers
-
-- [GTP](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/gtp.md)
-
-- [Kannel](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/kannel.md)
-
-- [OpenSIPS](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/opensips/integrations/opensips.md)
-
-### UPS
-
-- [APC UPS](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md)
-
-- [Eaton UPS](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/eaton_ups.md)
-
-- [UPS (NUT)](https://github.com/netdata/go.d.plugin/blob/master/modules/upsd/integrations/ups_nut.md)
-
-### VPNs
-
-- [Fastd](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/fastd.md)
-
-- [Libreswan](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/libreswan/integrations/libreswan.md)
-
-- [OpenVPN status log](https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn_status_log/integrations/openvpn_status_log.md)
-
-- [OpenVPN](https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn/integrations/openvpn.md)
-
-- [SoftEther VPN Server](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/softether_vpn_server.md)
-
-- [Speedify CLI](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/speedify_cli.md)
-
-- [Tor](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/tor/integrations/tor.md)
-
-- [WireGuard](https://github.com/netdata/go.d.plugin/blob/master/modules/wireguard/integrations/wireguard.md)
-
-- [strongSwan](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/strongswan.md)
-
-### Web Servers and Web Proxies
-
-- [APIcast](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/apicast.md)
-
-- [Apache](https://github.com/netdata/go.d.plugin/blob/master/modules/apache/integrations/apache.md)
-
-- [Clash](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/clash.md)
-
-- [Cloudflare PCAP](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/cloudflare_pcap.md)
-
-- [Envoy](https://github.com/netdata/go.d.plugin/blob/master/modules/envoy/integrations/envoy.md)
-
-- [Gobetween](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/gobetween.md)
-
-- [HAProxy](https://github.com/netdata/go.d.plugin/blob/master/modules/haproxy/integrations/haproxy.md)
-
-- [HHVM](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/integrations/hhvm.md)
-
-- [HTTPD](https://github.com/netdata/go.d.plugin/blob/master/modules/apache/integrations/httpd.md)
-
-- [Lighttpd](https://github.com/netdata/go.d.plugin/blob/master/modules/lighttpd/integrations/lighttpd.md)
-
-- [Litespeed](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/litespeed/integrations/litespeed.md)
-
-- [NGINX Plus](https://github.com/netdata/go.d.plugin/blob/master/modules/nginxplus/integrations/nginx_plus.md)
-
-- [NGINX VTS](https://github.com/netdata/go.d.plugin/blob/master/modules/nginxvts/integrations/nginx_vts.md)
-
-- [NGINX](https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/integrations/nginx.md)
-
-- [PHP-FPM](https://github.com/netdata/go.d.plugin/blob/master/modules/phpfpm/integrations/php-fpm.md)
-
-- [Squid log files](https://github.com/netdata/go.d.plugin/blob/master/modules/squidlog/integrations/squid_log_files.md)
-
-- [Squid](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/squid/integrations/squid.md)
-
-- [Tengine](https://github.com/netdata/go.d.plugin/blob/master/modules/tengine/integrations/tengine.md)
-
-- [Tomcat](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/tomcat/integrations/tomcat.md)
-
-- [Traefik](https://github.com/netdata/go.d.plugin/blob/master/modules/traefik/integrations/traefik.md)
-
-- [Varnish](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/varnish/integrations/varnish.md)
-
-- [Web server log files](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/integrations/web_server_log_files.md)
-
-- [uWSGI](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md)
-
-### Windows Systems
-
-- [Active Directory](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/integrations/active_directory.md)
-
-- [HyperV](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/integrations/hyperv.md)
-
-- [MS Exchange](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/integrations/ms_exchange.md)
-
-- [MS SQL Server](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/integrations/ms_sql_server.md)
-
-- [NET Framework](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/integrations/net_framework.md)
-
-- [Windows](https://github.com/netdata/go.d.plugin/blob/master/modules/windows/integrations/windows.md)
diff --git a/collectors/Makefile.am b/collectors/Makefile.am
deleted file mode 100644
index 1bbb2e0ef..000000000
--- a/collectors/Makefile.am
+++ /dev/null
@@ -1,42 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-SUBDIRS = \
- plugins.d \
- apps.plugin \
- cgroups.plugin \
- charts.d.plugin \
- cups.plugin \
- debugfs.plugin \
- diskspace.plugin \
- timex.plugin \
- ioping.plugin \
- freebsd.plugin \
- freeipmi.plugin \
- idlejitter.plugin \
- log2journal \
- macos.plugin \
- nfacct.plugin \
- xenstat.plugin \
- perf.plugin \
- proc.plugin \
- python.d.plugin \
- slabinfo.plugin \
- statsd.plugin \
- ebpf.plugin \
- tc.plugin \
- systemd-journal.plugin \
- $(NULL)
-
-usercustompluginsconfigdir=$(configdir)/custom-plugins.d
-usergoconfigdir=$(configdir)/go.d
-
-# Explicitly install directories to avoid permission issues due to umask
-install-exec-local:
- $(INSTALL) -d $(DESTDIR)$(usercustompluginsconfigdir)
- $(INSTALL) -d $(DESTDIR)$(usergoconfigdir)
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/README.md b/collectors/README.md
deleted file mode 100644
index 7676ff866..000000000
--- a/collectors/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# Collectors
-
-When Netdata starts, and with zero configuration, it auto-detects thousands of data sources and immediately collects
-per-second metrics.
-
-Netdata can immediately collect metrics from these endpoints thanks to 300+ **collectors**, which all come pre-installed
-when you [install Netdata](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md).
-
-Because all collectors are **installed by default**, you do not need to install
-anything manually to collect metrics from new sources.
-See how you can [monitor anything with Netdata](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md).
-
-Upon startup, Netdata will **auto-detect** any application or service that has a collector, as long as both the collector
-and the app/service are configured correctly. If you don't see charts for your application, see
-our [collectors' configuration reference](https://github.com/netdata/netdata/blob/master/collectors/REFERENCE.md).
-
-## How Netdata's metrics collectors work
-
-Every collector has two primary jobs:
-
-- Look for exposed metrics at a pre- or user-defined endpoint.
-- Gather exposed metrics and use additional logic to build meaningful, interactive visualizations.
-
-If the collector finds compatible metrics exposed on the configured endpoint, it begins a per-second collection job. The
-Netdata Agent gathers these metrics, sends them to the
-[database engine for storage](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md), and immediately
-[visualizes them meaningfully](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md)
-on dashboards.
-
-Each collector comes with a pre-defined configuration that matches the default setup for that application. This endpoint
-can be a URL and port, a socket, a file, a web page, and more. The endpoint is user-configurable, as are many other
-specifics of what a given collector does.
-
-## Collector architecture and terminology
-
-- **Collectors** are the processes/programs that actually gather metrics from various sources.
-
-- **Plugins** help manage all the independent data collection processes in a variety of programming languages, based on
- their purpose and performance requirements. There are three types of plugins:
-
- - **Internal** plugins organize collectors that gather metrics from `/proc`, `/sys` and other Linux kernel sources.
- They are written in `C`, and run as threads within the Netdata daemon.
-
- - **External** plugins organize collectors that gather metrics from external processes, such as a MySQL database or
- Nginx web server. They can be written in any language, and the `netdata` daemon spawns them as long-running
- independent processes. They communicate with the daemon via pipes. All external plugins are managed by
-    [plugins.d](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md), which provides additional management options (see the sketch after this list).
-
-- **Orchestrators** are external plugins that run and manage one or more modules. They run as independent processes.
- The Go orchestrator is in active development.
-
- - [go.d.plugin](https://github.com/netdata/go.d.plugin/blob/master/README.md): An orchestrator for data
- collection modules written in `go`.
-
- - [python.d.plugin](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md):
- An orchestrator for data collection modules written in `python` v2/v3.
-
- - [charts.d.plugin](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/README.md):
-    An orchestrator for data collection modules written in `bash` v4+.
-
-- **Modules** are the individual programs controlled by an orchestrator to collect data from a specific application, or type of endpoint.
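-
-To make the external-plugin mechanism concrete, here is a minimal, hypothetical `bash` plugin that speaks the plugins.d text protocol on its standard output. The chart and dimension names are invented for this example; treat it as a sketch of the protocol, not a production collector.
-
-```bash
-#!/usr/bin/env bash
-# example.chart.sh - a hypothetical external plugin sketch.
-# The netdata daemon spawns external plugins as long-running processes
-# and reads this text protocol from their stdout over a pipe.
-
-# Declare a chart and its single dimension, once at startup.
-echo "CHART example.random '' 'A random number' 'value' example example.random line 90000 1"
-echo "DIMENSION random '' absolute 1 1"
-
-# Then send one data point per collection interval, forever.
-while true; do
-    echo "BEGIN example.random"
-    echo "SET random = ${RANDOM}"
-    echo "END"
-    sleep 1
-done
-```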
diff --git a/collectors/REFERENCE.md b/collectors/REFERENCE.md
deleted file mode 100644
index f19533f21..000000000
--- a/collectors/REFERENCE.md
+++ /dev/null
@@ -1,149 +0,0 @@
-<!--
-title: "Collectors configuration reference"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/REFERENCE.md"
-sidebar_label: "Collectors configuration"
-learn_status: "Published"
-learn_topic_type: "Tasks"
-learn_rel_path: "Configuration"
--->
-
-# Collectors configuration reference
-
-The list of supported collectors can be found in [the documentation](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md),
-and on [our website](https://www.netdata.cloud/integrations). The documentation of each collector provides all the
-necessary configuration options and prerequisites for that collector. In most cases, either the charts are automatically generated
-without any configuration, or you just fulfil those prerequisites and [configure the collector](#configure-a-collector).
-
-If the application you are interested in monitoring is not listed in our integrations, the collectors list includes
-the available options to
-[add your application to Netdata](https://github.com/netdata/netdata/edit/master/collectors/COLLECTORS.md#add-your-application-to-netdata).
-
-If we do support your collector but the charts described in the documentation don't appear on your dashboard, the reason will
-be one of the following:
-
-- The entire data collection plugin is disabled by default. Read how to [enable and disable plugins](#enable-and-disable-plugins).
-
-- The data collection plugin is enabled, but a specific data collection module is disabled. Read how to
- [enable and disable a specific collection module](#enable-and-disable-a-specific-collection-module).
-
-- Autodetection failed. Read how to [configure](#configure-a-collector) and [troubleshoot](#troubleshoot-a-collector) a collector.
-
-## Enable and disable plugins
-
-You can enable or disable individual plugins by opening `netdata.conf` and scrolling down to the `[plugins]` section.
-This section features a list of Netdata's plugins, with a boolean setting to enable or disable them. The exception is
-`statsd.plugin`, which has its own `[statsd]` section. Your `[plugins]` section should look similar to this:
-
-```conf
-[plugins]
- # timex = yes
- # idlejitter = yes
- # netdata monitoring = yes
- # tc = yes
- # diskspace = yes
- # proc = yes
- # cgroups = yes
- # enable running new plugins = yes
- # check for new plugins every = 60
- # slabinfo = no
- # python.d = yes
- # perf = yes
- # ioping = yes
- # fping = yes
- # nfacct = yes
- # go.d = yes
- # apps = yes
- # ebpf = yes
- # charts.d = yes
- # statsd = yes
-```
-
-By default, most plugins are enabled, so you don't need to enable them explicitly to use their collectors. To enable or
-disable any specific plugin, remove the comment (`#`) and change the boolean setting to `yes` or `no`.
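-
-For example, to explicitly enable the `perf` plugin, you could open `netdata.conf` with `edit-config` and flip its boolean, a sketch assuming the default `/etc/netdata` config directory:
-
-```bash
-cd /etc/netdata
-sudo ./edit-config netdata.conf
-# in the [plugins] section, uncomment and set:
-#   perf = yes
-sudo systemctl restart netdata
-```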
-
-## Enable and disable a specific collection module
-
-You can enable or disable any of the collection modules supported by `go.d`, `python.d` or `charts.d` individually, using the
-configuration file of that orchestrator. For example, you can change the behavior of the Go orchestrator, or any of its
-collectors, by editing `go.d.conf`.
-
-Use `edit-config` from your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory)
-to open the orchestrator primary configuration file:
-
-```bash
-cd /etc/netdata
-sudo ./edit-config go.d.conf
-```
-
-Within this file, you can either disable the orchestrator entirely (by setting `enabled: no`), or find a specific collector and
-enable/disable it with `yes` and `no` settings. Uncomment any line you change to ensure the Netdata daemon reads it on
-start.
-
-After you make your changes, restart the Agent with `sudo systemctl restart netdata`, or the [appropriate
-method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
-
-## Configure a collector
-
-Most collector modules come with **auto-detection**, configured to work out-of-the-box on popular operating systems with
-the default settings.
-
-However, there are cases where auto-detection fails. Usually, the reason is that the applications to be monitored do not
-allow Netdata to connect. In most cases, allowing the user `netdata` to connect from `localhost` and collect
-metrics will automatically enable data collection for the application in question (a Netdata restart is required).
-
-When Netdata starts up, each collector searches for exposed metrics on the default endpoint established by that service
-or application's standard installation procedure. For example,
-the [Nginx collector](https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/README.md) searches at
-`http://127.0.0.1/stub_status` for exposed metrics in the correct format. If an Nginx web server is running and exposes
-metrics on that endpoint, the collector begins gathering them.
-
-However, not every node or infrastructure uses standard ports, paths, files, or naming conventions. You may need to
-enable or configure a collector to gather all available metrics from your systems, containers, or applications.
-
-First, [find the collector](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md) you want to edit
-and open its documentation. Some software has collectors written in multiple languages. In these cases, you should always
-pick the collector written in Go.
-
-Use `edit-config` from your
-[Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory)
-to open a collector's configuration file. For example, edit the Nginx collector with the following:
-
-```bash
-./edit-config go.d/nginx.conf
-```
-
-Each configuration file describes every available option and offers examples to help you tweak Netdata's settings
-according to your needs. In addition, every collector's documentation shows the exact command you need to run to
-configure that collector. Uncomment any line you change to ensure the collector's orchestrator or the Netdata daemon
-reads it on start.
-
-After you make your changes, restart the Agent with `sudo systemctl restart netdata`, or the [appropriate
-method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
-
-## Troubleshoot a collector
-
-First, navigate to your plugins directory, which is usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case
-on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the plugins directory,
-switch to the `netdata` user.
-
-```bash
-cd /usr/libexec/netdata/plugins.d/
-sudo su -s /bin/bash netdata
-```
-
-The next step is based on the collector's orchestrator.
-
-```bash
-# Go orchestrator (go.d.plugin)
-./go.d.plugin -d -m <MODULE_NAME>
-
-# Python orchestrator (python.d.plugin)
-./python.d.plugin <MODULE_NAME> debug trace
-
-# Bash orchestrator (charts.d.plugin)
-./charts.d.plugin debug 1 <MODULE_NAME>
-```
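-
-For example, while switched to the `netdata` user, you could debug the Go orchestrator's `nginx` module like this:
-
-```bash
-./go.d.plugin -d -m nginx
-```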
-
-The output from the relevant command will provide valuable troubleshooting information. If you can't figure out how to
-enable the collector using the details from this output, feel free to [join our Discord server](https://discord.com/invite/mPZ6WZKKG2)
-to get help from our experts.
diff --git a/collectors/all.h b/collectors/all.h
deleted file mode 100644
index 38241dfa9..000000000
--- a/collectors/all.h
+++ /dev/null
@@ -1,445 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_ALL_H
-#define NETDATA_ALL_H 1
-
-#include "daemon/common.h"
-
-// netdata internal data collection plugins
-
-#include "plugins.d/plugins_d.h"
-
-// ----------------------------------------------------------------------------
-// netdata chart priorities
-
-// This is a work in progress - the scope is to collect all chart priorities here.
-// These should be based on the CONTEXT of the charts + the chart id when needed
-// - for each SECTION +1000 (or +X000 for big sections)
-// - for each FAMILY +100
-// - for each CHART +10
-
-#define NETDATA_CHART_PRIO_SYSTEM_CPU 100
-#define NETDATA_CHART_PRIO_SYSTEM_LOAD 100
-#define NETDATA_CHART_PRIO_SYSTEM_IO 150
-#define NETDATA_CHART_PRIO_SYSTEM_PGPGIO 151
-#define NETDATA_CHART_PRIO_SYSTEM_RAM 200
-#define NETDATA_CHART_PRIO_SYSTEM_NET 500
-#define NETDATA_CHART_PRIO_SYSTEM_IPV4 500 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_IP 501
-#define NETDATA_CHART_PRIO_SYSTEM_IPV6 502
-#define NETDATA_CHART_PRIO_SYSTEM_PROCESSES 600
-#define NETDATA_CHART_PRIO_SYSTEM_PROCESS_STATES 601
-#define NETDATA_CHART_PRIO_SYSTEM_FORKS 700
-#define NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES 750
-#define NETDATA_CHART_PRIO_SYSTEM_CTXT 800
-#define NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER 800
-#define NETDATA_CHART_PRIO_SYSTEM_INTR 900
-#define NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS 950
-#define NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT 955
-#define NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS 1000
-#define NETDATA_CHART_PRIO_SYSTEM_DEV_INTR 1000 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR 1100 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_FILES_NR 1000
-#define NETDATA_CHART_PRIO_SYSTEM_ENTROPY 1000
-#define NETDATA_CHART_PRIO_SYSTEM_UPTIME 1000
-#define NETDATA_CHART_PRIO_CLOCK_SYNC_STATE 1100
-#define NETDATA_CHART_PRIO_CLOCK_STATUS 1105
-#define NETDATA_CHART_PRIO_CLOCK_SYNC_OFFSET 1110
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES 1200 // freebsd only
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES 1201
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE 1202
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES 1203
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS 1204
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS 1205
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE 1206
-#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_CALLS 1207
-#define NETDATA_CHART_PRIO_SYSTEM_PACKETS 7001 // freebsd only
-
-
-// CPU per core
-
-#define NETDATA_CHART_PRIO_CPU_PER_CORE 1000 // +1 per core
-#define NETDATA_CHART_PRIO_CPU_TEMPERATURE 1050 // freebsd only
-#define NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ 5003 // freebsd only
-#define NETDATA_CHART_PRIO_CPUIDLE 6000
-
-#define NETDATA_CHART_PRIO_CORE_THROTTLING 5001
-#define NETDATA_CHART_PRIO_PACKAGE_THROTTLING 5002
-
-// Interrupts per core
-
-#define NETDATA_CHART_PRIO_INTERRUPTS_PER_CORE 1100 // +1 per core
-
-// Memory Section - 1xxx
-
-#define NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE 1010
-#define NETDATA_CHART_PRIO_MEM_SYSTEM_OOM_KILL 1020
-#define NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED 1030
-#define NETDATA_CHART_PRIO_MEM_SWAP 1035
-#define NETDATA_CHART_PRIO_MEM_SWAP_CALLS 1037
-#define NETDATA_CHART_PRIO_MEM_SWAPIO 1038
-#define NETDATA_CHART_PRIO_MEM_ZSWAP 1036
-#define NETDATA_CHART_PRIO_MEM_ZSWAPIO 1037
-#define NETDATA_CHART_PRIO_MEM_ZSWAP_COMPRESS_RATIO 1038
-#define NETDATA_CHART_PRIO_MEM_ZSWAP_POOL_TOT_SIZE 1039
-#define NETDATA_CHART_PRIO_MEM_ZSWAP_STORED_PAGE 1040
-#define NETDATA_CHART_PRIO_MEM_ZSWAP_REJECTS 1041
-#define NETDATA_CHART_PRIO_MEM_ZSWAP_POOL_LIM_HIT 1042
-#define NETDATA_CHART_PRIO_MEM_ZSWAP_WRT_BACK_PAGES 1043
-#define NETDATA_CHART_PRIO_MEM_ZSWAP_SAME_FILL_PAGE 1044
-#define NETDATA_CHART_PRIO_MEM_ZSWAP_DUPP_ENTRY 1045
-#define NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS 1050
-#define NETDATA_CHART_PRIO_MEM_KERNEL 1100
-#define NETDATA_CHART_PRIO_MEM_SLAB 1200
-#define NETDATA_CHART_PRIO_MEM_RECLAIMING 1210
-#define NETDATA_CHART_PRIO_MEM_HIGH_LOW 1211
-#define NETDATA_CHART_PRIO_MEM_CMA 1212
-#define NETDATA_CHART_PRIO_MEM_HUGEPAGES 1250
-#define NETDATA_CHART_PRIO_MEM_HUGEPAGES_DETAILS 1251
-#define NETDATA_CHART_PRIO_MEM_HUGEPAGES_FAULTS 1252
-#define NETDATA_CHART_PRIO_MEM_HUGEPAGES_FILE 1253
-#define NETDATA_CHART_PRIO_MEM_HUGEPAGES_ZERO 1254
-#define NETDATA_CHART_PRIO_MEM_HUGEPAGES_KHUGEPAGED 1255
-#define NETDATA_CHART_PRIO_MEM_HUGEPAGES_SPLITS 1256
-#define NETDATA_CHART_PRIO_MEM_HUGEPAGES_SWAPOUT 1257
-#define NETDATA_CHART_PRIO_MEM_HUGEPAGES_COMPACT 1258
-#define NETDATA_CHART_PRIO_MEM_DIRECTMAP 1260
-#define NETDATA_CHART_PRIO_MEM_KSM 1300
-#define NETDATA_CHART_PRIO_MEM_KSM_SAVINGS 1301
-#define NETDATA_CHART_PRIO_MEM_KSM_RATIOS 1302
-#define NETDATA_CHART_PRIO_MEM_KSM_COW 1303
-#define NETDATA_CHART_PRIO_MEM_BALLOON 1350
-#define NETDATA_CHART_PRIO_MEM_NUMA 1400
-#define NETDATA_CHART_PRIO_MEM_NUMA_NODES 1410
-#define NETDATA_CHART_PRIO_MEM_PAGEFRAG 1450
-#define NETDATA_CHART_PRIO_MEM_HW 1500
-#define NETDATA_CHART_PRIO_MEM_HW_ECC_CE 1550
-#define NETDATA_CHART_PRIO_MEM_HW_ECC_UE 1560
-#define NETDATA_CHART_PRIO_MEM_ZRAM 1600
-#define NETDATA_CHART_PRIO_MEM_ZRAM_SAVINGS 1601
-#define NETDATA_CHART_PRIO_MEM_ZRAM_RATIO 1602
-#define NETDATA_CHART_PRIO_MEM_ZRAM_EFFICIENCY 1603
-#define NETDATA_CHART_PRIO_MEM_FRAGMENTATION 1700
-
-// Disks
-
-#define NETDATA_CHART_PRIO_DISK_IO 2000
-#define NETDATA_CHART_PRIO_DISK_OPS 2010
-#define NETDATA_CHART_PRIO_DISK_QOPS 2015
-#define NETDATA_CHART_PRIO_DISK_BACKLOG 2020
-#define NETDATA_CHART_PRIO_DISK_BUSY 2030
-#define NETDATA_CHART_PRIO_DISK_UTIL 2040
-#define NETDATA_CHART_PRIO_DISK_AWAIT 2050
-#define NETDATA_CHART_PRIO_DISK_AVGSZ 2060
-#define NETDATA_CHART_PRIO_DISK_SVCTM 2070
-#define NETDATA_CHART_PRIO_DISK_MOPS 2080
-#define NETDATA_CHART_PRIO_DISK_IOTIME 2090
-#define NETDATA_CHART_PRIO_DISK_LATENCY 2095
-#define NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC 2120
-#define NETDATA_CHART_PRIO_BCACHE_HIT_RATIO 2120
-#define NETDATA_CHART_PRIO_BCACHE_RATES 2121
-#define NETDATA_CHART_PRIO_BCACHE_SIZE 2122
-#define NETDATA_CHART_PRIO_BCACHE_USAGE 2123
-#define NETDATA_CHART_PRIO_BCACHE_OPS 2124
-#define NETDATA_CHART_PRIO_BCACHE_BYPASS 2125
-#define NETDATA_CHART_PRIO_BCACHE_CACHE_READ_RACES 2126
-
-#define NETDATA_CHART_PRIO_DISKSPACE_SPACE 2023
-#define NETDATA_CHART_PRIO_DISKSPACE_INODES 2024
-
-// MDSTAT
-
-#define NETDATA_CHART_PRIO_MDSTAT_HEALTH 2100
-#define NETDATA_CHART_PRIO_MDSTAT_FLUSH 2101
-#define NETDATA_CHART_PRIO_MDSTAT_NONREDUNDANT 2105
-#define NETDATA_CHART_PRIO_MDSTAT_DISKS 2106 // 5 charts per raid
-#define NETDATA_CHART_PRIO_MDSTAT_MISMATCH 2107
-#define NETDATA_CHART_PRIO_MDSTAT_OPERATION 2108
-#define NETDATA_CHART_PRIO_MDSTAT_FINISH 2109
-#define NETDATA_CHART_PRIO_MDSTAT_SPEED 2110
-
-// Filesystem
-#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_CLEAN 2150
-#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_COUNT 2151
-#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_BYTES 2152
-#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EBYTES 2153
-#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_FSYNC 2154
-#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EFSYNC 2155
-#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_OPEN 2156
-#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EOPEN 2157
-#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_CREATE 2158
-#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_ECREATE 2159
-
-#define NETDATA_CHART_PRIO_EBPF_FILESYSTEM_CHARTS 2160
-
-// Mount Points
-#define NETDATA_CHART_PRIO_EBPF_MOUNT_CHARTS 2190
-
-// File descriptor
-#define NETDATA_CHART_PRIO_EBPF_FD_CHARTS 2195
-
-
-// NFS (server)
-
-#define NETDATA_CHART_PRIO_NFSD_READCACHE 2200
-#define NETDATA_CHART_PRIO_NFSD_FILEHANDLES 2201
-#define NETDATA_CHART_PRIO_NFSD_IO 2202
-#define NETDATA_CHART_PRIO_NFSD_THREADS 2203
-#define NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT 2204
-#define NETDATA_CHART_PRIO_NFSD_THREADS_HISTOGRAM 2205
-#define NETDATA_CHART_PRIO_NFSD_READAHEAD 2205
-#define NETDATA_CHART_PRIO_NFSD_NET 2207
-#define NETDATA_CHART_PRIO_NFSD_RPC 2208
-#define NETDATA_CHART_PRIO_NFSD_PROC2 2209
-#define NETDATA_CHART_PRIO_NFSD_PROC3 2210
-#define NETDATA_CHART_PRIO_NFSD_PROC4 2211
-#define NETDATA_CHART_PRIO_NFSD_PROC4OPS 2212
-
-// NFS (client)
-
-#define NETDATA_CHART_PRIO_NFS_NET 2307
-#define NETDATA_CHART_PRIO_NFS_RPC 2308
-#define NETDATA_CHART_PRIO_NFS_PROC2 2309
-#define NETDATA_CHART_PRIO_NFS_PROC3 2310
-#define NETDATA_CHART_PRIO_NFS_PROC4 2311
-
-// BTRFS
-
-#define NETDATA_CHART_PRIO_BTRFS_DISK 2400
-#define NETDATA_CHART_PRIO_BTRFS_DATA 2401
-#define NETDATA_CHART_PRIO_BTRFS_METADATA 2402
-#define NETDATA_CHART_PRIO_BTRFS_SYSTEM 2403
-#define NETDATA_CHART_PRIO_BTRFS_COMMITS 2404
-#define NETDATA_CHART_PRIO_BTRFS_COMMITS_PERC_TIME 2405
-#define NETDATA_CHART_PRIO_BTRFS_COMMIT_TIMINGS 2406
-#define NETDATA_CHART_PRIO_BTRFS_ERRORS 2407
-
-// ZFS
-
-#define NETDATA_CHART_PRIO_ZFS_ARC_SIZE 2500
-#define NETDATA_CHART_PRIO_ZFS_L2_SIZE 2500
-#define NETDATA_CHART_PRIO_ZFS_READS 2510
-#define NETDATA_CHART_PRIO_ZFS_ACTUAL_HITS 2519
-#define NETDATA_CHART_PRIO_ZFS_ARC_SIZE_BREAKDOWN 2520
-#define NETDATA_CHART_PRIO_ZFS_IMPORTANT_OPS 2522
-#define NETDATA_CHART_PRIO_ZFS_MEMORY_OPS 2523
-#define NETDATA_CHART_PRIO_ZFS_IO 2700
-#define NETDATA_CHART_PRIO_ZFS_HITS 2520
-#define NETDATA_CHART_PRIO_ZFS_DHITS 2530
-#define NETDATA_CHART_PRIO_ZFS_DEMAND_DATA_HITS 2540
-#define NETDATA_CHART_PRIO_ZFS_PREFETCH_DATA_HITS 2550
-#define NETDATA_CHART_PRIO_ZFS_PHITS 2560
-#define NETDATA_CHART_PRIO_ZFS_MHITS 2570
-#define NETDATA_CHART_PRIO_ZFS_L2HITS 2580
-#define NETDATA_CHART_PRIO_ZFS_LIST_HITS 2600
-#define NETDATA_CHART_PRIO_ZFS_HASH_ELEMENTS 2800
-#define NETDATA_CHART_PRIO_ZFS_HASH_CHAINS 2810
-
-#define NETDATA_CHART_PRIO_ZFS_POOL_STATE 2820
-
-// HARDIRQS
-
-#define NETDATA_CHART_PRIO_HARDIRQ_LATENCY 2900
-
-// SOFTIRQs
-
-#define NETDATA_CHART_PRIO_SOFTIRQS_PER_CORE 3000 // +1 per core
-
-// IPFW (freebsd)
-
-#define NETDATA_CHART_PRIO_IPFW_PACKETS 3001
-#define NETDATA_CHART_PRIO_IPFW_BYTES 3002
-#define NETDATA_CHART_PRIO_IPFW_ACTIVE 3003
-#define NETDATA_CHART_PRIO_IPFW_EXPIRED 3004
-#define NETDATA_CHART_PRIO_IPFW_MEM 3005
-
-
-// IPVS
-
-#define NETDATA_CHART_PRIO_IPVS_NET 3100
-#define NETDATA_CHART_PRIO_IPVS_SOCKETS 3101
-#define NETDATA_CHART_PRIO_IPVS_PACKETS 3102
-
-// Softnet
-
-#define NETDATA_CHART_PRIO_SOFTNET_PER_CORE 4101 // +1 per core
-
-// IP STACK
-
-#define NETDATA_CHART_PRIO_IP_TCP_PACKETS 4200
-#define NETDATA_CHART_PRIO_IP_TCP_ERRORS 4210
-#define NETDATA_CHART_PRIO_IP_TCP_ESTABLISHED_CONNS 4220
-#define NETDATA_CHART_PRIO_IP_TCP_OPENS 4220
-#define NETDATA_CHART_PRIO_IP_TCP_HANDSHAKE 4230
-#define NETDATA_CHART_PRIO_IP_TCP_CONNABORTS 4240
-#define NETDATA_CHART_PRIO_IP_TCP_SYN_QUEUE 4250
-#define NETDATA_CHART_PRIO_IP_TCP_ACCEPT_QUEUE 4260
-#define NETDATA_CHART_PRIO_IP_TCP_REORDERS 4270
-#define NETDATA_CHART_PRIO_IP_TCP_OFO 4280
-#define NETDATA_CHART_PRIO_IP_TCP_SYNCOOKIES 4290
-#define NETDATA_CHART_PRIO_IP_TCP_MEM_PRESSURE 4300
-#define NETDATA_CHART_PRIO_IP_SOCKETS 4310
-
-// IPv4
-
-#define NETDATA_CHART_PRIO_IPV4_PACKETS 5000
-#define NETDATA_CHART_PRIO_IPV4_ERRORS 5050
-#define NETDATA_CHART_PRIO_IPV4_BCAST 5100
-#define NETDATA_CHART_PRIO_IPV4_BCAST_PACKETS 5105
-#define NETDATA_CHART_PRIO_IPV4_MCAST 5150
-#define NETDATA_CHART_PRIO_IPV4_MCAST_PACKETS 5155
-#define NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS 5180
-#define NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS_MEM 5185
-#define NETDATA_CHART_PRIO_IPV4_ICMP_PACKETS 5200
-#define NETDATA_CHART_PRIO_IPV4_ICMP_MESSAGES 5205
-#define NETDATA_CHART_PRIO_IPV4_ICMP_ERRORS 5210
-#define NETDATA_CHART_PRIO_IPV4_UDP_PACKETS 5250
-#define NETDATA_CHART_PRIO_IPV4_UDP_ERRORS 5255
-#define NETDATA_CHART_PRIO_IPV4_UDP_SOCKETS 5260
-#define NETDATA_CHART_PRIO_IPV4_UDP_SOCKETS_MEM 5265
-#define NETDATA_CHART_PRIO_IPV4_UDPLITE_PACKETS 5300
-#define NETDATA_CHART_PRIO_IPV4_UDPLITE_ERRORS 5305
-#define NETDATA_CHART_PRIO_IPV4_UDPLITE_SOCKETS 5310
-#define NETDATA_CHART_PRIO_IPV4_ECN 5350
-#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_IN 5400
-#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_OUT 5405
-#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_SOCKETS 5410
-#define NETDATA_CHART_PRIO_IPV4_FRAGMENTS_SOCKETS_MEM 5415
-#define NETDATA_CHART_PRIO_IPV4_RAW 5450
-
-// IPv6
-#define NETDATA_CHART_PRIO_IPV6_PACKETS 6000
-#define NETDATA_CHART_PRIO_IPV6_ERRORS 6005
-#define NETDATA_CHART_PRIO_IPV6_BCAST 6050
-#define NETDATA_CHART_PRIO_IPV6_MCAST 6100
-#define NETDATA_CHART_PRIO_IPV6_MCAST_PACKETS 6105
-#define NETDATA_CHART_PRIO_IPV6_TCP_SOCKETS 6140
-#define NETDATA_CHART_PRIO_IPV6_ICMP 6150
-#define NETDATA_CHART_PRIO_IPV6_ICMP_REDIR 6155
-#define NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS 6160
-#define NETDATA_CHART_PRIO_IPV6_ICMP_ECHOS 6165
-#define NETDATA_CHART_PRIO_IPV6_ICMP_GROUPMEMB 6170
-#define NETDATA_CHART_PRIO_IPV6_ICMP_ROUTER 6180
-#define NETDATA_CHART_PRIO_IPV6_ICMP_NEIGHBOR 6185
-#define NETDATA_CHART_PRIO_IPV6_ICMP_LDV2 6190
-#define NETDATA_CHART_PRIO_IPV6_ICMP_TYPES 6195
-#define NETDATA_CHART_PRIO_IPV6_UDP 6200
-#define NETDATA_CHART_PRIO_IPV6_UDP_PACKETS 6205
-#define NETDATA_CHART_PRIO_IPV6_UDP_ERRORS 6210
-#define NETDATA_CHART_PRIO_IPV6_UDP_SOCKETS 6215
-#define NETDATA_CHART_PRIO_IPV6_UDPLITE 6250
-#define NETDATA_CHART_PRIO_IPV6_UDPLITE_PACKETS 6255
-#define NETDATA_CHART_PRIO_IPV6_UDPLITE_ERRORS 6260
-#define NETDATA_CHART_PRIO_IPV6_UDPLITE_SOCKETS 6265
-#define NETDATA_CHART_PRIO_IPV6_ECT 6300
-#define NETDATA_CHART_PRIO_IPV6_FRAGSIN 6350
-#define NETDATA_CHART_PRIO_IPV6_FRAGSOUT 6355
-#define NETDATA_CHART_PRIO_IPV6_FRAGMENTS_SOCKETS 6360
-#define NETDATA_CHART_PRIO_IPV6_RAW_SOCKETS 6400
-
-// Network interfaces
-
-#define NETDATA_CHART_PRIO_FIRST_NET_IFACE 7000 // 6 charts per interface
-#define NETDATA_CHART_PRIO_FIRST_NET_PACKETS 7001
-#define NETDATA_CHART_PRIO_FIRST_NET_ERRORS 7002
-#define NETDATA_CHART_PRIO_FIRST_NET_DROPS 7003
-#define NETDATA_CHART_PRIO_FIRST_NET_EVENTS 7006
-#define NETDATA_CHART_PRIO_CGROUP_NET_IFACE 43000
-
-// SCTP
-
-#define NETDATA_CHART_PRIO_SCTP 7000
-
-// QoS
-
-#define NETDATA_CHART_PRIO_TC_QOS 7000
-#define NETDATA_CHART_PRIO_TC_QOS_PACKETS 7010
-#define NETDATA_CHART_PRIO_TC_QOS_DROPPED 7020
-#define NETDATA_CHART_PRIO_TC_QOS_TOKENS 7030
-#define NETDATA_CHART_PRIO_TC_QOS_CTOKENS 7040
-
-// Infiniband
-#define NETDATA_CHART_PRIO_INFINIBAND 7100
-
-// Netfilter
-
-#define NETDATA_CHART_PRIO_NETFILTER_SOCKETS 8700
-#define NETDATA_CHART_PRIO_NETFILTER_NEW 8701
-#define NETDATA_CHART_PRIO_NETFILTER_CHANGES 8702
-#define NETDATA_CHART_PRIO_NETFILTER_EXPECT 8703
-#define NETDATA_CHART_PRIO_NETFILTER_ERRORS 8705
-#define NETDATA_CHART_PRIO_NETFILTER_SEARCH 8710
-
-// SYNPROXY
-
-#define NETDATA_CHART_PRIO_SYNPROXY_SYN_RECEIVED 8751
-#define NETDATA_CHART_PRIO_SYNPROXY_COOKIES 8752
-#define NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN 8753
-#define NETDATA_CHART_PRIO_SYNPROXY_ENTRIES 8754
-
-// Linux Power Supply
-
-#define NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY 9500 // 4 charts per power supply
-#define NETDATA_CHART_PRIO_POWER_SUPPLY_CHARGE 9501
-#define NETDATA_CHART_PRIO_POWER_SUPPLY_ENERGY 9502
-#define NETDATA_CHART_PRIO_POWER_SUPPLY_VOLTAGE 9503
-
-// Linux powercap
-
-#define NETDATA_CHART_PRIO_POWERCAP 9600
-
-// Wireless
-
-#define NETDATA_CHART_PRIO_WIRELESS_IFACE 7110
-
-// CGROUPS
-
-#define NETDATA_CHART_PRIO_CGROUPS_SYSTEMD 19000 // many charts
-#define NETDATA_CHART_PRIO_CGROUPS_CONTAINERS 40000 // many charts
-
-// STATSD
-
-#define NETDATA_CHART_PRIO_STATSD_PRIVATE 90000 // many charts
-
-// Logs Management
-
-#define NETDATA_CHART_PRIO_LOGS_BASE 95000 // many charts
-#define NETDATA_CHART_PRIO_LOGS_STATS_BASE 160000 // logsmanagement stats in "Netdata Monitoring"
-
-// PCI
-
-#define NETDATA_CHART_PRIO_PCI_AER 100000
-
-// AMD GPUs
-
-#define NETDATA_CHART_PRIO_DRM_AMDGPU 39000
-
-// NETDATA ML CHARTS
-
-// [ml] charts
-#define ML_CHART_PRIO_DIMENSIONS 39181
-#define ML_CHART_PRIO_ANOMALY_RATE 39182
-#define ML_CHART_PRIO_TYPE_ANOMALY_RATE 39183
-#define ML_CHART_PRIO_DETECTOR_EVENTS 39184
-
-// [netdata.ml] charts
-#define NETDATA_ML_CHART_RUNNING 890001
-#define NETDATA_ML_CHART_PRIO_MACHINE_LEARNING_STATUS 890002
-#define NETDATA_ML_CHART_PRIO_METRIC_TYPES 890003
-#define NETDATA_ML_CHART_PRIO_TRAINING_STATUS 890004
-
-#define NETDATA_ML_CHART_PRIO_PREDICTION_USAGE 890005
-#define NETDATA_ML_CHART_PRIO_TRAINING_USAGE 890006
-
-#define NETDATA_ML_CHART_PRIO_QUEUE_STATS 890007
-#define NETDATA_ML_CHART_PRIO_TRAINING_TIME_STATS 890008
-#define NETDATA_ML_CHART_PRIO_TRAINING_RESULTS 890009
-
-#define NETDATA_ML_CHART_FAMILY "machine learning"
-#define NETDATA_ML_PLUGIN "ml.plugin"
-#define NETDATA_ML_MODULE_TRAINING "training"
-#define NETDATA_ML_MODULE_DETECTION "detection"
-#define NETDATA_ML_MODULE_PREDICTION "prediction"
-
-
-#endif //NETDATA_ALL_H
diff --git a/collectors/apps.plugin/Makefile.am b/collectors/apps.plugin/Makefile.am
deleted file mode 100644
index 533b14dd0..000000000
--- a/collectors/apps.plugin/Makefile.am
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-dist_libconfig_DATA = \
- apps_groups.conf \
- $(NULL)
diff --git a/collectors/apps.plugin/README.md b/collectors/apps.plugin/README.md
deleted file mode 100644
index fd5371f08..000000000
--- a/collectors/apps.plugin/README.md
+++ /dev/null
@@ -1,402 +0,0 @@
-<!--
-title: "Application monitoring (apps.plugin)"
-sidebar_label: "Application monitoring "
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/apps.plugin/README.md"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/System metrics"
--->
-
-# Application monitoring (apps.plugin)
-
-`apps.plugin` breaks down system resource usage to **processes**, **users** and **user groups**.
-It is enabled by default on every Netdata installation.
-
-To achieve this task, it iterates through the whole process tree, collecting resource usage information
-for every process found running.
-
-Since Netdata needs to present this information in charts and track them through time,
-instead of presenting a `top` like list, `apps.plugin` uses a pre-defined list of **process groups**
-to which it assigns all running processes. This list is customizable via `apps_groups.conf`, and Netdata
-ships with a good default for most cases (to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
-
-So, `apps.plugin` builds a process tree (much like `ps fax` does in Linux), and groups
-processes together (evaluating both child and parent processes) so that the result is always a list with
-a predefined set of members (of course, only process groups found running are reported).
-
-> If you find that `apps.plugin` categorizes standard applications as `other`, we would be
-> glad to accept pull requests improving the defaults shipped with Netdata in `apps_groups.conf`.
-
-Unlike traditional process monitoring tools (like `top`), `apps.plugin` is able to account for the resource
-utilization of exited processes. Their utilization is attributed to their currently running parents.
-So, `apps.plugin` is perfectly able to measure the resources used by shell scripts and other processes
-that fork/spawn other short-lived processes hundreds of times per second.
-
-## Charts
-
-`apps.plugin` provides charts for 3 sections:
-
-1. Per application charts as **Applications** at Netdata dashboards
-2. Per user charts as **Users** at Netdata dashboards
-3. Per user group charts as **User Groups** at Netdata dashboards
-
-Each of these sections provides the same number of charts:
-
-- CPU utilization (`apps.cpu`)
- - Total CPU usage
- - User/system CPU usage (`apps.cpu_user`/`apps.cpu_system`)
-- Disk I/O
- - Physical reads/writes (`apps.preads`/`apps.pwrites`)
- - Logical reads/writes (`apps.lreads`/`apps.lwrites`)
- - Open unique files (if a file is found open multiple times, it is counted just once, `apps.files`)
-- Memory
- - Real Memory Used (non-shared, `apps.mem`)
- - Virtual Memory Allocated (`apps.vmem`)
- - Minor page faults (i.e. memory activity, `apps.minor_faults`)
-- Processes
- - Threads running (`apps.threads`)
- - Processes running (`apps.processes`)
- - Carried over uptime (since the last Netdata Agent restart, `apps.uptime`)
- - Minimum uptime (`apps.uptime_min`)
- - Average uptime (`apps.uptime_average`)
- - Maximum uptime (`apps.uptime_max`)
- - Pipes open (`apps.pipes`)
-- Swap memory
- - Swap memory used (`apps.swap`)
- - Major page faults (i.e. swap activity, `apps.major_faults`)
-- Network
- - Sockets open (`apps.sockets`)
-
-In addition, if the [eBPF collector](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md) is running, your dashboard will also show an
-additional [list of charts](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md#integration-with-appsplugin) using low-level Linux
-metrics.
-
-The above are reported:
-
-- For **Applications** per target configured.
-- For **Users** per username or UID (when the username is not available).
-- For **User Groups** per group name or GID (when group name is not available).
-
-## Performance
-
-`apps.plugin` is a complex piece of software and has a lot of work to do.
-We are proud that `apps.plugin` is a lot faster than any other similar tool,
-while collecting a lot more information about the processes. However, the fact is that
-this plugin requires more CPU resources than the `netdata` daemon itself.
-
-Under Linux, for each process running, `apps.plugin` reads several `/proc` files
-per process. Doing this work per-second, especially on hosts with several thousands
-of processes, may increase the CPU resources consumed by the plugin.
-
-In such cases, you may need to lower its data collection frequency.
-
-To do this, edit `/etc/netdata/netdata.conf` and find this section:
-
-```
-[plugin:apps]
- # update every = 1
- # command options =
-```
-
-Uncomment the line `update every` and set it to a higher number. If you just set it to `2`,
-its CPU resources will be cut in half, and data collection will be once every 2 seconds.
-
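-To verify the effect, you could watch the plugin's own CPU usage from a shell, for example (assuming `apps.plugin` is running and `pidof` is available):
-
-```bash
-# assumes a single apps.plugin process is running
-top -p "$(pidof apps.plugin)"
-```
-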
-## Configuration
-
-The configuration file is `/etc/netdata/apps_groups.conf`. To edit it on your system, run `/etc/netdata/edit-config apps_groups.conf`.
-
-The configuration file accepts multiple lines, each having this format:
-
-```txt
-group: process1 process2 ...
-```
-
-Each group can be given multiple times, to add more processes to it.
-
-For the **Applications** section, only groups configured in this file are reported.
-All other processes will be reported as `other`.
-
-For each process given, its whole process tree will be grouped, not just the process matched.
-The plugin will include both parents and children. If including the parents in the group is
-undesirable, the line `other: *` should be appended to `apps_groups.conf`.
-
-The process names are the ones returned by:
-
-- `ps -e` or `cat /proc/PID/stat`
-- in case of substring mode (see below): `/proc/PID/cmdline`
-
-To add process names with spaces, enclose them in quotes (single or double)
-example: `'Plex Media Serv'` or `"my other process"`.
-
-You can add an asterisk `*` at the beginning and/or the end of a process:
-
-- `*name` _suffix_ mode: will search for processes ending with `name` (at `/proc/PID/stat`)
-- `name*` _prefix_ mode: will search for processes beginning with `name` (at `/proc/PID/stat`)
-- `*name*` _substring_ mode: will search for `name` in the whole command line (at `/proc/PID/cmdline`)
-
-If you enter even just one _name_ (substring), `apps.plugin` will process
-`/proc/PID/cmdline` for all processes (of course only once per process: when they are first seen).
-
-To add processes with single quotes, enclose them in double quotes: `"process with this ' single quote"`
-
-To add processes with double quotes, enclose them in single quotes: `'process with this " double quote'`
-
-If a group or process name starts with a `-`, the dimension will be hidden from the chart (cpu chart only).
-
-If a process starts with a `+`, debugging will be enabled for it (debugging produces a lot of output - do not enable it in production systems).
-
-You can add any number of groups. Only the ones found running will affect the charts generated.
-However, producing charts with hundreds of dimensions may slow down your web browser.
-
-The order of the entries in this list is important: the first that matches a process is used, so put important
-ones at the top. Processes not matched by any row will inherit the group from their parents or children.
-
-The order also controls the order of the dimensions on the generated charts (although applications started
-after apps.plugin is started will be appended to the existing list of dimensions the `netdata` daemon maintains).
-
-There are a few command-line options you can pass to `apps.plugin`. The list of available options can be acquired with the `--help` flag. The options can be set in the `netdata.conf` file. For example, to disable user and user group charts, you should set:
-
-```
-[plugin:apps]
- command options = without-users without-groups
-```
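-
-To see the full list of available options, you can run the plugin binary directly with the `--help` flag (assuming the default plugins directory):
-
-```bash
-/usr/libexec/netdata/plugins.d/apps.plugin --help
-```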
-
-### Integration with eBPF
-
-If you don't see charts under the **eBPF syscall** or **eBPF net** sections, you should edit your
-[`ebpf.d.conf`](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md#configure-the-ebpf-collector) file to ensure the eBPF program is enabled.
-
-Also see our [guide on troubleshooting apps with eBPF
-metrics](https://github.com/netdata/netdata/blob/master/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md) for ideas on how to interpret these charts in a
-few scenarios.
-
-## Permissions
-
-`apps.plugin` requires additional privileges to collect all the information it needs.
-The problem is described in issue #157.
-
-When Netdata is installed, `apps.plugin` is given the capabilities `cap_dac_read_search,cap_sys_ptrace+ep`.
-If this fails (i.e. `setcap` fails), `apps.plugin` is setuid to `root`.
-
-### Linux capabilities in containers
-
-There are a few cases, like `docker` and `virtuozzo` containers, where `setcap` succeeds, but the capabilities
-are silently ignored (in `lxc` containers `setcap` fails).
-
-In this case, you will have to setuid `apps.plugin` to root by running these commands:
-
-```sh
-chown root:netdata /usr/libexec/netdata/plugins.d/apps.plugin
-chmod 4750 /usr/libexec/netdata/plugins.d/apps.plugin
-```
-
-You will have to run these commands every time you update Netdata.
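-
-To check whether the file capabilities were actually applied, you can inspect the binary with `getcap` (again assuming the default plugins directory):
-
-```bash
-getcap /usr/libexec/netdata/plugins.d/apps.plugin
-```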
-
-## Security
-
-`apps.plugin` performs a hard-coded function of building the process tree in memory,
-iterating forever, collecting metrics for each running process and sending them to Netdata.
-This is a one-way communication, from `apps.plugin` to Netdata.
-
-So, since `apps.plugin` cannot be instructed by Netdata for the actions it performs,
-we think it is pretty safe to allow it to have these increased privileges.
-
-Keep in mind that `apps.plugin` will still run without escalated permissions,
-but it will not be able to collect all the information.
-
-## Application Badges
-
-You can create badges that you can embed anywhere you like, with URLs like this:
-
-```
-https://your.netdata.ip:19999/api/v1/badge.svg?chart=apps.processes&dimensions=myapp&value_color=green%3E0%7Cred
-```
-
-The color expression unescaped is this: `value_color=green>0|red`.
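-
-For example, you could fetch such a badge with `curl` (substitute your own host and port):
-
-```bash
-curl -o sql_processes.svg \
-    "https://your.netdata.ip:19999/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred"
-```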
-
-Here is an example for the process group `sql` at `https://registry.my-netdata.io`:
-
-![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
-
-Netdata is able to give you a lot more badges for your app.
-Examples below for process group `sql`:
-
-- CPU usage: ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.cpu&dimensions=sql&value_color=green=0%7Corange%3C50%7Cred)
-- Disk Physical Reads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.preads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Disk Physical Writes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Disk Logical Reads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lreads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Disk Logical Writes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Open Files ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_files&dimensions=sql&value_color=green%3E30%7Cred)
-- Real Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.mem&dimensions=sql&value_color=green%3C100%7Corange%3C200%7Cred)
-- Virtual Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.vmem&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Swap Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.swap&dimensions=sql&value_color=green=0%7Cred)
-- Minor Page Faults ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.minor_faults&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Processes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
-- Threads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.threads&dimensions=sql&value_color=green%3E=28%7Cred)
-- Major Faults (swap activity) ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.major_faults&dimensions=sql&value_color=green=0%7Cred)
-- Open Pipes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_pipes&dimensions=sql&value_color=green=0%7Cred)
-- Open Sockets ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_sockets&dimensions=sql&value_color=green%3E=3%7Cred)
-
-For more information about badges check [Generating Badges](https://github.com/netdata/netdata/blob/master/web/api/badges/README.md)
-
-## Comparison with console tools
-
-SSH to a server running Netdata and execute this:
-
-```sh
-while true; do ls -l /var/run >/dev/null; done
-```
-
-In most systems `/var/run` is a `tmpfs` device, so there is nothing that can stop this command
-from entirely consuming one of the CPU cores of the machine.
-
-As we will see below, **none** of the console performance monitoring tools can report that this
-command is using 100% CPU. They do report of course that the CPU is busy, but **they fail to
-identify the process that consumes so much CPU**.
-
-Here is what common Linux console monitoring tools report:
-
-### top
-
-`top` reports that `bash` is using just 14%.
-
-If you check the total system CPU utilization, it says there is no idle CPU at all, but `top`
-fails to provide a breakdown of the CPU consumption in the system. The sum of the CPU utilization
-of all processes reported by `top` is 15.6%.
-
-```
-top - 18:46:28 up 3 days, 20:14, 2 users, load average: 0.22, 0.05, 0.02
-Tasks: 76 total, 2 running, 74 sleeping, 0 stopped, 0 zombie
-%Cpu(s): 32.8 us, 65.6 sy, 0.0 ni, 0.0 id, 0.0 wa, 1.3 hi, 0.3 si, 0.0 st
-KiB Mem : 1016576 total, 244112 free, 52012 used, 720452 buff/cache
-KiB Swap: 0 total, 0 free, 0 used. 753712 avail Mem
-
- PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
-12789 root 20 0 14980 4180 3020 S 14.0 0.4 0:02.82 bash
- 9 root 20 0 0 0 0 S 1.0 0.0 0:22.36 rcuos/0
- 642 netdata 20 0 132024 20112 2660 S 0.3 2.0 14:26.29 netdata
-12522 netdata 20 0 9508 2476 1828 S 0.3 0.2 0:02.26 apps.plugin
- 1 root 20 0 67196 10216 7500 S 0.0 1.0 0:04.83 systemd
- 2 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kthreadd
-```
-
-### htop
-
-Exactly like `top`, `htop` provides an incomplete breakdown of the system CPU utilization.
-
-```
- CPU[||||||||||||||||||||||||100.0%] Tasks: 27, 11 thr; 2 running
- Mem[||||||||||||||||||||85.4M/993M] Load average: 1.16 0.88 0.90
- Swp[ 0K/0K] Uptime: 3 days, 21:37:03
-
- PID USER PRI NI VIRT RES SHR S CPU% MEM% TIME+ Command
-12789 root 20 0 15104 4484 3208 S 14.0 0.4 10:57.15 -bash
- 7024 netdata 20 0 9544 2480 1744 S 0.7 0.2 0:00.88 /usr/libexec/netd
- 7009 netdata 20 0 138M 21016 2712 S 0.7 2.1 0:00.89 /usr/sbin/netdata
- 7012 netdata 20 0 138M 21016 2712 S 0.0 2.1 0:00.31 /usr/sbin/netdata
- 563 root 20 0 308M 202M 202M S 0.0 20.4 1:00.81 /usr/lib/systemd/
- 7019 netdata 20 0 138M 21016 2712 S 0.0 2.1 0:00.14 /usr/sbin/netdata
-```
-
-### atop
-
-`atop` also fails to break down CPU usage.
-
-```
-ATOP - localhost 2016/12/10 20:11:27 ----------- 10s elapsed
-PRC | sys 1.13s | user 0.43s | #proc 75 | #zombie 0 | #exit 5383 |
-CPU | sys 67% | user 31% | irq 2% | idle 0% | wait 0% |
-CPL | avg1 1.34 | avg5 1.05 | avg15 0.96 | csw 51346 | intr 10508 |
-MEM | tot 992.8M | free 211.5M | cache 470.0M | buff 87.2M | slab 164.7M |
-SWP | tot 0.0M | free 0.0M | | vmcom 207.6M | vmlim 496.4M |
-DSK | vda | busy 0% | read 0 | write 4 | avio 1.50 ms |
-NET | transport | tcpi 16 | tcpo 15 | udpi 0 | udpo 0 |
-NET | network | ipi 16 | ipo 15 | ipfrw 0 | deliv 16 |
-NET | eth0 ---- | pcki 16 | pcko 15 | si 1 Kbps | so 4 Kbps |
-
- PID SYSCPU USRCPU VGROW RGROW RDDSK WRDSK ST EXC S CPU CMD 1/600
-12789 0.98s 0.40s 0K 0K 0K 336K -- - S 14% bash
- 9 0.08s 0.00s 0K 0K 0K 0K -- - S 1% rcuos/0
- 7024 0.03s 0.00s 0K 0K 0K 0K -- - S 0% apps.plugin
- 7009 0.01s 0.01s 0K 0K 0K 4K -- - S 0% netdata
-```
-
-### glances
-
-And the same is true for `glances`. The system runs at 100%, but `glances` reports only 17%
-per-process utilization.
-
-Note also, that being a `python` program, `glances` uses 1.6% CPU while it runs.
-
-```
-localhost Uptime: 3 days, 21:42:00
-
-CPU [100.0%] CPU 100.0% MEM 23.7% SWAP 0.0% LOAD 1-core
-MEM [ 23.7%] user: 30.9% total: 993M total: 0 1 min: 1.18
-SWAP [ 0.0%] system: 67.8% used: 236M used: 0 5 min: 1.08
- idle: 0.0% free: 757M free: 0 15 min: 1.00
-
-NETWORK Rx/s Tx/s TASKS 75 (90 thr), 1 run, 74 slp, 0 oth
-eth0 168b 2Kb
-eth1 0b 0b CPU% MEM% PID USER NI S Command
-lo 0b 0b 13.5 0.4 12789 root 0 S -bash
- 1.6 2.2 7025 root 0 R /usr/bin/python /u
-DISK I/O R/s W/s 1.0 0.0 9 root 0 S rcuos/0
-vda1 0 4K 0.3 0.2 7024 netdata 0 S /usr/libexec/netda
- 0.3 0.0 7 root 0 S rcu_sched
-FILE SYS Used Total 0.3 2.1 7009 netdata 0 S /usr/sbin/netdata
-/ (vda1) 1.56G 29.5G 0.0 0.0 17 root 0 S oom_reaper
-```
-
-### Why does this happen?
-
-All the console tools report usage based on the processes found running *at the moment they
-examine the process tree*. So, they see just one `ls` command, which is actually very quick
-with minor CPU utilization. But the shell is spawning hundreds of them, one after another
-(much like shell scripts do).
-
-### What does Netdata report?
-
-The total CPU utilization of the system:
-
-![image](https://cloud.githubusercontent.com/assets/2662304/21076212/9198e5a6-bf2e-11e6-9bc0-6bdea25befb2.png)
-<br/>***Figure 1**: The system overview section at Netdata, just a few seconds after the command was run*
-
-And at the applications `apps.plugin` breaks down CPU usage per application:
-
-![image](https://cloud.githubusercontent.com/assets/2662304/21076220/c9687848-bf2e-11e6-8d81-348592c5aca2.png)
-<br/>***Figure 2**: The Applications section at Netdata, just a few seconds after the command was run*
-
-So, the `ssh` session is using 95% CPU time.
-
-Why `ssh`?
-
-`apps.plugin` groups all processes based on its configuration file.
-The default configuration has nothing for `bash`, but it has for `sshd`, so Netdata accumulates
-all ssh sessions to a dimension on the charts, called `ssh`. This includes all the processes in
-the process tree of `sshd`, **including the exited children**.
-
-> Distributions based on `systemd` provide another way to get CPU utilization per user session
-> or running service: control groups (cgroups), commonly used as part of containers.
-> `apps.plugin` does not use these mechanisms. The process grouping made by `apps.plugin` works
-> on any Linux, `systemd`-based or not.
-
-#### A more technical description of how Netdata works
-
-Netdata reads `/proc/<pid>/stat` for all processes, once per second and extracts `utime` and
-`stime` (user and system cpu utilization), much like all the console tools do.
-
-But it also extracts `cutime` and `cstime`, which account for the user and system time of the exited children of each process.
-By keeping a map in memory of the whole process tree, it is capable of assigning the right time to every process, taking
-into account all its exited children.
-
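-You can inspect these raw counters yourself. Here is a minimal sketch that prints the counters of the reading process itself (here, `awk`); it assumes the process name contains no spaces, which would otherwise shift the field positions:
-
-```bash
-# Fields 14-17 of /proc/<pid>/stat are utime, stime, cutime and cstime,
-# expressed in clock ticks (see proc(5)).
-awk '{print "utime:", $14, "stime:", $15, "cutime:", $16, "cstime:", $17}' /proc/self/stat
-```
-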
-It is tricky, since a process may be running for 1 hour and once it exits, its parent should not
-receive the whole 1 hour of cpu time in just 1 second - you have to subtract the cpu time that has
-been reported for it prior to this iteration.
-
-It is even trickier, because walking through the entire process tree takes some time itself. So,
-if you sum the CPU utilization of all processes, you might have more CPU time than the reported
-total cpu time of the system. Netdata solves this by adapting the per-process cpu utilization to
-the total of the system. [Netdata adds charts that document this normalization](https://london.my-netdata.io/default.html#menu_netdata_submenu_apps_plugin).
-
-
diff --git a/collectors/apps.plugin/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf
deleted file mode 100644
index 195536a0a..000000000
--- a/collectors/apps.plugin/apps_groups.conf
+++ /dev/null
@@ -1,428 +0,0 @@
-#
-# apps.plugin process grouping
-#
-# The apps.plugin displays charts with information about the processes running.
-# This config allows grouping processes together, so that several processes
-# will be reported as one.
-#
-# Only groups in this file are reported. All other processes will be reported
-# as 'other'.
-#
-# For each process given, its whole process tree will be grouped, not just
-# the process matched. The plugin will include both parents and children.
-#
-# The format is:
-#
-# group: process1 process2 process3 ...
-#
-# Each group can be given multiple times, to add more processes to it.
-#
-# The process names are the ones returned by:
-#
-# - ps -e or /proc/PID/stat
-# - in case of substring mode (see below): /proc/PID/cmdline
-#
-# To add process names with spaces, enclose them in quotes (single or double)
-# example: 'Plex Media Serv' "my other process".
-#
-# Note that spaces are not supported for process groups. Use a dash "-" instead.
-# example-process-group: process1 process2
-#
-# Wildcard support:
-# You can add an asterisk (*) at the beginning and/or the end of a process:
-#
-# *name suffix mode: will search for processes ending with 'name'
-# (/proc/PID/stat)
-#
-# name* prefix mode: will search for processes beginning with 'name'
-# (/proc/PID/stat)
-#
-# *name* substring mode: will search for 'name' in the whole command line
-# (/proc/PID/cmdline)
-#
-# If you enter even just one *name* (substring), apps.plugin will process
-# /proc/PID/cmdline for all processes, just once (when they are first seen).
-#
-# To add processes with single quotes, enclose them in double quotes
-# example: "process with this ' single quote"
-#
-# To add processes with double quotes, enclose them in single quotes:
-# example: 'process with this " double quote'
-#
-# If a group or process name starts with a -, the dimension will be hidden
-# (cpu chart only).
-#
-# If a process starts with a +, debugging will be enabled for it
-# (debugging produces a lot of output - do not enable it in production systems)
-#
-# You can add any number of groups you like. Only the ones found running will
-# affect the charts generated. However, producing charts with hundreds of
-# dimensions may slow down your web browser.
-#
-# The order of the entries in this list is important: the first that matches
-# a process is used, so put important ones at the top. Processes not matched
-# by any row will inherit the group from their parents or children.
-#
-# The order also controls the order of the dimensions on the generated charts
-# (although applications started after apps.plugin is started will be appended
-# to the existing list of dimensions the netdata daemon maintains).
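-#
-# As an illustration only (a hypothetical entry, not part of the stock
-# configuration), a group matching an exact name, a prefix and a substring
-# could look like this:
-#
-# myapp: myapp-server myapp-worker* *myapp-cron*
-#
-# Here 'myapp-server' must match exactly, 'myapp-worker*' matches any process
-# whose name begins with 'myapp-worker', and '*myapp-cron*' searches for
-# 'myapp-cron' in the full command line of every process.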
-
-# -----------------------------------------------------------------------------
-# NETDATA processes accounting
-
-# netdata main process
-netdata: netdata
-
-# netdata known plugins
-# plugins not defined here will be accumulated in netdata, above
-apps.plugin: apps.plugin
-freeipmi.plugin: freeipmi.plugin
-nfacct.plugin: nfacct.plugin
-cups.plugin: cups.plugin
-xenstat.plugin: xenstat.plugin
-perf.plugin: perf.plugin
-charts.d.plugin: *charts.d.plugin*
-python.d.plugin: *python.d.plugin*
-systemd-journal.plugin:*systemd-journal.plugin*
-tc-qos-helper: *tc-qos-helper.sh*
-fping: fping
-ioping: ioping
-go.d.plugin: *go.d.plugin*
-slabinfo.plugin: *slabinfo.plugin*
-ebpf.plugin: *ebpf.plugin*
-debugfs.plugin: *debugfs.plugin*
-logs-management.plugin: *logs-management.plugin*
-
-# agent-service-discovery
-agent_sd: agent_sd
-
-# -----------------------------------------------------------------------------
-# authentication/authorization related servers
-
-auth: radius* openldap* ldap* slapd authelia sssd saslauthd polkitd gssproxy
-fail2ban: fail2ban*
-
-# -----------------------------------------------------------------------------
-# web/ftp servers
-
-httpd: apache* httpd nginx* lighttpd hiawatha caddy h2o
-proxy: squid* c-icap squidGuard varnish*
-php: php* lsphp*
-ftpd: proftpd in.tftpd vsftpd
-uwsgi: uwsgi
-unicorn: *unicorn*
-puma: *puma*
-
-# -----------------------------------------------------------------------------
-# database servers
-
-sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* sqlservr
-nosql: mongod redis* memcached *couchdb*
-timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain* influxd*
-columndb: clickhouse-server*
-
-# -----------------------------------------------------------------------------
-# email servers
-
-mta: amavis* zmstat-* zmdiaglog zmmailboxdmgr opendkim postfwd2 smtp* lmtp* sendmail postfix master pickup qmgr showq tlsmgr postscreen oqmgr msmtp* nullmailer*
-mda: dovecot *imapd *pop3d *popd
-
-# -----------------------------------------------------------------------------
-# network, routing, VPN
-
-ppp: ppp*
-vpn: openvpn pptp* cjdroute gvpe tincd wireguard tailscaled
-wifi: hostapd wpa_supplicant
-routing: ospfd* ospf6d* bgpd bfdd fabricd isisd eigrpd sharpd staticd ripd ripngd pimd pbrd nhrpd ldpd zebra vrrpd vtysh bird*
-modem: ModemManager
-netmanager: NetworkManager nm* systemd-networkd networkctl netplan connmand wicked* avahi-autoipd networkd-dispatcher
-firewall: firewalld ufw nft
-tor: tor
-bluetooth: bluetooth bluetoothd bluez bluedevil obexd
-
-# -----------------------------------------------------------------------------
-# high availability and balancers
-
-camo: *camo*
-balancer: ipvs_* haproxy
-ha: corosync hs_logd ha_logd stonithd pacemakerd lrmd crmd keepalived ucarp*
-
-# -----------------------------------------------------------------------------
-# telephony
-
-pbx: asterisk safe_asterisk *vicidial*
-sip: opensips* stund
-
-# -----------------------------------------------------------------------------
-# chat
-
-chat: irssi *vines* *prosody* murmurd
-
-# -----------------------------------------------------------------------------
-# monitoring
-
-logs: ulogd* syslog* rsyslog* logrotate *systemd-journal* rotatelogs sysklogd metalog
-nms: snmpd vnstatd smokeping zabbix* munin* mon openhpid tailon nrpe
-monit: monit
-splunk: splunkd
-azure: mdsd *waagent* *omiserver* *omiagent* hv_kvp_daemon hv_vss_daemon *auoms* *omsagent*
-datadog: *datadog*
-edgedelta: edgedelta
-newrelic: newrelic*
-google-agent: *google_guest_agent* *google_osconfig_agent*
-nvidia-smi: nvidia-smi
-htop: htop
-watchdog: watchdog
-telegraf: telegraf
-grafana: grafana*
-
-# -----------------------------------------------------------------------------
-# storage, file systems and file servers
-
-ceph: ceph-* ceph_* radosgw* rbd-* cephfs-* osdmaptool crushtool
-samba: smbd nmbd winbindd ctdbd ctdb-* ctdb_*
-nfs: rpcbind rpc.* nfs*
-zfs: spl_* z_* txg_* zil_* arc_* l2arc*
-btrfs: btrfs*
-iscsi: iscsid iscsi_eh
-afp: netatalk afpd cnid_dbd cnid_metad
-ntfs-3g: ntfs-3g
-
-# -----------------------------------------------------------------------------
-# kubernetes
-
-kubelet: kubelet
-kube-dns: kube-dns
-kube-proxy: kube-proxy
-metrics-server: metrics-server
-heapster: heapster
-
-# -----------------------------------------------------------------------------
-# AWS
-
-aws-s3: '*aws s3*' s3cmd s5cmd
-aws: aws
-
-# -----------------------------------------------------------------------------
-# virtualization platform
-
-proxmox-ve: pve* spiceproxy
-
-# -----------------------------------------------------------------------------
-# containers & virtual machines
-
-containers: lxc* docker* balena* containerd
-VMs: vbox* VBox* qemu* kvm*
-libvirt: virtlogd virtqemud virtstoraged virtnetworkd virtlockd virtinterfaced
-libvirt: virtnodedevd virtproxyd virtsecretd libvirtd
-guest-agent: qemu-ga spice-vdagent cloud-init*
-
-# -----------------------------------------------------------------------------
-# ssh servers and clients
-
-ssh: ssh* scp sftp* dropbear
-
-# -----------------------------------------------------------------------------
-# print servers and clients
-
-print: cups* lpd lpq
-
-# -----------------------------------------------------------------------------
-# time servers and clients
-
-time: ntp* systemd-timesyn* chronyd ptp*
-
-# -----------------------------------------------------------------------------
-# dhcp servers and clients
-
-dhcp: *dhcp* dhclient
-
-# -----------------------------------------------------------------------------
-# name servers and clients
-
-dns: named unbound nsd pdns_server knotd gdnsd yadifad dnsmasq *systemd-resolve* pihole* avahi-daemon avahi-dnsconfd
-dnsdist: dnsdist
-
-# -----------------------------------------------------------------------------
-# installation / compilation / debugging
-
-build: cc1 cc1plus as gcc* cppcheck ld make cmake automake autoconf autoreconf
-build: cargo rustc bazel buck git gdb valgrind* rpmbuild dpkg-buildpackage
-
-# -----------------------------------------------------------------------------
-# package management
-
-packagemanager: apt* dpkg* dselect dnf yum rpm zypp* yast* pacman xbps* swupd* emerge*
-packagemanager: packagekitd pkgin pkg apk snapd slackpkg slapt-get
-
-# -----------------------------------------------------------------------------
-# antivirus
-
-antivirus: clam* *clam imunify360*
-
-# -----------------------------------------------------------------------------
-# torrent clients
-
-torrents: *deluge* transmission* *SickBeard* *CouchPotato* *rtorrent*
-
-# -----------------------------------------------------------------------------
-# backup servers and clients
-
-backup: rsync lsyncd bacula* borg rclone
-
-# -----------------------------------------------------------------------------
-# cron
-
-cron: cron* atd anacron *systemd-cron* incrond
-
-# -----------------------------------------------------------------------------
-# UPS
-
-ups: upsmon upsd */nut/* apcupsd
-
-# -----------------------------------------------------------------------------
-# media players, servers, clients
-
-media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd
-media: mpd minidlnad mt-daapd Plex* jellyfin squeeze* jackett Ombi
-media: strawberry* clementine*
-
-audio: pulse* pipewire wireplumber jack*
-
-# -----------------------------------------------------------------------------
-# java applications
-
-hdfsdatanode: *org.apache.hadoop.hdfs.server.datanode.DataNode*
-hdfsnamenode: *org.apache.hadoop.hdfs.server.namenode.NameNode*
-hdfsjournalnode: *org.apache.hadoop.hdfs.qjournal.server.JournalNode*
-hdfszkfc: *org.apache.hadoop.hdfs.tools.DFSZKFailoverController*
-
-yarnnode: *org.apache.hadoop.yarn.server.nodemanager.NodeManager*
-yarnmgr: *org.apache.hadoop.yarn.server.resourcemanager.ResourceManager*
-yarnproxy: *org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer*
-
-sparkworker: *org.apache.spark.deploy.worker.Worker*
-sparkmaster: *org.apache.spark.deploy.master.Master*
-
-hbaseregion: *org.apache.hadoop.hbase.regionserver.HRegionServer*
-hbaserest: *org.apache.hadoop.hbase.rest.RESTServer*
-hbasethrift: *org.apache.hadoop.hbase.thrift.ThriftServer*
-hbasemaster: *org.apache.hadoop.hbase.master.HMaster*
-
-zookeeper: *org.apache.zookeeper.server.quorum.QuorumPeerMain*
-
-hive2: *org.apache.hive.service.server.HiveServer2*
-hivemetastore: *org.apache.hadoop.hive.metastore.HiveMetaStore*
-
-solr: *solr.install.dir*
-
-airflow: *airflow*
-
-# -----------------------------------------------------------------------------
-# GUI
-
-X: X Xorg xinit xdm Xwayland xsettingsd touchegg
-wayland: swaylock swayidle waypipe wayvnc
-kde: *kdeinit* kdm sddm plasmashell startplasma-* kwin* kwallet* krunner kactivitymanager*
-gnome: gnome-* gdm gconf* mutter
-mate: mate-* msd-* marco*
-cinnamon: cinnamon* muffin
-xfce: xfwm4 xfdesktop xfce* Thunar xfsettingsd xfconf*
-lxde: lxde* startlxde lxdm lxappearance* lxlauncher* lxpanel* lxsession* lxsettings*
-lxqt: lxqt* startlxqt
-enlightenment: entrance enlightenment*
-i3: i3*
-awesome: awesome awesome-client
-dwm: dwm.*
-sway: sway
-weston: weston
-cage: cage
-wayfire: wayfire
-gui: lightdm colord seatd greetd gkrellm slim qingy dconf* *gvfs gvfs*
-gui: '*systemd --user*' xdg-* at-spi-*
-
-webbrowser: *chrome-sandbox* *google-chrome* *chromium* *firefox* vivaldi* opera* epiphany chrome*
-webbrowser: lynx elinks w3m w3mmee links
-mua: evolution-* thunderbird* mutt neomutt pine mailx alpine
-
-# -----------------------------------------------------------------------------
-# Kernel / System
-
-ksmd: ksmd
-khugepaged: khugepaged
-kdamond: kdamond
-kswapd: kswapd
-zswap: zswap
-kcompactd: kcompactd
-
-system: systemd* udisks* udevd* *udevd ipv6_addrconf dbus-* rtkit*
-system: mdadm acpid uuidd upowerd elogind* eudev mdev lvmpolld dmeventd
-system: accounts-daemon rngd haveged rasdaemon irqbalance start-stop-daemon
-system: supervise-daemon openrc* init runit runsvdir runsv auditd lsmd
-system: abrt* nscd rtkit-daemon gpg-agent usbguard* boltd geoclue
-
-kernel: kworker kthreadd kauditd lockd khelper kdevtmpfs khungtaskd rpciod
-kernel: fsnotify_mark kthrotld deferwq scsi_* kdmflush oom_reaper kdevtempfs
-kernel: ksoftirqd
-
-# -----------------------------------------------------------------------------
-# inetd
-
-inetd: inetd xinetd
-
-# -----------------------------------------------------------------------------
-# other application servers
-
-consul: consul
-
-kafka: *kafka.Kafka*
-
-rabbitmq: *rabbitmq*
-
-sidekiq: *sidekiq*
-java: java
-ipfs: ipfs
-erlang: beam.smp
-
-node: node
-factorio: factorio
-
-p4: p4*
-
-git-services: gitea gitlab-runner
-
-freeswitch: freeswitch*
-
-# -------- web3 / blockchains ----------
-
-go-ethereum: geth*
-nethermind-ethereum: nethermind*
-besu-ethereum: besu*
-openEthereum: openethereum*
-urbit: urbit*
-bitcoin-node: *bitcoind* lnd*
-filecoin: lotus* lotus-miner* lotus-worker*
-solana: solana*
-web3: *hardhat* *ganache* *truffle* *brownie* *waffle*
-terra: terra* mantle*
-
-# -----------------------------------------------------------------------------
-# chaos engineering tools
-
-stress: stress stress-ng*
-gremlin: gremlin*
-
-# -----------------------------------------------------------------------------
-# load testing tools
-
-locust: locust
-
-# -----------------------------------------------------------------------------
-# data science and machine learning tools
-
-jupyter: jupyter*
-
-# -----------------------------------------------------------------------------
-# File synchronization tools
-
-filesync: dropbox syncthing
diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c
deleted file mode 100644
index 5bcda84f4..000000000
--- a/collectors/apps.plugin/apps_plugin.c
+++ /dev/null
@@ -1,5422 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-/*
- * netdata apps.plugin
- * (C) Copyright 2023 Netdata Inc.
- * Released under GPL v3+
- */
-
-#include "collectors/all.h"
-#include "libnetdata/libnetdata.h"
-#include "libnetdata/required_dummies.h"
-
-#define APPS_PLUGIN_PROCESSES_FUNCTION_DESCRIPTION "Detailed information on the currently running processes."
-
-#define APPS_PLUGIN_FUNCTIONS() do { \
- fprintf(stdout, PLUGINSD_KEYWORD_FUNCTION " \"processes\" %d \"%s\"\n", PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT, APPS_PLUGIN_PROCESSES_FUNCTION_DESCRIPTION); \
- } while(0)
-
-#define APPS_PLUGIN_GLOBAL_FUNCTIONS() do { \
- fprintf(stdout, PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"processes\" %d \"%s\"\n", PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT, APPS_PLUGIN_PROCESSES_FUNCTION_DESCRIPTION); \
- } while(0)
-
-// ----------------------------------------------------------------------------
-// debugging
-
-static int debug_enabled = 0;
-static inline void debug_log_int(const char *fmt, ... ) {
- va_list args;
-
- fprintf( stderr, "apps.plugin: ");
- va_start( args, fmt );
- vfprintf( stderr, fmt, args );
- va_end( args );
-
- fputc('\n', stderr);
-}
-
-#ifdef NETDATA_INTERNAL_CHECKS
-
-#define debug_log(fmt, args...) do { if(unlikely(debug_enabled)) debug_log_int(fmt, ##args); } while(0)
-
-#else
-
-static inline void debug_log_dummy(void) {}
-#define debug_log(fmt, args...) debug_log_dummy()
-
-#endif
-
-
-// ----------------------------------------------------------------------------
-
-#ifdef __FreeBSD__
-#include <sys/user.h>
-#endif
-
-// ----------------------------------------------------------------------------
-// per O/S configuration
-
-// the minimum PID of the system
-// this is also the pid of the init process
-#define INIT_PID 1
-
-// if, on this O/S, apps.plugin can read the entire process list,
-// including the resource utilization of each process, instantly,
-// set this to 1.
-// when set to 0, apps.plugin builds a sorted list of processes, in order
-// to process child processes before their parent processes
-#ifdef __FreeBSD__
-#define ALL_PIDS_ARE_READ_INSTANTLY 1
-#else
-#define ALL_PIDS_ARE_READ_INSTANTLY 0
-#endif
-
-// ----------------------------------------------------------------------------
-// string lengths
-
-#define MAX_COMPARE_NAME 100
-#define MAX_NAME 100
-#define MAX_CMDLINE 16384
-
-// ----------------------------------------------------------------------------
-// the rates we are going to send to netdata will have this detail; a value of:
-// - 1 will send just the integer part to netdata
-// - 100 will send 2 decimal points
-// - 1000 will send 3 decimal points
-// etc.
-#define RATES_DETAIL 10000ULL
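-// e.g. with RATES_DETAIL = 10000ULL, a collected value of 1234567
-// represents a rate of 123.4567 (4 decimal places kept in an integer)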
-
-// ----------------------------------------------------------------------------
-// factor for calculating correct CPU time values depending on units of raw data
-static unsigned int time_factor = 0;
-
-// ----------------------------------------------------------------------------
-// to avoid reallocating too frequently, we can increase the number of spare
-// file descriptors used by processes.
-// IMPORTANT:
-// having a lot of spares increases the CPU utilization of the plugin.
-#define MAX_SPARE_FDS 1
-
-// ----------------------------------------------------------------------------
-// command line options
-
-static int
- update_every = 1,
- enable_guest_charts = 0,
-#ifdef __FreeBSD__
- enable_file_charts = 0,
-#else
- enable_file_charts = 1,
- max_fds_cache_seconds = 60,
-#endif
- enable_function_cmdline = 0,
- enable_detailed_uptime_charts = 0,
- enable_users_charts = 1,
- enable_groups_charts = 1,
- include_exited_childs = 1;
-
-// will be changed to getenv(NETDATA_USER_CONFIG_DIR) if it exists
-static char *user_config_dir = CONFIG_DIR;
-static char *stock_config_dir = LIBCONFIG_DIR;
-
-// some variables for keeping track of the process count by state
-typedef enum {
- PROC_STATUS_RUNNING = 0,
- PROC_STATUS_SLEEPING_D, // uninterruptible sleep
- PROC_STATUS_SLEEPING, // interruptible sleep
- PROC_STATUS_ZOMBIE,
- PROC_STATUS_STOPPED,
-    PROC_STATUS_END, // placeholder marking the end of the enum fields
-} proc_state;
-
-#ifndef __FreeBSD__
-static proc_state proc_state_count[PROC_STATUS_END];
-static const char *proc_states[] = {
- [PROC_STATUS_RUNNING] = "running",
- [PROC_STATUS_SLEEPING] = "sleeping_interruptible",
- [PROC_STATUS_SLEEPING_D] = "sleeping_uninterruptible",
- [PROC_STATUS_ZOMBIE] = "zombie",
- [PROC_STATUS_STOPPED] = "stopped",
- };
-#endif
-
-// ----------------------------------------------------------------------------
-// internal flags
-// handled in code (automatically set)
-
-// log each problem once per process
-// log flood protection flags (log_thrown)
-typedef enum __attribute__((packed)) {
- PID_LOG_IO = (1 << 0),
- PID_LOG_STATUS = (1 << 1),
- PID_LOG_CMDLINE = (1 << 2),
- PID_LOG_FDS = (1 << 3),
- PID_LOG_STAT = (1 << 4),
- PID_LOG_LIMITS = (1 << 5),
- PID_LOG_LIMITS_DETAIL = (1 << 6),
-} PID_LOG;
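-
-// illustrative usage (see managed_log() below): the flags form a bitmask,
-// so a single field records which errors were already logged for a pid:
-//
-//   p->log_thrown |= PID_LOG_IO;            // remember that we logged it
-//   if(!(p->log_thrown & PID_LOG_IO)) ...   // log only the first failure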
-
-static int
- show_guest_time = 0, // 1 when guest values are collected
- show_guest_time_old = 0,
-    proc_pid_cmdline_is_needed = 0; // 1 when we need to read /proc/PID/cmdline
-
-
-// ----------------------------------------------------------------------------
-// internal counters
-
-static size_t
- global_iterations_counter = 1,
- calls_counter = 0,
- file_counter = 0,
- filenames_allocated_counter = 0,
- inodes_changed_counter = 0,
- links_changed_counter = 0,
- targets_assignment_counter = 0;
-
-
-// ----------------------------------------------------------------------------
-// Normalization
-//
-// With normalization we lower the collected metrics by a factor to make them
-// match the total utilization of the system.
-// The discrepancy exists because apps.plugin needs some time to collect all
-// the metrics. This results in utilization that exceeds the total utilization
-// of the system.
-//
-// During normalization, we align the per-process utilization to the total of
-// the system. We first consume the exited children utilization and, if the
-// collected value is still above the total, we proportionally scale each
-// reported metric.
-
-// the total system time, as reported by /proc/stat
-static kernel_uint_t
- global_utime = 0,
- global_stime = 0,
- global_gtime = 0;
-
-// the normalization ratios, as calculated by normalize_utilization()
-NETDATA_DOUBLE
- utime_fix_ratio = 1.0,
- stime_fix_ratio = 1.0,
- gtime_fix_ratio = 1.0,
- minflt_fix_ratio = 1.0,
- majflt_fix_ratio = 1.0,
- cutime_fix_ratio = 1.0,
- cstime_fix_ratio = 1.0,
- cgtime_fix_ratio = 1.0,
- cminflt_fix_ratio = 1.0,
- cmajflt_fix_ratio = 1.0;
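-
-// as an illustration (a sketch, not the actual normalize_utilization() code),
-// each ratio is the system total divided by the sum collected across all
-// targets, applied only when the collected sum exceeds the total:
-//
-//   if(collected_utime > global_utime && collected_utime)
-//       utime_fix_ratio = (NETDATA_DOUBLE)global_utime / (NETDATA_DOUBLE)collected_utime;
-//   else
-//       utime_fix_ratio = 1.0;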
-
-
-struct pid_on_target {
- int32_t pid;
- struct pid_on_target *next;
-};
-
-struct openfds {
- kernel_uint_t files;
- kernel_uint_t pipes;
- kernel_uint_t sockets;
- kernel_uint_t inotifies;
- kernel_uint_t eventfds;
- kernel_uint_t timerfds;
- kernel_uint_t signalfds;
- kernel_uint_t eventpolls;
- kernel_uint_t other;
-};
-
-#define pid_openfds_sum(p) ((p)->openfds.files + (p)->openfds.pipes + (p)->openfds.sockets + (p)->openfds.inotifies + (p)->openfds.eventfds + (p)->openfds.timerfds + (p)->openfds.signalfds + (p)->openfds.eventpolls + (p)->openfds.other)
-
-struct pid_limits {
-// kernel_uint_t max_cpu_time;
-// kernel_uint_t max_file_size;
-// kernel_uint_t max_data_size;
-// kernel_uint_t max_stack_size;
-// kernel_uint_t max_core_file_size;
-// kernel_uint_t max_resident_set;
-// kernel_uint_t max_processes;
- kernel_uint_t max_open_files;
-// kernel_uint_t max_locked_memory;
-// kernel_uint_t max_address_space;
-// kernel_uint_t max_file_locks;
-// kernel_uint_t max_pending_signals;
-// kernel_uint_t max_msgqueue_size;
-// kernel_uint_t max_nice_priority;
-// kernel_uint_t max_realtime_priority;
-// kernel_uint_t max_realtime_timeout;
-};
-
-// ----------------------------------------------------------------------------
-// target
-//
-// a target is the structure that processes are aggregated into, to be
-// reported to netdata.
-//
-// - Each entry in /etc/apps_groups.conf creates a target.
-// - Each user and group used by a process in the system, creates a target.
-
-struct target {
- char compare[MAX_COMPARE_NAME + 1];
- uint32_t comparehash;
- size_t comparelen;
-
- char id[MAX_NAME + 1];
- uint32_t idhash;
-
- char name[MAX_NAME + 1];
- char clean_name[MAX_NAME + 1]; // sanitized name used in chart id (need to replace at least dots)
- uid_t uid;
- gid_t gid;
-
- bool is_other;
-
- kernel_uint_t minflt;
- kernel_uint_t cminflt;
- kernel_uint_t majflt;
- kernel_uint_t cmajflt;
- kernel_uint_t utime;
- kernel_uint_t stime;
- kernel_uint_t gtime;
- kernel_uint_t cutime;
- kernel_uint_t cstime;
- kernel_uint_t cgtime;
- kernel_uint_t num_threads;
- // kernel_uint_t rss;
-
- kernel_uint_t status_vmsize;
- kernel_uint_t status_vmrss;
- kernel_uint_t status_vmshared;
- kernel_uint_t status_rssfile;
- kernel_uint_t status_rssshmem;
- kernel_uint_t status_vmswap;
- kernel_uint_t status_voluntary_ctxt_switches;
- kernel_uint_t status_nonvoluntary_ctxt_switches;
-
- kernel_uint_t io_logical_bytes_read;
- kernel_uint_t io_logical_bytes_written;
- kernel_uint_t io_read_calls;
- kernel_uint_t io_write_calls;
- kernel_uint_t io_storage_bytes_read;
- kernel_uint_t io_storage_bytes_written;
- kernel_uint_t io_cancelled_write_bytes;
-
- int *target_fds;
- int target_fds_size;
-
- struct openfds openfds;
-
- NETDATA_DOUBLE max_open_files_percent;
-
- kernel_uint_t starttime;
- kernel_uint_t collected_starttime;
- kernel_uint_t uptime_min;
- kernel_uint_t uptime_sum;
- kernel_uint_t uptime_max;
-
- unsigned int processes; // how many processes have been merged to this
- int exposed; // if set, we have sent this to netdata
- int hidden; // if set, we set the hidden flag on the dimension
- int debug_enabled;
- int ends_with;
- int starts_with; // if set, the compare string matches only the
- // beginning of the command
-
- struct pid_on_target *root_pid; // list of aggregated pids for target debugging
-
- struct target *target; // the one that will be reported to netdata
- struct target *next;
-};
-
-struct target
- *apps_groups_default_target = NULL, // the default target
- *apps_groups_root_target = NULL, // apps_groups.conf defined
- *users_root_target = NULL, // users
- *groups_root_target = NULL; // user groups
-
-size_t
- apps_groups_targets_count = 0; // # of apps_groups.conf targets
-
-
-// ----------------------------------------------------------------------------
-// pid_stat
-//
-// structure to store data for each process running
-// see: man proc for the description of the fields
-
-struct pid_fd {
- int fd;
-
-#ifndef __FreeBSD__
- ino_t inode;
- char *filename;
- uint32_t link_hash;
- size_t cache_iterations_counter;
- size_t cache_iterations_reset;
-#endif
-};
-
-struct pid_stat {
- int32_t pid;
- int32_t ppid;
- // int32_t pgrp;
- // int32_t session;
- // int32_t tty_nr;
- // int32_t tpgid;
- // uint64_t flags;
-
- char state;
-
- char comm[MAX_COMPARE_NAME + 1];
- char *cmdline;
-
- // these are raw values collected
- kernel_uint_t minflt_raw;
- kernel_uint_t cminflt_raw;
- kernel_uint_t majflt_raw;
- kernel_uint_t cmajflt_raw;
- kernel_uint_t utime_raw;
- kernel_uint_t stime_raw;
- kernel_uint_t gtime_raw; // guest_time
- kernel_uint_t cutime_raw;
- kernel_uint_t cstime_raw;
- kernel_uint_t cgtime_raw; // cguest_time
-
- // these are rates
- kernel_uint_t minflt;
- kernel_uint_t cminflt;
- kernel_uint_t majflt;
- kernel_uint_t cmajflt;
- kernel_uint_t utime;
- kernel_uint_t stime;
- kernel_uint_t gtime;
- kernel_uint_t cutime;
- kernel_uint_t cstime;
- kernel_uint_t cgtime;
-
- // int64_t priority;
- // int64_t nice;
- int32_t num_threads;
- // int64_t itrealvalue;
- kernel_uint_t collected_starttime;
- // kernel_uint_t vsize;
- // kernel_uint_t rss;
- // kernel_uint_t rsslim;
- // kernel_uint_t starcode;
- // kernel_uint_t endcode;
- // kernel_uint_t startstack;
- // kernel_uint_t kstkesp;
- // kernel_uint_t kstkeip;
- // uint64_t signal;
- // uint64_t blocked;
- // uint64_t sigignore;
- // uint64_t sigcatch;
- // uint64_t wchan;
- // uint64_t nswap;
- // uint64_t cnswap;
- // int32_t exit_signal;
- // int32_t processor;
- // uint32_t rt_priority;
- // uint32_t policy;
- // kernel_uint_t delayacct_blkio_ticks;
-
- uid_t uid;
- gid_t gid;
-
- kernel_uint_t status_voluntary_ctxt_switches_raw;
- kernel_uint_t status_nonvoluntary_ctxt_switches_raw;
-
- kernel_uint_t status_vmsize;
- kernel_uint_t status_vmrss;
- kernel_uint_t status_vmshared;
- kernel_uint_t status_rssfile;
- kernel_uint_t status_rssshmem;
- kernel_uint_t status_vmswap;
- kernel_uint_t status_voluntary_ctxt_switches;
- kernel_uint_t status_nonvoluntary_ctxt_switches;
-#ifndef __FreeBSD__
- ARL_BASE *status_arl;
-#endif
-
- kernel_uint_t io_logical_bytes_read_raw;
- kernel_uint_t io_logical_bytes_written_raw;
- kernel_uint_t io_read_calls_raw;
- kernel_uint_t io_write_calls_raw;
- kernel_uint_t io_storage_bytes_read_raw;
- kernel_uint_t io_storage_bytes_written_raw;
- kernel_uint_t io_cancelled_write_bytes_raw;
-
- kernel_uint_t io_logical_bytes_read;
- kernel_uint_t io_logical_bytes_written;
- kernel_uint_t io_read_calls;
- kernel_uint_t io_write_calls;
- kernel_uint_t io_storage_bytes_read;
- kernel_uint_t io_storage_bytes_written;
- kernel_uint_t io_cancelled_write_bytes;
-
- kernel_uint_t uptime;
-
- struct pid_fd *fds; // array of fds it uses
- size_t fds_size; // the size of the fds array
-
- struct openfds openfds;
- struct pid_limits limits;
-
- NETDATA_DOUBLE openfds_limits_percent;
-
- int sortlist; // higher numbers = top on the process tree
- // each process gets a unique number
-
- int children_count; // number of processes directly referencing this
- int keeploops; // increases by 1 every time keep is 1 and updated 0
-
- PID_LOG log_thrown;
-
- bool keep; // true when we need to keep this process in memory even after it exited
- bool updated; // true when the process is currently running
- bool merged; // true when it has been merged to its parent
- bool read; // true when we have already read this process for this iteration
- bool matched_by_config;
-
- struct target *target; // app_groups.conf targets
- struct target *user_target; // uid based targets
- struct target *group_target; // gid based targets
-
- usec_t stat_collected_usec;
- usec_t last_stat_collected_usec;
-
- usec_t io_collected_usec;
- usec_t last_io_collected_usec;
- usec_t last_limits_collected_usec;
-
- char *fds_dirname; // the full directory name in /proc/PID/fd
-
- char *stat_filename;
- char *status_filename;
- char *io_filename;
- char *cmdline_filename;
- char *limits_filename;
-
- struct pid_stat *parent;
- struct pid_stat *prev;
- struct pid_stat *next;
-};
-
-size_t pagesize;
-
-kernel_uint_t global_uptime;
-
-static struct pid_stat
- *root_of_pids = NULL, // global list of all processes running
- **all_pids = NULL; // to avoid allocations, we pre-allocate
- // a pointer for each pid in the entire pid space.
-
-static size_t
- all_pids_count = 0; // the number of processes running
-
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
-// Another pre-allocated list of all possible pids.
-// We need it to assign each pid a unique sortlist id, so that we
-// read parents before children. This is needed to prevent a situation where
-// a child is found running, but by the time we read its parent, it has exited
-// and its parent has accumulated its resources.
-static pid_t
- *all_pids_sortlist = NULL;
-#endif
-
-// ----------------------------------------------------------------------------
-// file descriptor
-//
-// this is used to keep a global list of all open files of the system.
-// it is needed in order to calculate the unique files processes have open.
-
-#define FILE_DESCRIPTORS_INCREASE_STEP 100
-
-// types for struct file_descriptor->type
-typedef enum fd_filetype {
- FILETYPE_OTHER,
- FILETYPE_FILE,
- FILETYPE_PIPE,
- FILETYPE_SOCKET,
- FILETYPE_INOTIFY,
- FILETYPE_EVENTFD,
- FILETYPE_EVENTPOLL,
- FILETYPE_TIMERFD,
- FILETYPE_SIGNALFD
-} FD_FILETYPE;
-
-struct file_descriptor {
- avl_t avl;
-
-#ifdef NETDATA_INTERNAL_CHECKS
- uint32_t magic;
-#endif /* NETDATA_INTERNAL_CHECKS */
-
- const char *name;
- uint32_t hash;
-
- FD_FILETYPE type;
- int count;
- int pos;
-} *all_files = NULL;
-
-static int
- all_files_len = 0,
- all_files_size = 0;
-
-// ----------------------------------------------------------------------------
-// read users and groups from files
-
-struct user_or_group_id {
- avl_t avl;
-
- union {
- uid_t uid;
- gid_t gid;
- } id;
-
- char *name;
-
- int updated;
-
- struct user_or_group_id * next;
-};
-
-enum user_or_group_id_type {
- USER_ID,
- GROUP_ID
-};
-
-struct user_or_group_ids{
- enum user_or_group_id_type type;
-
- avl_tree_type index;
- struct user_or_group_id *root;
-
- char filename[FILENAME_MAX + 1];
-};
-
-int user_id_compare(void* a, void* b) {
- if(((struct user_or_group_id *)a)->id.uid < ((struct user_or_group_id *)b)->id.uid)
- return -1;
-
- else if(((struct user_or_group_id *)a)->id.uid > ((struct user_or_group_id *)b)->id.uid)
- return 1;
-
- else
- return 0;
-}
-
-struct user_or_group_ids all_user_ids = {
- .type = USER_ID,
-
- .index = {
- NULL,
- user_id_compare
- },
-
- .root = NULL,
-
- .filename = "",
-};
-
-int group_id_compare(void* a, void* b) {
- if(((struct user_or_group_id *)a)->id.gid < ((struct user_or_group_id *)b)->id.gid)
- return -1;
-
- else if(((struct user_or_group_id *)a)->id.gid > ((struct user_or_group_id *)b)->id.gid)
- return 1;
-
- else
- return 0;
-}
-
-struct user_or_group_ids all_group_ids = {
- .type = GROUP_ID,
-
- .index = {
- NULL,
- group_id_compare
- },
-
- .root = NULL,
-
- .filename = "",
-};
-
-int file_changed(const struct stat *statbuf, struct timespec *last_modification_time) {
- if(likely(statbuf->st_mtim.tv_sec == last_modification_time->tv_sec &&
- statbuf->st_mtim.tv_nsec == last_modification_time->tv_nsec)) return 0;
-
- last_modification_time->tv_sec = statbuf->st_mtim.tv_sec;
- last_modification_time->tv_nsec = statbuf->st_mtim.tv_nsec;
-
- return 1;
-}
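-
-// file_changed() lets read_user_or_group_ids() below skip re-parsing the
-// passwd/group file when its modification time (seconds and nanoseconds)
-// has not changed since the previous call.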
-
-int read_user_or_group_ids(struct user_or_group_ids *ids, struct timespec *last_modification_time) {
- struct stat statbuf;
- if(unlikely(stat(ids->filename, &statbuf)))
- return 1;
- else
- if(likely(!file_changed(&statbuf, last_modification_time))) return 0;
-
- procfile *ff = procfile_open(ids->filename, " :\t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 1;
-
- size_t line, lines = procfile_lines(ff);
-
- for(line = 0; line < lines ;line++) {
- size_t words = procfile_linewords(ff, line);
- if(unlikely(words < 3)) continue;
-
- char *name = procfile_lineword(ff, line, 0);
- if(unlikely(!name || !*name)) continue;
-
- char *id_string = procfile_lineword(ff, line, 2);
- if(unlikely(!id_string || !*id_string)) continue;
-
-
- struct user_or_group_id *user_or_group_id = callocz(1, sizeof(struct user_or_group_id));
-
- if(ids->type == USER_ID)
- user_or_group_id->id.uid = (uid_t) str2ull(id_string, NULL);
- else
- user_or_group_id->id.gid = (uid_t) str2ull(id_string, NULL);
-
- user_or_group_id->name = strdupz(name);
- user_or_group_id->updated = 1;
-
- struct user_or_group_id *existing_user_id = NULL;
-
- if(likely(ids->root))
- existing_user_id = (struct user_or_group_id *)avl_search(&ids->index, (avl_t *) user_or_group_id);
-
- if(unlikely(existing_user_id)) {
- freez(existing_user_id->name);
- existing_user_id->name = user_or_group_id->name;
- existing_user_id->updated = 1;
- freez(user_or_group_id);
- }
- else {
- if(unlikely(avl_insert(&ids->index, (avl_t *) user_or_group_id) != (void *) user_or_group_id)) {
- netdata_log_error("INTERNAL ERROR: duplicate indexing of id during realloc");
- };
-
- user_or_group_id->next = ids->root;
- ids->root = user_or_group_id;
- }
- }
-
- procfile_close(ff);
-
- // remove unused ids
- struct user_or_group_id *user_or_group_id = ids->root, *prev_user_id = NULL;
-
- while(user_or_group_id) {
- if(unlikely(!user_or_group_id->updated)) {
- if(unlikely((struct user_or_group_id *)avl_remove(&ids->index, (avl_t *) user_or_group_id) != user_or_group_id))
- netdata_log_error("INTERNAL ERROR: removal of unused id from index, removed a different id");
-
- if(prev_user_id)
- prev_user_id->next = user_or_group_id->next;
- else
- ids->root = user_or_group_id->next;
-
- freez(user_or_group_id->name);
- freez(user_or_group_id);
-
- if(prev_user_id)
- user_or_group_id = prev_user_id->next;
- else
- user_or_group_id = ids->root;
- }
- else {
- user_or_group_id->updated = 0;
-
- prev_user_id = user_or_group_id;
- user_or_group_id = user_or_group_id->next;
- }
- }
-
- return 0;
-}
-
-// ----------------------------------------------------------------------------
-// apps_groups.conf
-// aggregate all processes in groups, to have a limited number of dimensions
-
-static struct target *get_users_target(uid_t uid) {
- struct target *w;
- for(w = users_root_target ; w ; w = w->next)
- if(w->uid == uid) return w;
-
- w = callocz(sizeof(struct target), 1);
- snprintfz(w->compare, MAX_COMPARE_NAME, "%u", uid);
- w->comparehash = simple_hash(w->compare);
- w->comparelen = strlen(w->compare);
-
- snprintfz(w->id, MAX_NAME, "%u", uid);
- w->idhash = simple_hash(w->id);
-
- struct user_or_group_id user_id_to_find, *user_or_group_id = NULL;
- user_id_to_find.id.uid = uid;
-
- if(*netdata_configured_host_prefix) {
- static struct timespec last_passwd_modification_time;
- int ret = read_user_or_group_ids(&all_user_ids, &last_passwd_modification_time);
-
- if(likely(!ret && all_user_ids.index.root))
- user_or_group_id = (struct user_or_group_id *)avl_search(&all_user_ids.index, (avl_t *) &user_id_to_find);
- }
-
- if(user_or_group_id && user_or_group_id->name && *user_or_group_id->name) {
- snprintfz(w->name, MAX_NAME, "%s", user_or_group_id->name);
- }
- else {
- struct passwd *pw = getpwuid(uid);
- if(!pw || !pw->pw_name || !*pw->pw_name)
- snprintfz(w->name, MAX_NAME, "%u", uid);
- else
- snprintfz(w->name, MAX_NAME, "%s", pw->pw_name);
- }
-
- strncpyz(w->clean_name, w->name, MAX_NAME);
- netdata_fix_chart_name(w->clean_name);
-
- w->uid = uid;
-
- w->next = users_root_target;
- users_root_target = w;
-
- debug_log("added uid %u ('%s') target", w->uid, w->name);
-
- return w;
-}
-
-struct target *get_groups_target(gid_t gid)
-{
- struct target *w;
- for(w = groups_root_target ; w ; w = w->next)
- if(w->gid == gid) return w;
-
- w = callocz(sizeof(struct target), 1);
- snprintfz(w->compare, MAX_COMPARE_NAME, "%u", gid);
- w->comparehash = simple_hash(w->compare);
- w->comparelen = strlen(w->compare);
-
- snprintfz(w->id, MAX_NAME, "%u", gid);
- w->idhash = simple_hash(w->id);
-
- struct user_or_group_id group_id_to_find, *group_id = NULL;
- group_id_to_find.id.gid = gid;
-
- if(*netdata_configured_host_prefix) {
- static struct timespec last_group_modification_time;
- int ret = read_user_or_group_ids(&all_group_ids, &last_group_modification_time);
-
- if(likely(!ret && all_group_ids.index.root))
- group_id = (struct user_or_group_id *)avl_search(&all_group_ids.index, (avl_t *) &group_id_to_find);
- }
-
- if(group_id && group_id->name && *group_id->name) {
- snprintfz(w->name, MAX_NAME, "%s", group_id->name);
- }
- else {
- struct group *gr = getgrgid(gid);
- if(!gr || !gr->gr_name || !*gr->gr_name)
- snprintfz(w->name, MAX_NAME, "%u", gid);
- else
- snprintfz(w->name, MAX_NAME, "%s", gr->gr_name);
- }
-
- strncpyz(w->clean_name, w->name, MAX_NAME);
- netdata_fix_chart_name(w->clean_name);
-
- w->gid = gid;
-
- w->next = groups_root_target;
- groups_root_target = w;
-
- debug_log("added gid %u ('%s') target", w->gid, w->name);
-
- return w;
-}
-
-// find or create a new target
-// there are targets that are just aggregated to another target (the second argument)
-static struct target *get_apps_groups_target(const char *id, struct target *target, const char *name) {
- int tdebug = 0, thidden = target?target->hidden:0, ends_with = 0;
- const char *nid = id;
-
- // extract the options
- while(nid[0] == '-' || nid[0] == '+' || nid[0] == '*') {
- if(nid[0] == '-') thidden = 1;
- if(nid[0] == '+') tdebug = 1;
- if(nid[0] == '*') ends_with = 1;
- nid++;
- }
- uint32_t hash = simple_hash(id);
-
- // find if it already exists
- struct target *w, *last = apps_groups_root_target;
- for(w = apps_groups_root_target ; w ; w = w->next) {
- if(w->idhash == hash && strncmp(nid, w->id, MAX_NAME) == 0)
- return w;
-
- last = w;
- }
-
- // find an existing target
- if(unlikely(!target)) {
- while(*name == '-') {
- if(*name == '-') thidden = 1;
- name++;
- }
-
- for(target = apps_groups_root_target ; target != NULL ; target = target->next) {
- if(!target->target && strcmp(name, target->name) == 0)
- break;
- }
-
- if(unlikely(debug_enabled)) {
- if(unlikely(target))
- debug_log("REUSING TARGET NAME '%s' on ID '%s'", target->name, target->id);
- else
- debug_log("NEW TARGET NAME '%s' on ID '%s'", name, id);
- }
- }
-
- if(target && target->target)
- fatal("Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'", id, target->id, target->target->id);
-
- w = callocz(sizeof(struct target), 1);
- strncpyz(w->id, nid, MAX_NAME);
- w->idhash = simple_hash(w->id);
-
- if(unlikely(!target))
- // copy the name
- strncpyz(w->name, name, MAX_NAME);
- else
- // copy the id
- strncpyz(w->name, nid, MAX_NAME);
-
- // dots are used to distinguish chart type and id in streaming, so we should replace them
- strncpyz(w->clean_name, w->name, MAX_NAME);
- netdata_fix_chart_name(w->clean_name);
- for (char *d = w->clean_name; *d; d++) {
- if (*d == '.')
- *d = '_';
- }
-
- strncpyz(w->compare, nid, MAX_COMPARE_NAME);
- size_t len = strlen(w->compare);
- if(w->compare[len - 1] == '*') {
- w->compare[len - 1] = '\0';
- w->starts_with = 1;
- }
- w->ends_with = ends_with;
-
- if(w->starts_with && w->ends_with)
- proc_pid_cmdline_is_needed = 1;
-
- w->comparehash = simple_hash(w->compare);
- w->comparelen = strlen(w->compare);
-
- w->hidden = thidden;
-#ifdef NETDATA_INTERNAL_CHECKS
- w->debug_enabled = tdebug;
-#else
- if(tdebug)
- fprintf(stderr, "apps.plugin has been compiled without debugging\n");
-#endif
- w->target = target;
-
- // append it, to maintain the order in apps_groups.conf
- if(last) last->next = w;
- else apps_groups_root_target = w;
-
- debug_log("ADDING TARGET ID '%s', process name '%s' (%s), aggregated on target '%s', options: %s %s"
- , w->id
- , w->compare, (w->starts_with && w->ends_with)?"substring":((w->starts_with)?"prefix":((w->ends_with)?"suffix":"exact"))
- , w->target?w->target->name:w->name
- , (w->hidden)?"hidden":"-"
- , (w->debug_enabled)?"debug":"-"
- );
-
- return w;
-}
-
-// read the apps_groups.conf file
-static int read_apps_groups_conf(const char *path, const char *file)
-{
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s/apps_%s.conf", path, file);
-
- debug_log("process groups file: '%s'", filename);
-
- // ----------------------------------------
-
- procfile *ff = procfile_open(filename, " :\t", PROCFILE_FLAG_DEFAULT);
- if(!ff) return 1;
-
- procfile_set_quotes(ff, "'\"");
-
- ff = procfile_readall(ff);
- if(!ff)
- return 1;
-
- size_t line, lines = procfile_lines(ff);
-
- for(line = 0; line < lines ;line++) {
- size_t word, words = procfile_linewords(ff, line);
- if(!words) continue;
-
- char *name = procfile_lineword(ff, line, 0);
- if(!name || !*name) continue;
-
- // find a possibly existing target
- struct target *w = NULL;
-
- // loop through all words, skipping the first one (the name)
- for(word = 0; word < words ;word++) {
- char *s = procfile_lineword(ff, line, word);
- if(!s || !*s) continue;
- if(*s == '#') break;
-
- // is this the first word? skip it
- if(s == name) continue;
-
- // add this target
- struct target *n = get_apps_groups_target(s, w, name);
- if(!n) {
- netdata_log_error("Cannot create target '%s' (line %zu, word %zu)", s, line, word);
- continue;
- }
-
- // just some optimization
- // to avoid searching for a target for each process
- if(!w) w = n->target?n->target:n;
- }
- }
-
- procfile_close(ff);
-
- apps_groups_default_target = get_apps_groups_target("p+!o@w#e$i^r&7*5(-i)l-o_", NULL, "other"); // match nothing
- if(!apps_groups_default_target)
- fatal("Cannot create default target");
- apps_groups_default_target->is_other = true;
-
- // allow the user to override group 'other'
- if(apps_groups_default_target->target)
- apps_groups_default_target = apps_groups_default_target->target;
-
- return 0;
-}
-
-
-// ----------------------------------------------------------------------------
-// struct pid_stat management
-static inline void init_pid_fds(struct pid_stat *p, size_t first, size_t size);
-
-static inline struct pid_stat *get_pid_entry(pid_t pid) {
- if(unlikely(all_pids[pid]))
- return all_pids[pid];
-
- struct pid_stat *p = callocz(sizeof(struct pid_stat), 1);
- p->fds = mallocz(sizeof(struct pid_fd) * MAX_SPARE_FDS);
- p->fds_size = MAX_SPARE_FDS;
- init_pid_fds(p, 0, p->fds_size);
- p->pid = pid;
-
- DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(root_of_pids, p, prev, next);
-
- all_pids[pid] = p;
- all_pids_count++;
-
- return p;
-}
-
-static inline void del_pid_entry(pid_t pid) {
- struct pid_stat *p = all_pids[pid];
-
- if(unlikely(!p)) {
- netdata_log_error("attempted to free pid %d that is not allocated.", pid);
- return;
- }
-
- debug_log("process %d %s exited, deleting it.", pid, p->comm);
-
- DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(root_of_pids, p, prev, next);
-
-    // free the per-fd filenames
-#ifndef __FreeBSD__
- {
- size_t i;
- for(i = 0; i < p->fds_size; i++)
- if(p->fds[i].filename)
- freez(p->fds[i].filename);
- }
-#endif
- freez(p->fds);
-
- freez(p->fds_dirname);
- freez(p->stat_filename);
- freez(p->status_filename);
- freez(p->limits_filename);
-#ifndef __FreeBSD__
- arl_free(p->status_arl);
-#endif
- freez(p->io_filename);
- freez(p->cmdline_filename);
- freez(p->cmdline);
- freez(p);
-
- all_pids[pid] = NULL;
- all_pids_count--;
-}
-
-// ----------------------------------------------------------------------------
-
-static inline int managed_log(struct pid_stat *p, PID_LOG log, int status) {
- if(unlikely(!status)) {
- // netdata_log_error("command failed log %u, errno %d", log, errno);
-
- if(unlikely(debug_enabled || errno != ENOENT)) {
- if(unlikely(debug_enabled || !(p->log_thrown & log))) {
- p->log_thrown |= log;
- switch(log) {
- case PID_LOG_IO:
- #ifdef __FreeBSD__
- netdata_log_error("Cannot fetch process %d I/O info (command '%s')", p->pid, p->comm);
- #else
- netdata_log_error("Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
- #endif
- break;
-
- case PID_LOG_STATUS:
- #ifdef __FreeBSD__
- netdata_log_error("Cannot fetch process %d status info (command '%s')", p->pid, p->comm);
- #else
- netdata_log_error("Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
- #endif
- break;
-
- case PID_LOG_CMDLINE:
- #ifdef __FreeBSD__
- netdata_log_error("Cannot fetch process %d command line (command '%s')", p->pid, p->comm);
- #else
- netdata_log_error("Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
- #endif
- break;
-
- case PID_LOG_FDS:
- #ifdef __FreeBSD__
- netdata_log_error("Cannot fetch process %d files (command '%s')", p->pid, p->comm);
- #else
- netdata_log_error("Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
- #endif
- break;
-
-                    case PID_LOG_LIMITS:
-                    #ifdef __FreeBSD__
-                    ;
-                    #else
-                    netdata_log_error("Cannot process %s/proc/%d/limits (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
-                    #endif
-                    // fall through - this case intentionally shares the break of PID_LOG_STAT below
-
-                    case PID_LOG_STAT:
- break;
-
- default:
- netdata_log_error("unhandled error for pid %d, command '%s'", p->pid, p->comm);
- break;
- }
- }
- }
- errno = 0;
- }
- else if(unlikely(p->log_thrown & log)) {
- // netdata_log_error("unsetting log %u on pid %d", log, p->pid);
- p->log_thrown &= ~log;
- }
-
- return status;
-}
-
-static inline void assign_target_to_pid(struct pid_stat *p) {
- targets_assignment_counter++;
-
- uint32_t hash = simple_hash(p->comm);
- size_t pclen = strlen(p->comm);
-
- struct target *w;
- for(w = apps_groups_root_target; w ; w = w->next) {
- // if(debug_enabled || (p->target && p->target->debug_enabled)) debug_log_int("\t\tcomparing '%s' with '%s'", w->compare, p->comm);
-
- // find it - 4 cases:
- // 1. the target is not a pattern
- // 2. the target has the prefix
- // 3. the target has the suffix
- // 4. the target is something inside cmdline
-
- if(unlikely(( (!w->starts_with && !w->ends_with && w->comparehash == hash && !strcmp(w->compare, p->comm))
- || (w->starts_with && !w->ends_with && !strncmp(w->compare, p->comm, w->comparelen))
- || (!w->starts_with && w->ends_with && pclen >= w->comparelen && !strcmp(w->compare, &p->comm[pclen - w->comparelen]))
- || (proc_pid_cmdline_is_needed && w->starts_with && w->ends_with && p->cmdline && strstr(p->cmdline, w->compare))
- ))) {
-
- p->matched_by_config = true;
- if(w->target) p->target = w->target;
- else p->target = w;
-
- if(debug_enabled || (p->target && p->target->debug_enabled))
- debug_log_int("%s linked to target %s", p->comm, p->target->name);
-
- break;
- }
- }
-}
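-
-// for example (using entries from the stock apps_groups.conf):
-//   'httpd: nginx*'  - prefix:    matches any comm starting with 'nginx'
-//   'mda: *imapd'    - suffix:    matches any comm ending in 'imapd'
-//   'puma: *puma*'   - substring: matches 'puma' anywhere in /proc/PID/cmdline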
-
-
-// ----------------------------------------------------------------------------
-// update pids from proc
-
-static inline int read_proc_pid_cmdline(struct pid_stat *p) {
- static char cmdline[MAX_CMDLINE + 1];
-
-#ifdef __FreeBSD__
- size_t i, bytes = MAX_CMDLINE;
- int mib[4];
-
- mib[0] = CTL_KERN;
- mib[1] = KERN_PROC;
- mib[2] = KERN_PROC_ARGS;
- mib[3] = p->pid;
- if (unlikely(sysctl(mib, 4, cmdline, &bytes, NULL, 0)))
- goto cleanup;
-#else
- if(unlikely(!p->cmdline_filename)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid);
- p->cmdline_filename = strdupz(filename);
- }
-
- int fd = open(p->cmdline_filename, procfile_open_flags, 0666);
- if(unlikely(fd == -1)) goto cleanup;
-
- ssize_t i, bytes = read(fd, cmdline, MAX_CMDLINE);
- close(fd);
-
- if(unlikely(bytes < 0)) goto cleanup;
-#endif
-
- cmdline[bytes] = '\0';
- for(i = 0; i < bytes ; i++) {
- if(unlikely(!cmdline[i])) cmdline[i] = ' ';
- }
-
- if(p->cmdline) freez(p->cmdline);
- p->cmdline = strdupz(cmdline);
-
- debug_log("Read file '%s' contents: %s", p->cmdline_filename, p->cmdline);
-
- return 1;
-
-cleanup:
- // copy the command to the command line
- if(p->cmdline) freez(p->cmdline);
- p->cmdline = strdupz(p->comm);
- return 0;
-}
-
-// ----------------------------------------------------------------------------
-// macro to calculate the incremental rate of a value
-// each parameter is accessed only ONCE - so it is safe to pass function calls
-// or other macros as parameters
-
-#define incremental_rate(rate_variable, last_kernel_variable, new_kernel_value, collected_usec, last_collected_usec) { \
- kernel_uint_t _new_tmp = new_kernel_value; \
- (rate_variable) = (_new_tmp - (last_kernel_variable)) * (USEC_PER_SEC * RATES_DETAIL) / ((collected_usec) - (last_collected_usec)); \
- (last_kernel_variable) = _new_tmp; \
- }
-
-// the same macro for struct pid members
-#define pid_incremental_rate(type, var, value) \
- incremental_rate(var, var##_raw, value, p->type##_collected_usec, p->last_##type##_collected_usec)
-
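-// worked example (illustrative): with RATES_DETAIL = 10000 and samples taken
-// 1 second apart, a counter that grew by 250 ticks is reported as:
-//
-//   rate = (new - last) * (USEC_PER_SEC * RATES_DETAIL) / dt_usec
-//        = 250 * (1000000 * 10000) / 1000000
-//        = 2500000, i.e. 250.0000 ticks/s with 4 decimal places preserved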
-
-// ----------------------------------------------------------------------------
-
-#ifndef __FreeBSD__
-struct arl_callback_ptr {
- struct pid_stat *p;
- procfile *ff;
- size_t line;
-};
-
-void arl_callback_status_uid(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return;
-
- //const char *real_uid = procfile_lineword(aptr->ff, aptr->line, 1);
- const char *effective_uid = procfile_lineword(aptr->ff, aptr->line, 2);
- //const char *saved_uid = procfile_lineword(aptr->ff, aptr->line, 3);
- //const char *filesystem_uid = procfile_lineword(aptr->ff, aptr->line, 4);
-
- if(likely(effective_uid && *effective_uid))
- aptr->p->uid = (uid_t)str2l(effective_uid);
-}
-
-void arl_callback_status_gid(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return;
-
- //const char *real_gid = procfile_lineword(aptr->ff, aptr->line, 1);
- const char *effective_gid = procfile_lineword(aptr->ff, aptr->line, 2);
- //const char *saved_gid = procfile_lineword(aptr->ff, aptr->line, 3);
- //const char *filesystem_gid = procfile_lineword(aptr->ff, aptr->line, 4);
-
- if(likely(effective_gid && *effective_gid))
- aptr->p->gid = (uid_t)str2l(effective_gid);
-}
-
-void arl_callback_status_vmsize(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
-
- aptr->p->status_vmsize = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
-}
-
-void arl_callback_status_vmswap(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
-
- aptr->p->status_vmswap = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
-}
-
-void arl_callback_status_vmrss(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
-
- aptr->p->status_vmrss = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
-}
-
-void arl_callback_status_rssfile(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
-
- aptr->p->status_rssfile = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
-}
-
-void arl_callback_status_rssshmem(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
-
- aptr->p->status_rssshmem = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
-}
-
-void arl_callback_status_voluntary_ctxt_switches(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 2)) return;
-
- struct pid_stat *p = aptr->p;
- pid_incremental_rate(stat, p->status_voluntary_ctxt_switches, str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)));
-}
-
-void arl_callback_status_nonvoluntary_ctxt_switches(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 2)) return;
-
- struct pid_stat *p = aptr->p;
- pid_incremental_rate(stat, p->status_nonvoluntary_ctxt_switches, str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)));
-}
-
-static void update_proc_state_count(char proc_state) {
- switch (proc_state) {
- case 'S':
- proc_state_count[PROC_STATUS_SLEEPING] += 1;
- break;
- case 'R':
- proc_state_count[PROC_STATUS_RUNNING] += 1;
- break;
- case 'D':
- proc_state_count[PROC_STATUS_SLEEPING_D] += 1;
- break;
- case 'Z':
- proc_state_count[PROC_STATUS_ZOMBIE] += 1;
- break;
- case 'T':
- proc_state_count[PROC_STATUS_STOPPED] += 1;
- break;
- default:
- break;
- }
-}
-#endif // !__FreeBSD__
-
-#define MAX_PROC_PID_LIMITS 8192
-#define PROC_PID_LIMITS_MAX_OPEN_FILES_KEY "\nMax open files "
-
-static inline kernel_uint_t get_proc_pid_limits_limit(char *buf, const char *key, size_t key_len, kernel_uint_t def) {
- char *line = strstr(buf, key);
- if(!line)
- return def;
-
- char *v = &line[key_len];
- while(isspace(*v)) v++;
-
-    // the value is followed by more text on the same line, so compare the prefix only
-    if(strncmp(v, "unlimited", sizeof("unlimited") - 1) == 0)
- return 0;
-
- return str2ull(v, NULL);
-}
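-
-// a matching /proc/PID/limits line looks like this (illustrative values):
-//
-//   Max open files            1024                 524288               files
-//
-// the function above locates the key, skips the whitespace and parses the
-// soft limit; the string 'unlimited' is mapped to 0 (meaning: no limit)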
-
-static inline int read_proc_pid_limits(struct pid_stat *p, void *ptr) {
- (void)ptr;
-
-#ifdef __FreeBSD__
- return 0;
-#else
- static char proc_pid_limits_buffer[MAX_PROC_PID_LIMITS + 1];
- int ret = 0;
- bool read_limits = false;
-
- errno = 0;
- proc_pid_limits_buffer[0] = '\0';
-
- kernel_uint_t all_fds = pid_openfds_sum(p);
- if(all_fds < p->limits.max_open_files / 2 && p->io_collected_usec > p->last_limits_collected_usec && p->io_collected_usec - p->last_limits_collected_usec <= 60 * USEC_PER_SEC) {
- // too frequent, we want to collect limits once per minute
- ret = 1;
- goto cleanup;
- }
-
- if(unlikely(!p->limits_filename)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/limits", netdata_configured_host_prefix, p->pid);
- p->limits_filename = strdupz(filename);
- }
-
- int fd = open(p->limits_filename, procfile_open_flags, 0666);
- if(unlikely(fd == -1)) goto cleanup;
-
- ssize_t bytes = read(fd, proc_pid_limits_buffer, MAX_PROC_PID_LIMITS);
- close(fd);
-
- if(bytes <= 0)
- goto cleanup;
-
- // make it '\0' terminated
- if(bytes < MAX_PROC_PID_LIMITS)
- proc_pid_limits_buffer[bytes] = '\0';
- else
- proc_pid_limits_buffer[MAX_PROC_PID_LIMITS - 1] = '\0';
-
- p->limits.max_open_files = get_proc_pid_limits_limit(proc_pid_limits_buffer, PROC_PID_LIMITS_MAX_OPEN_FILES_KEY, sizeof(PROC_PID_LIMITS_MAX_OPEN_FILES_KEY) - 1, 0);
- if(p->limits.max_open_files == 1) {
-        // it seems to be a bug in the kernel or something similar:
-        // it sets max open files to 1, but the number of files
-        // the process has open is more than 1...
- // https://github.com/netdata/netdata/issues/15443
- p->limits.max_open_files = 0;
- ret = 1;
- goto cleanup;
- }
-
- p->last_limits_collected_usec = p->io_collected_usec;
- read_limits = true;
-
- ret = 1;
-
-cleanup:
- if(p->limits.max_open_files)
- p->openfds_limits_percent = (NETDATA_DOUBLE)all_fds * 100.0 / (NETDATA_DOUBLE)p->limits.max_open_files;
- else
- p->openfds_limits_percent = 0.0;
-
- if(p->openfds_limits_percent > 100.0) {
- if(!(p->log_thrown & PID_LOG_LIMITS_DETAIL)) {
- char *line;
-
- if(!read_limits) {
- proc_pid_limits_buffer[0] = '\0';
- line = "NOT READ";
- }
- else {
- line = strstr(proc_pid_limits_buffer, PROC_PID_LIMITS_MAX_OPEN_FILES_KEY);
- if (line) {
- line++; // skip the initial newline
-
- char *end = strchr(line, '\n');
- if (end)
- *end = '\0';
- }
- }
-
- netdata_log_info(
- "FDS_LIMITS: PID %d (%s) is using "
- "%0.2f %% of its fds limits, "
- "open fds = %"PRIu64 "("
- "files = %"PRIu64 ", "
- "pipes = %"PRIu64 ", "
- "sockets = %"PRIu64", "
- "inotifies = %"PRIu64", "
- "eventfds = %"PRIu64", "
- "timerfds = %"PRIu64", "
- "signalfds = %"PRIu64", "
- "eventpolls = %"PRIu64" "
- "other = %"PRIu64" "
- "), open fds limit = %"PRIu64", "
- "%s, "
- "original line [%s]",
- p->pid, p->comm, p->openfds_limits_percent, all_fds,
- p->openfds.files,
- p->openfds.pipes,
- p->openfds.sockets,
- p->openfds.inotifies,
- p->openfds.eventfds,
- p->openfds.timerfds,
- p->openfds.signalfds,
- p->openfds.eventpolls,
- p->openfds.other,
- p->limits.max_open_files,
- read_limits ? "and we have read the limits AFTER counting the fds"
- : "but we have read the limits BEFORE counting the fds",
- line);
-
- p->log_thrown |= PID_LOG_LIMITS_DETAIL;
- }
- }
- else
- p->log_thrown &= ~PID_LOG_LIMITS_DETAIL;
-
- return ret;
-#endif
-}
-
-static inline int read_proc_pid_status(struct pid_stat *p, void *ptr) {
- p->status_vmsize = 0;
- p->status_vmrss = 0;
- p->status_vmshared = 0;
- p->status_rssfile = 0;
- p->status_rssshmem = 0;
- p->status_vmswap = 0;
- p->status_voluntary_ctxt_switches = 0;
- p->status_nonvoluntary_ctxt_switches = 0;
-
-#ifdef __FreeBSD__
- struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
-
- p->uid = proc_info->ki_uid;
- p->gid = proc_info->ki_groups[0];
- p->status_vmsize = proc_info->ki_size / 1024; // in KiB
- p->status_vmrss = proc_info->ki_rssize * pagesize / 1024; // in KiB
- // TODO: what about shared and swap memory on FreeBSD?
- return 1;
-#else
- (void)ptr;
-
- static struct arl_callback_ptr arl_ptr;
- static procfile *ff = NULL;
-
- if(unlikely(!p->status_arl)) {
- p->status_arl = arl_create("/proc/pid/status", NULL, 60);
- arl_expect_custom(p->status_arl, "Uid", arl_callback_status_uid, &arl_ptr);
- arl_expect_custom(p->status_arl, "Gid", arl_callback_status_gid, &arl_ptr);
- arl_expect_custom(p->status_arl, "VmSize", arl_callback_status_vmsize, &arl_ptr);
- arl_expect_custom(p->status_arl, "VmRSS", arl_callback_status_vmrss, &arl_ptr);
- arl_expect_custom(p->status_arl, "RssFile", arl_callback_status_rssfile, &arl_ptr);
- arl_expect_custom(p->status_arl, "RssShmem", arl_callback_status_rssshmem, &arl_ptr);
- arl_expect_custom(p->status_arl, "VmSwap", arl_callback_status_vmswap, &arl_ptr);
- arl_expect_custom(p->status_arl, "voluntary_ctxt_switches", arl_callback_status_voluntary_ctxt_switches, &arl_ptr);
- arl_expect_custom(p->status_arl, "nonvoluntary_ctxt_switches", arl_callback_status_nonvoluntary_ctxt_switches, &arl_ptr);
- }
-
- if(unlikely(!p->status_filename)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/status", netdata_configured_host_prefix, p->pid);
- p->status_filename = strdupz(filename);
- }
-
- ff = procfile_reopen(ff, p->status_filename, (!ff)?" \t:,-()/":NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(unlikely(!ff)) return 0;
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 0;
-
- calls_counter++;
-
- // let ARL use this pid
- arl_ptr.p = p;
- arl_ptr.ff = ff;
-
- size_t lines = procfile_lines(ff), l;
- arl_begin(p->status_arl);
-
- for(l = 0; l < lines ;l++) {
- // debug_log("CHECK: line %zu of %zu, key '%s' = '%s'", l, lines, procfile_lineword(ff, l, 0), procfile_lineword(ff, l, 1));
- arl_ptr.line = l;
- if(unlikely(arl_check(p->status_arl,
- procfile_lineword(ff, l, 0),
- procfile_lineword(ff, l, 1)))) break;
- }
-
- p->status_vmshared = p->status_rssfile + p->status_rssshmem;
-
- // debug_log("%s uid %d, gid %d, VmSize %zu, VmRSS %zu, RssFile %zu, RssShmem %zu, shared %zu", p->comm, (int)p->uid, (int)p->gid, p->status_vmsize, p->status_vmrss, p->status_rssfile, p->status_rssshmem, p->status_vmshared);
-
- return 1;
-#endif
-}
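-
-// A short aside on the ARL pattern used above (the arl_* helpers live in
-// libnetdata): an "adaptive re-sortable list" learns the order in which the
-// expected keywords appear in /proc/<pid>/status, so after the first pass
-// each line is matched against the next expected key instead of being
-// looked up, and arl_check() returns non-zero once all expected keys have
-// been collected, letting the parsing loop break early.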
-
-// ----------------------------------------------------------------------------
-
-static inline int read_proc_pid_stat(struct pid_stat *p, void *ptr) {
- (void)ptr;
-
-#ifdef __FreeBSD__
- struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
- if (unlikely(proc_info->ki_tdflags & TDF_IDLETD))
- goto cleanup;
-#else
- static procfile *ff = NULL;
-
- if(unlikely(!p->stat_filename)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid);
- p->stat_filename = strdupz(filename);
- }
-
- int set_quotes = (!ff)?1:0;
-
- ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(unlikely(!ff)) goto cleanup;
-
- // if(set_quotes) procfile_set_quotes(ff, "()");
- if(unlikely(set_quotes))
- procfile_set_open_close(ff, "(", ")");
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) goto cleanup;
-#endif
-
- p->last_stat_collected_usec = p->stat_collected_usec;
- p->stat_collected_usec = now_monotonic_usec();
- calls_counter++;
-
-#ifdef __FreeBSD__
- char *comm = proc_info->ki_comm;
- p->ppid = proc_info->ki_ppid;
-#else
- // p->pid = str2pid_t(procfile_lineword(ff, 0, 0));
- char *comm = procfile_lineword(ff, 0, 1);
- p->state = *(procfile_lineword(ff, 0, 2));
- p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3));
- // p->pgrp = (int32_t)str2pid_t(procfile_lineword(ff, 0, 4));
- // p->session = (int32_t)str2pid_t(procfile_lineword(ff, 0, 5));
- // p->tty_nr = (int32_t)str2pid_t(procfile_lineword(ff, 0, 6));
- // p->tpgid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 7));
- // p->flags = str2uint64_t(procfile_lineword(ff, 0, 8));
-#endif
- if(strcmp(p->comm, comm) != 0) {
- if(unlikely(debug_enabled)) {
- if(p->comm[0])
- debug_log("\tpid %d (%s) changed name to '%s'", p->pid, p->comm, comm);
- else
- debug_log("\tJust added %d (%s)", p->pid, comm);
- }
-
- strncpyz(p->comm, comm, MAX_COMPARE_NAME);
-
- // /proc/<pid>/cmdline
- if(likely(proc_pid_cmdline_is_needed))
- managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p));
-
- assign_target_to_pid(p);
- }
-
-#ifdef __FreeBSD__
- pid_incremental_rate(stat, p->minflt, (kernel_uint_t)proc_info->ki_rusage.ru_minflt);
- pid_incremental_rate(stat, p->cminflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_minflt);
- pid_incremental_rate(stat, p->majflt, (kernel_uint_t)proc_info->ki_rusage.ru_majflt);
- pid_incremental_rate(stat, p->cmajflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_majflt);
- pid_incremental_rate(stat, p->utime, (kernel_uint_t)proc_info->ki_rusage.ru_utime.tv_sec * 100 + proc_info->ki_rusage.ru_utime.tv_usec / 10000);
- pid_incremental_rate(stat, p->stime, (kernel_uint_t)proc_info->ki_rusage.ru_stime.tv_sec * 100 + proc_info->ki_rusage.ru_stime.tv_usec / 10000);
- pid_incremental_rate(stat, p->cutime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_utime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_utime.tv_usec / 10000);
- pid_incremental_rate(stat, p->cstime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_stime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_stime.tv_usec / 10000);
-
- p->num_threads = proc_info->ki_numthreads;
-
- if(enable_guest_charts) {
- enable_guest_charts = 0;
- netdata_log_info("Guest charts aren't supported by FreeBSD");
- }
-#else
- pid_incremental_rate(stat, p->minflt, str2kernel_uint_t(procfile_lineword(ff, 0, 9)));
- pid_incremental_rate(stat, p->cminflt, str2kernel_uint_t(procfile_lineword(ff, 0, 10)));
- pid_incremental_rate(stat, p->majflt, str2kernel_uint_t(procfile_lineword(ff, 0, 11)));
- pid_incremental_rate(stat, p->cmajflt, str2kernel_uint_t(procfile_lineword(ff, 0, 12)));
- pid_incremental_rate(stat, p->utime, str2kernel_uint_t(procfile_lineword(ff, 0, 13)));
- pid_incremental_rate(stat, p->stime, str2kernel_uint_t(procfile_lineword(ff, 0, 14)));
- pid_incremental_rate(stat, p->cutime, str2kernel_uint_t(procfile_lineword(ff, 0, 15)));
- pid_incremental_rate(stat, p->cstime, str2kernel_uint_t(procfile_lineword(ff, 0, 16)));
- // p->priority = str2kernel_uint_t(procfile_lineword(ff, 0, 17));
- // p->nice = str2kernel_uint_t(procfile_lineword(ff, 0, 18));
- p->num_threads = (int32_t) str2uint32_t(procfile_lineword(ff, 0, 19), NULL);
- // p->itrealvalue = str2kernel_uint_t(procfile_lineword(ff, 0, 20));
- p->collected_starttime = str2kernel_uint_t(procfile_lineword(ff, 0, 21)) / system_hz;
- p->uptime = (global_uptime > p->collected_starttime)?(global_uptime - p->collected_starttime):0;
- // p->vsize = str2kernel_uint_t(procfile_lineword(ff, 0, 22));
- // p->rss = str2kernel_uint_t(procfile_lineword(ff, 0, 23));
- // p->rsslim = str2kernel_uint_t(procfile_lineword(ff, 0, 24));
- // p->starcode = str2kernel_uint_t(procfile_lineword(ff, 0, 25));
- // p->endcode = str2kernel_uint_t(procfile_lineword(ff, 0, 26));
- // p->startstack = str2kernel_uint_t(procfile_lineword(ff, 0, 27));
- // p->kstkesp = str2kernel_uint_t(procfile_lineword(ff, 0, 28));
- // p->kstkeip = str2kernel_uint_t(procfile_lineword(ff, 0, 29));
- // p->signal = str2kernel_uint_t(procfile_lineword(ff, 0, 30));
- // p->blocked = str2kernel_uint_t(procfile_lineword(ff, 0, 31));
- // p->sigignore = str2kernel_uint_t(procfile_lineword(ff, 0, 32));
- // p->sigcatch = str2kernel_uint_t(procfile_lineword(ff, 0, 33));
- // p->wchan = str2kernel_uint_t(procfile_lineword(ff, 0, 34));
- // p->nswap = str2kernel_uint_t(procfile_lineword(ff, 0, 35));
- // p->cnswap = str2kernel_uint_t(procfile_lineword(ff, 0, 36));
- // p->exit_signal = str2kernel_uint_t(procfile_lineword(ff, 0, 37));
- // p->processor = str2kernel_uint_t(procfile_lineword(ff, 0, 38));
- // p->rt_priority = str2kernel_uint_t(procfile_lineword(ff, 0, 39));
- // p->policy = str2kernel_uint_t(procfile_lineword(ff, 0, 40));
- // p->delayacct_blkio_ticks = str2kernel_uint_t(procfile_lineword(ff, 0, 41));
-
- if(enable_guest_charts) {
-
- pid_incremental_rate(stat, p->gtime, str2kernel_uint_t(procfile_lineword(ff, 0, 42)));
- pid_incremental_rate(stat, p->cgtime, str2kernel_uint_t(procfile_lineword(ff, 0, 43)));
-
- if (show_guest_time || p->gtime || p->cgtime) {
- p->utime -= (p->utime >= p->gtime) ? p->gtime : p->utime;
- p->cutime -= (p->cutime >= p->cgtime) ? p->cgtime : p->cutime;
- show_guest_time = 1;
- }
- }
-#endif
-
- if(unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
- debug_log_int("READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=%d", netdata_configured_host_prefix, p->pid, p->comm, (p->target)?p->target->name:"UNSET", p->stat_collected_usec - p->last_stat_collected_usec, p->utime, p->stime, p->cutime, p->cstime, p->minflt, p->majflt, p->cminflt, p->cmajflt, p->num_threads);
-
- if(unlikely(global_iterations_counter == 1)) {
- p->minflt = 0;
- p->cminflt = 0;
- p->majflt = 0;
- p->cmajflt = 0;
- p->utime = 0;
- p->stime = 0;
- p->gtime = 0;
- p->cutime = 0;
- p->cstime = 0;
- p->cgtime = 0;
- }
-#ifndef __FreeBSD__
- update_proc_state_count(p->state);
-#endif
- return 1;
-
-cleanup:
- p->minflt = 0;
- p->cminflt = 0;
- p->majflt = 0;
- p->cmajflt = 0;
- p->utime = 0;
- p->stime = 0;
- p->gtime = 0;
- p->cutime = 0;
- p->cstime = 0;
- p->cgtime = 0;
- p->num_threads = 0;
- // p->rss = 0;
- return 0;
-}
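-
-// A minimal sketch (an assumption about the macro, not its actual body) of
-// the arithmetic pid_incremental_rate() performs above: remember the last
-// raw counter and turn the delta into a rate scaled by
-// USEC_PER_SEC * RATES_DETAIL over the elapsed microseconds - the inverse
-// of the math used later in process_exited_processes().
-static inline kernel_uint_t sketch_incremental_rate(
-        kernel_uint_t *raw,     // last raw value, updated in place
-        kernel_uint_t value,    // freshly collected raw counter
-        usec_t now_usec,        // this collection timestamp
-        usec_t last_usec) {     // previous collection timestamp
-    kernel_uint_t delta = (value >= *raw) ? value - *raw : 0;
-    *raw = value;
-    if(unlikely(!last_usec || now_usec <= last_usec)) return 0;
-    return delta * (USEC_PER_SEC * RATES_DETAIL) / (now_usec - last_usec);
-}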
-
-// ----------------------------------------------------------------------------
-
-static inline int read_proc_pid_io(struct pid_stat *p, void *ptr) {
- (void)ptr;
-#ifdef __FreeBSD__
- struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
-#else
- static procfile *ff = NULL;
-
- if(unlikely(!p->io_filename)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/io", netdata_configured_host_prefix, p->pid);
- p->io_filename = strdupz(filename);
- }
-
- // open the file
- ff = procfile_reopen(ff, p->io_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(unlikely(!ff)) goto cleanup;
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) goto cleanup;
-#endif
-
- calls_counter++;
-
- p->last_io_collected_usec = p->io_collected_usec;
- p->io_collected_usec = now_monotonic_usec();
-
-#ifdef __FreeBSD__
- pid_incremental_rate(io, p->io_storage_bytes_read, proc_info->ki_rusage.ru_inblock);
- pid_incremental_rate(io, p->io_storage_bytes_written, proc_info->ki_rusage.ru_oublock);
-#else
- pid_incremental_rate(io, p->io_logical_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 0, 1)));
- pid_incremental_rate(io, p->io_logical_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 1, 1)));
- pid_incremental_rate(io, p->io_read_calls, str2kernel_uint_t(procfile_lineword(ff, 2, 1)));
- pid_incremental_rate(io, p->io_write_calls, str2kernel_uint_t(procfile_lineword(ff, 3, 1)));
- pid_incremental_rate(io, p->io_storage_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 4, 1)));
- pid_incremental_rate(io, p->io_storage_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 5, 1)));
- pid_incremental_rate(io, p->io_cancelled_write_bytes, str2kernel_uint_t(procfile_lineword(ff, 6, 1)));
-#endif
-
- if(unlikely(global_iterations_counter == 1)) {
- p->io_logical_bytes_read = 0;
- p->io_logical_bytes_written = 0;
- p->io_read_calls = 0;
- p->io_write_calls = 0;
- p->io_storage_bytes_read = 0;
- p->io_storage_bytes_written = 0;
- p->io_cancelled_write_bytes = 0;
- }
-
- return 1;
-
-#ifndef __FreeBSD__
-cleanup:
- p->io_logical_bytes_read = 0;
- p->io_logical_bytes_written = 0;
- p->io_read_calls = 0;
- p->io_write_calls = 0;
- p->io_storage_bytes_read = 0;
- p->io_storage_bytes_written = 0;
- p->io_cancelled_write_bytes = 0;
- return 0;
-#endif
-}
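-
-// For reference, the /proc/<pid>/io lines the indices above map to
-// (documented in proc(5)):
-//   line 0: rchar                  -> io_logical_bytes_read
-//   line 1: wchar                  -> io_logical_bytes_written
-//   line 2: syscr                  -> io_read_calls
-//   line 3: syscw                  -> io_write_calls
-//   line 4: read_bytes             -> io_storage_bytes_read
-//   line 5: write_bytes            -> io_storage_bytes_written
-//   line 6: cancelled_write_bytes  -> io_cancelled_write_bytes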
-
-#ifndef __FreeBSD__
-static inline int read_global_time() {
- static char filename[FILENAME_MAX + 1] = "";
- static procfile *ff = NULL;
- static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, gntime_raw = 0, ntime_raw = 0;
- static usec_t collected_usec = 0, last_collected_usec = 0;
-
- if(unlikely(!ff)) {
- snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix);
- ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) goto cleanup;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) goto cleanup;
-
- last_collected_usec = collected_usec;
- collected_usec = now_monotonic_usec();
-
- calls_counter++;
-
-    // temporary - ntime is folded into global_utime below
- kernel_uint_t global_ntime = 0;
-
- incremental_rate(global_utime, utime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 1)), collected_usec, last_collected_usec);
- incremental_rate(global_ntime, ntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 2)), collected_usec, last_collected_usec);
- incremental_rate(global_stime, stime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 3)), collected_usec, last_collected_usec);
- incremental_rate(global_gtime, gtime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 10)), collected_usec, last_collected_usec);
-
- global_utime += global_ntime;
-
- if(enable_guest_charts) {
-        // temporary - gntime is folded into global_gtime below
- kernel_uint_t global_gntime = 0;
-
- // guest nice time, on guest time
- incremental_rate(global_gntime, gntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 11)), collected_usec, last_collected_usec);
-
- global_gtime += global_gntime;
-
- // remove guest time from user time
- global_utime -= (global_utime > global_gtime) ? global_gtime : global_utime;
- }
-
- if(unlikely(global_iterations_counter == 1)) {
- global_utime = 0;
- global_stime = 0;
- global_gtime = 0;
- }
-
- return 1;
-
-cleanup:
- global_utime = 0;
- global_stime = 0;
- global_gtime = 0;
- return 0;
-}
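-
-// For reference, the first /proc/stat line carries, after the "cpu" label:
-// user, nice, system, idle, iowait, irq, softirq, steal, guest and
-// guest_nice times in clock ticks; the word indices above select user,
-// nice, system, guest and guest_nice.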
-#else
-static inline int read_global_time() {
- static kernel_uint_t utime_raw = 0, stime_raw = 0, ntime_raw = 0;
- static usec_t collected_usec = 0, last_collected_usec = 0;
- long cp_time[CPUSTATES];
-
- if (unlikely(CPUSTATES != 5)) {
- goto cleanup;
- } else {
- static int mib[2] = {0, 0};
-
- if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) {
- goto cleanup;
- }
- }
-
- last_collected_usec = collected_usec;
- collected_usec = now_monotonic_usec();
-
- calls_counter++;
-
-    // temporary - ntime is folded into global_utime below
- kernel_uint_t global_ntime = 0;
-
- incremental_rate(global_utime, utime_raw, cp_time[0] * 100LLU / system_hz, collected_usec, last_collected_usec);
- incremental_rate(global_ntime, ntime_raw, cp_time[1] * 100LLU / system_hz, collected_usec, last_collected_usec);
- incremental_rate(global_stime, stime_raw, cp_time[2] * 100LLU / system_hz, collected_usec, last_collected_usec);
-
- global_utime += global_ntime;
-
- if(unlikely(global_iterations_counter == 1)) {
- global_utime = 0;
- global_stime = 0;
- global_gtime = 0;
- }
-
- return 1;
-
-cleanup:
- global_utime = 0;
- global_stime = 0;
- global_gtime = 0;
- return 0;
-}
-#endif /* !__FreeBSD__ */
-
-// ----------------------------------------------------------------------------
-
-int file_descriptor_compare(void* a, void* b) {
-#ifdef NETDATA_INTERNAL_CHECKS
- if(((struct file_descriptor *)a)->magic != 0x0BADCAFE || ((struct file_descriptor *)b)->magic != 0x0BADCAFE)
- netdata_log_error("Corrupted index data detected. Please report this.");
-#endif /* NETDATA_INTERNAL_CHECKS */
-
- if(((struct file_descriptor *)a)->hash < ((struct file_descriptor *)b)->hash)
- return -1;
-
- else if(((struct file_descriptor *)a)->hash > ((struct file_descriptor *)b)->hash)
- return 1;
-
- else
- return strcmp(((struct file_descriptor *)a)->name, ((struct file_descriptor *)b)->name);
-}
-
-// int file_descriptor_iterator(avl_t *a) { if(a) {}; return 0; }
-
-avl_tree_type all_files_index = {
- NULL,
- file_descriptor_compare
-};
-
-static struct file_descriptor *file_descriptor_find(const char *name, uint32_t hash) {
- struct file_descriptor tmp;
- tmp.hash = (hash)?hash:simple_hash(name);
- tmp.name = name;
- tmp.count = 0;
- tmp.pos = 0;
-#ifdef NETDATA_INTERNAL_CHECKS
- tmp.magic = 0x0BADCAFE;
-#endif /* NETDATA_INTERNAL_CHECKS */
-
- return (struct file_descriptor *)avl_search(&all_files_index, (avl_t *) &tmp);
-}
-
-#define file_descriptor_add(fd) avl_insert(&all_files_index, (avl_t *)(fd))
-#define file_descriptor_remove(fd) avl_remove(&all_files_index, (avl_t *)(fd))
-
-// ----------------------------------------------------------------------------
-
-static inline void file_descriptor_not_used(int id)
-{
- if(id > 0 && id < all_files_size) {
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(all_files[id].magic != 0x0BADCAFE) {
- netdata_log_error("Ignoring request to remove empty file id %d.", id);
- return;
- }
-#endif /* NETDATA_INTERNAL_CHECKS */
-
- debug_log("decreasing slot %d (count = %d).", id, all_files[id].count);
-
- if(all_files[id].count > 0) {
- all_files[id].count--;
-
- if(!all_files[id].count) {
- debug_log(" >> slot %d is empty.", id);
-
- if(unlikely(file_descriptor_remove(&all_files[id]) != (void *)&all_files[id]))
- netdata_log_error("INTERNAL ERROR: removal of unused fd from index, removed a different fd");
-
-#ifdef NETDATA_INTERNAL_CHECKS
- all_files[id].magic = 0x00000000;
-#endif /* NETDATA_INTERNAL_CHECKS */
- all_files_len--;
- }
- }
- else
- netdata_log_error("Request to decrease counter of fd %d (%s), while the use counter is 0",
- id,
- all_files[id].name);
- }
- else
- netdata_log_error("Request to decrease counter of fd %d, which is outside the array size (1 to %d)",
- id,
- all_files_size);
-}
-
-static inline void all_files_grow() {
- void *old = all_files;
- int i;
-
- // there is no empty slot
- debug_log("extending fd array to %d entries", all_files_size + FILE_DESCRIPTORS_INCREASE_STEP);
-
- all_files = reallocz(all_files, (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP) * sizeof(struct file_descriptor));
-
- // if the address changed, we have to rebuild the index
- // since all pointers are now invalid
-
- if(unlikely(old && old != (void *)all_files)) {
- debug_log(" >> re-indexing.");
-
- all_files_index.root = NULL;
- for(i = 0; i < all_files_size; i++) {
- if(!all_files[i].count) continue;
- if(unlikely(file_descriptor_add(&all_files[i]) != (void *)&all_files[i]))
- netdata_log_error("INTERNAL ERROR: duplicate indexing of fd during realloc.");
- }
-
- debug_log(" >> re-indexing done.");
- }
-
- // initialize the newly added entries
-
- for(i = all_files_size; i < (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP); i++) {
- all_files[i].count = 0;
- all_files[i].name = NULL;
-#ifdef NETDATA_INTERNAL_CHECKS
- all_files[i].magic = 0x00000000;
-#endif /* NETDATA_INTERNAL_CHECKS */
- all_files[i].pos = i;
- }
-
-    // slot 0 is never used (the empty-slot search skips it), so the
-    // length starts counting from 1
-    if(unlikely(!all_files_size)) all_files_len = 1;
- all_files_size += FILE_DESCRIPTORS_INCREASE_STEP;
-}
-
-static inline int file_descriptor_set_on_empty_slot(const char *name, uint32_t hash, FD_FILETYPE type) {
- // check we have enough memory to add it
- if(!all_files || all_files_len == all_files_size)
- all_files_grow();
-
- debug_log(" >> searching for empty slot.");
-
- // search for an empty slot
-
- static int last_pos = 0;
- int i, c;
- for(i = 0, c = last_pos ; i < all_files_size ; i++, c++) {
- if(c >= all_files_size) c = 0;
- if(c == 0) continue;
-
- if(!all_files[c].count) {
- debug_log(" >> Examining slot %d.", c);
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(all_files[c].magic == 0x0BADCAFE && all_files[c].name && file_descriptor_find(all_files[c].name, all_files[c].hash))
- netdata_log_error("fd on position %d is not cleared properly. It still has %s in it.", c, all_files[c].name);
-#endif /* NETDATA_INTERNAL_CHECKS */
-
- debug_log(" >> %s fd position %d for %s (last name: %s)", all_files[c].name?"re-using":"using", c, name, all_files[c].name);
-
- freez((void *)all_files[c].name);
- all_files[c].name = NULL;
- last_pos = c;
- break;
- }
- }
-
- all_files_len++;
-
-    if(i == all_files_size)
-        fatal("We should find an empty slot, but there isn't any"); // fatal() does not return
- // else we have an empty slot in 'c'
-
- debug_log(" >> updating slot %d.", c);
-
- all_files[c].name = strdupz(name);
- all_files[c].hash = hash;
- all_files[c].type = type;
- all_files[c].pos = c;
- all_files[c].count = 1;
-#ifdef NETDATA_INTERNAL_CHECKS
- all_files[c].magic = 0x0BADCAFE;
-#endif /* NETDATA_INTERNAL_CHECKS */
- if(unlikely(file_descriptor_add(&all_files[c]) != (void *)&all_files[c]))
- netdata_log_error("INTERNAL ERROR: duplicate indexing of fd.");
-
- debug_log("using fd position %d (name: %s)", c, all_files[c].name);
-
- return c;
-}
-
-static inline int file_descriptor_find_or_add(const char *name, uint32_t hash) {
- if(unlikely(!hash))
- hash = simple_hash(name);
-
- debug_log("adding or finding name '%s' with hash %u", name, hash);
-
- struct file_descriptor *fd = file_descriptor_find(name, hash);
- if(fd) {
- // found
- debug_log(" >> found on slot %d", fd->pos);
-
- fd->count++;
- return fd->pos;
- }
- // not found
-
- FD_FILETYPE type;
- if(likely(name[0] == '/')) type = FILETYPE_FILE;
- else if(likely(strncmp(name, "pipe:", 5) == 0)) type = FILETYPE_PIPE;
- else if(likely(strncmp(name, "socket:", 7) == 0)) type = FILETYPE_SOCKET;
- else if(likely(strncmp(name, "anon_inode:", 11) == 0)) {
- const char *t = &name[11];
-
- if(strcmp(t, "inotify") == 0) type = FILETYPE_INOTIFY;
- else if(strcmp(t, "[eventfd]") == 0) type = FILETYPE_EVENTFD;
- else if(strcmp(t, "[eventpoll]") == 0) type = FILETYPE_EVENTPOLL;
- else if(strcmp(t, "[timerfd]") == 0) type = FILETYPE_TIMERFD;
- else if(strcmp(t, "[signalfd]") == 0) type = FILETYPE_SIGNALFD;
- else {
- debug_log("UNKNOWN anonymous inode: %s", name);
- type = FILETYPE_OTHER;
- }
- }
- else if(likely(strcmp(name, "inotify") == 0)) type = FILETYPE_INOTIFY;
- else {
- debug_log("UNKNOWN linkname: %s", name);
- type = FILETYPE_OTHER;
- }
-
- return file_descriptor_set_on_empty_slot(name, hash, type);
-}
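-
-// Worked examples of the classification above, using link names as they
-// appear under /proc/<pid>/fd (the paths are illustrative):
-//   "/var/log/syslog"        -> FILETYPE_FILE   (starts with '/')
-//   "pipe:[1234567]"         -> FILETYPE_PIPE
-//   "socket:[7654321]"       -> FILETYPE_SOCKET
-//   "anon_inode:[eventfd]"   -> FILETYPE_EVENTFD
-//   "anon_inode:[eventpoll]" -> FILETYPE_EVENTPOLL
-//   "anon_inode:inotify"     -> FILETYPE_INOTIFY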
-
-static inline void clear_pid_fd(struct pid_fd *pfd) {
- pfd->fd = 0;
-
-#ifndef __FreeBSD__
- pfd->link_hash = 0;
- pfd->inode = 0;
- pfd->cache_iterations_counter = 0;
- pfd->cache_iterations_reset = 0;
-#endif
-}
-
-static inline void make_all_pid_fds_negative(struct pid_stat *p) {
- struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size];
- while(pfd < pfdend) {
- pfd->fd = -(pfd->fd);
- pfd++;
- }
-}
-
-static inline void cleanup_negative_pid_fds(struct pid_stat *p) {
- struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size];
-
- while(pfd < pfdend) {
- int fd = pfd->fd;
-
- if(unlikely(fd < 0)) {
- file_descriptor_not_used(-(fd));
- clear_pid_fd(pfd);
- }
-
- pfd++;
- }
-}
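-
-// The helpers above implement a mark/sweep cycle over a pid's fds:
-//   1. make_all_pid_fds_negative() flags every known slot,
-//   2. the fd reader flips a slot back to positive when it sees the same
-//      descriptor again (or allocates a new one), and
-//   3. cleanup_negative_pid_fds() releases whatever stayed negative,
-//      i.e. descriptors the process has closed since the last iteration.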
-
-static inline void init_pid_fds(struct pid_stat *p, size_t first, size_t size) {
- struct pid_fd *pfd = &p->fds[first], *pfdend = &p->fds[first + size];
-
- while(pfd < pfdend) {
-#ifndef __FreeBSD__
- pfd->filename = NULL;
-#endif
- clear_pid_fd(pfd);
- pfd++;
- }
-}
-
-static inline int read_pid_file_descriptors(struct pid_stat *p, void *ptr) {
- (void)ptr;
-#ifdef __FreeBSD__
- int mib[4];
- size_t size;
- struct kinfo_file *fds;
- static char *fdsbuf;
- char *bfdsbuf, *efdsbuf;
- char fdsname[FILENAME_MAX + 1];
-#define SHM_FORMAT_LEN 31 // format: 21 + size: 10
- char shm_name[FILENAME_MAX - SHM_FORMAT_LEN + 1];
-
- // we make all pid fds negative, so that
- // we can detect unused file descriptors
- // at the end, to free them
- make_all_pid_fds_negative(p);
-
- mib[0] = CTL_KERN;
- mib[1] = KERN_PROC;
- mib[2] = KERN_PROC_FILEDESC;
- mib[3] = p->pid;
-
- if (unlikely(sysctl(mib, 4, NULL, &size, NULL, 0))) {
- netdata_log_error("sysctl error: Can't get file descriptors data size for pid %d", p->pid);
- return 0;
- }
- if (likely(size > 0))
- fdsbuf = reallocz(fdsbuf, size);
- if (unlikely(sysctl(mib, 4, fdsbuf, &size, NULL, 0))) {
- netdata_log_error("sysctl error: Can't get file descriptors data for pid %d", p->pid);
- return 0;
- }
-
- bfdsbuf = fdsbuf;
- efdsbuf = fdsbuf + size;
- while (bfdsbuf < efdsbuf) {
- fds = (struct kinfo_file *)(uintptr_t)bfdsbuf;
- if (unlikely(fds->kf_structsize == 0))
- break;
-
- // do not process file descriptors for current working directory, root directory,
- // jail directory, ktrace vnode, text vnode and controlling terminal
- if (unlikely(fds->kf_fd < 0)) {
- bfdsbuf += fds->kf_structsize;
- continue;
- }
-
- // get file descriptors array index
- size_t fdid = fds->kf_fd;
-
- // check if the fds array is small
- if (unlikely(fdid >= p->fds_size)) {
- // it is small, extend it
-
- debug_log("extending fd memory slots for %s from %d to %d", p->comm, p->fds_size, fdid + MAX_SPARE_FDS);
-
- p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(struct pid_fd));
-
- // and initialize it
- init_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size);
- p->fds_size = fdid + MAX_SPARE_FDS;
- }
-
- if (unlikely(p->fds[fdid].fd == 0)) {
- // we don't know this fd, get it
-
- switch (fds->kf_type) {
- case KF_TYPE_FIFO:
- case KF_TYPE_VNODE:
- if (unlikely(!fds->kf_path[0])) {
- sprintf(fdsname, "other: inode: %lu", fds->kf_un.kf_file.kf_file_fileid);
- break;
- }
- sprintf(fdsname, "%s", fds->kf_path);
- break;
- case KF_TYPE_SOCKET:
- switch (fds->kf_sock_domain) {
- case AF_INET:
- case AF_INET6:
- if (fds->kf_sock_protocol == IPPROTO_TCP)
- sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_inpcb);
- else
- sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_pcb);
- break;
- case AF_UNIX:
- /* print address of pcb and connected pcb */
- sprintf(fdsname, "socket: %lx %lx", fds->kf_un.kf_sock.kf_sock_pcb, fds->kf_un.kf_sock.kf_sock_unpconn);
- break;
- default:
- /* print protocol number and socket address */
-#if __FreeBSD_version < 1200031
- sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_sa_local.__ss_pad1, fds->kf_sa_local.__ss_pad2);
-#else
- sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sa_local.__ss_pad1, fds->kf_un.kf_sock.kf_sa_local.__ss_pad2);
-#endif
- }
- break;
- case KF_TYPE_PIPE:
- sprintf(fdsname, "pipe: %lu %lu", fds->kf_un.kf_pipe.kf_pipe_addr, fds->kf_un.kf_pipe.kf_pipe_peer);
- break;
- case KF_TYPE_PTS:
-#if __FreeBSD_version < 1200031
- sprintf(fdsname, "other: pts: %u", fds->kf_un.kf_pts.kf_pts_dev);
-#else
- sprintf(fdsname, "other: pts: %lu", fds->kf_un.kf_pts.kf_pts_dev);
-#endif
- break;
- case KF_TYPE_SHM:
- strncpyz(shm_name, fds->kf_path, FILENAME_MAX - SHM_FORMAT_LEN);
- sprintf(fdsname, "other: shm: %s size: %lu", shm_name, fds->kf_un.kf_file.kf_file_size);
- break;
- case KF_TYPE_SEM:
- sprintf(fdsname, "other: sem: %u", fds->kf_un.kf_sem.kf_sem_value);
- break;
- default:
- sprintf(fdsname, "other: pid: %d fd: %d", fds->kf_un.kf_proc.kf_pid, fds->kf_fd);
- }
-
- // if another process already has this, we will get
- // the same id
- p->fds[fdid].fd = file_descriptor_find_or_add(fdsname, 0);
- }
-
- // else make it positive again, we need it
- // of course, the actual file may have changed
-
- else
- p->fds[fdid].fd = -p->fds[fdid].fd;
-
- bfdsbuf += fds->kf_structsize;
- }
-#else
- if(unlikely(!p->fds_dirname)) {
- char dirname[FILENAME_MAX+1];
- snprintfz(dirname, FILENAME_MAX, "%s/proc/%d/fd", netdata_configured_host_prefix, p->pid);
- p->fds_dirname = strdupz(dirname);
- }
-
- DIR *fds = opendir(p->fds_dirname);
- if(unlikely(!fds)) return 0;
-
- struct dirent *de;
- char linkname[FILENAME_MAX + 1];
-
- // we make all pid fds negative, so that
- // we can detect unused file descriptors
- // at the end, to free them
- make_all_pid_fds_negative(p);
-
- while((de = readdir(fds))) {
- // we need only files with numeric names
-
- if(unlikely(de->d_name[0] < '0' || de->d_name[0] > '9'))
- continue;
-
- // get its number
- int fdid = (int) str2l(de->d_name);
- if(unlikely(fdid < 0)) continue;
-
- // check if the fds array is small
- if(unlikely((size_t)fdid >= p->fds_size)) {
- // it is small, extend it
-
- debug_log("extending fd memory slots for %s from %d to %d"
- , p->comm
- , p->fds_size
- , fdid + MAX_SPARE_FDS
- );
-
- p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(struct pid_fd));
-
- // and initialize it
- init_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size);
- p->fds_size = (size_t)fdid + MAX_SPARE_FDS;
- }
-
- if(unlikely(p->fds[fdid].fd < 0 && de->d_ino != p->fds[fdid].inode)) {
- // inodes do not match, clear the previous entry
- inodes_changed_counter++;
- file_descriptor_not_used(-p->fds[fdid].fd);
- clear_pid_fd(&p->fds[fdid]);
- }
-
- if(p->fds[fdid].fd < 0 && p->fds[fdid].cache_iterations_counter > 0) {
- p->fds[fdid].fd = -p->fds[fdid].fd;
- p->fds[fdid].cache_iterations_counter--;
- continue;
- }
-
- if(unlikely(!p->fds[fdid].filename)) {
- filenames_allocated_counter++;
- char fdname[FILENAME_MAX + 1];
- snprintfz(fdname, FILENAME_MAX, "%s/proc/%d/fd/%s", netdata_configured_host_prefix, p->pid, de->d_name);
- p->fds[fdid].filename = strdupz(fdname);
- }
-
- file_counter++;
- ssize_t l = readlink(p->fds[fdid].filename, linkname, FILENAME_MAX);
- if(unlikely(l == -1)) {
- // cannot read the link
-
- if(debug_enabled || (p->target && p->target->debug_enabled))
- netdata_log_error("Cannot read link %s", p->fds[fdid].filename);
-
- if(unlikely(p->fds[fdid].fd < 0)) {
- file_descriptor_not_used(-p->fds[fdid].fd);
- clear_pid_fd(&p->fds[fdid]);
- }
-
- continue;
- }
- else
- linkname[l] = '\0';
-
- uint32_t link_hash = simple_hash(linkname);
-
- if(unlikely(p->fds[fdid].fd < 0 && p->fds[fdid].link_hash != link_hash)) {
- // the link changed
- links_changed_counter++;
- file_descriptor_not_used(-p->fds[fdid].fd);
- clear_pid_fd(&p->fds[fdid]);
- }
-
- if(unlikely(p->fds[fdid].fd == 0)) {
- // we don't know this fd, get it
-
- // if another process already has this, we will get
- // the same id
- p->fds[fdid].fd = file_descriptor_find_or_add(linkname, link_hash);
- p->fds[fdid].inode = de->d_ino;
- p->fds[fdid].link_hash = link_hash;
- }
- else {
- // else make it positive again, we need it
- p->fds[fdid].fd = -p->fds[fdid].fd;
- }
-
- // caching control
- // without this we read all the files on every iteration
- if(max_fds_cache_seconds > 0) {
- size_t spread = ((size_t)max_fds_cache_seconds > 10) ? 10 : (size_t)max_fds_cache_seconds;
-
- // cache it for a few iterations
- size_t max = ((size_t) max_fds_cache_seconds + (fdid % spread)) / (size_t) update_every;
- p->fds[fdid].cache_iterations_reset++;
-
- if(unlikely(p->fds[fdid].cache_iterations_reset % spread == (size_t) fdid % spread))
- p->fds[fdid].cache_iterations_reset++;
-
- if(unlikely((fdid <= 2 && p->fds[fdid].cache_iterations_reset > 5) ||
- p->fds[fdid].cache_iterations_reset > max)) {
-                // stdin/stdout/stderr (fdid <= 2) settle after a few checks; in every case cap the reset period at max
- p->fds[fdid].cache_iterations_reset = max;
- }
-
- p->fds[fdid].cache_iterations_counter = p->fds[fdid].cache_iterations_reset;
- }
- }
-
- closedir(fds);
-#endif
- cleanup_negative_pid_fds(p);
-
- return 1;
-}
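-
-// Worked example of the caching control above (illustrative numbers):
-// with max_fds_cache_seconds = 60, update_every = 1 and fdid = 7, we get
-// spread = 10 and max = (60 + 7 % 10) / 1 = 67, so this fd's link is
-// re-read once every cache_iterations_reset iterations, with the reset
-// period growing until it is capped at 67; the per-fd modulo staggers the
-// re-reads so they do not all land on the same pass.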
-
-// ----------------------------------------------------------------------------
-
-static inline int debug_print_process_and_parents(struct pid_stat *p, usec_t time) {
- char *prefix = "\\_ ";
- int indent = 0;
-
- if(p->parent)
- indent = debug_print_process_and_parents(p->parent, p->stat_collected_usec);
- else
- prefix = " > ";
-
- char buffer[indent + 1];
- int i;
-
- for(i = 0; i < indent ;i++) buffer[i] = ' ';
- buffer[i] = '\0';
-
- fprintf(stderr, " %s %s%s (%d %s %"PRIu64""
- , buffer
- , prefix
- , p->comm
- , p->pid
- , p->updated?"running":"exited"
- , p->stat_collected_usec - time
- );
-
- if(p->utime) fprintf(stderr, " utime=" KERNEL_UINT_FORMAT, p->utime);
- if(p->stime) fprintf(stderr, " stime=" KERNEL_UINT_FORMAT, p->stime);
- if(p->gtime) fprintf(stderr, " gtime=" KERNEL_UINT_FORMAT, p->gtime);
- if(p->cutime) fprintf(stderr, " cutime=" KERNEL_UINT_FORMAT, p->cutime);
- if(p->cstime) fprintf(stderr, " cstime=" KERNEL_UINT_FORMAT, p->cstime);
- if(p->cgtime) fprintf(stderr, " cgtime=" KERNEL_UINT_FORMAT, p->cgtime);
- if(p->minflt) fprintf(stderr, " minflt=" KERNEL_UINT_FORMAT, p->minflt);
- if(p->cminflt) fprintf(stderr, " cminflt=" KERNEL_UINT_FORMAT, p->cminflt);
- if(p->majflt) fprintf(stderr, " majflt=" KERNEL_UINT_FORMAT, p->majflt);
- if(p->cmajflt) fprintf(stderr, " cmajflt=" KERNEL_UINT_FORMAT, p->cmajflt);
- fprintf(stderr, ")\n");
-
- return indent + 1;
-}
-
-static inline void debug_print_process_tree(struct pid_stat *p, char *msg __maybe_unused) {
- debug_log("%s: process %s (%d, %s) with parents:", msg, p->comm, p->pid, p->updated?"running":"exited");
- debug_print_process_and_parents(p, p->stat_collected_usec);
-}
-
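-// lost-resource types used below: 1 = minflt, 2 = majflt, 3 = utime,
-// 4 = stime, 5 = gtime (matching the callers in process_exited_processes())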
-static inline void debug_find_lost_child(struct pid_stat *pe, kernel_uint_t lost, int type) {
- int found = 0;
- struct pid_stat *p = NULL;
-
- for(p = root_of_pids; p ; p = p->next) {
- if(p == pe) continue;
-
- switch(type) {
- case 1:
- if(p->cminflt > lost) {
- fprintf(stderr, " > process %d (%s) could use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
- found++;
- }
- break;
-
- case 2:
- if(p->cmajflt > lost) {
- fprintf(stderr, " > process %d (%s) could use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
- found++;
- }
- break;
-
- case 3:
- if(p->cutime > lost) {
- fprintf(stderr, " > process %d (%s) could use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
- found++;
- }
- break;
-
- case 4:
- if(p->cstime > lost) {
- fprintf(stderr, " > process %d (%s) could use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
- found++;
- }
- break;
-
- case 5:
- if(p->cgtime > lost) {
- fprintf(stderr, " > process %d (%s) could use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
- found++;
- }
- break;
- }
- }
-
- if(!found) {
- switch(type) {
- case 1:
- fprintf(stderr, " > cannot find any process to use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
- break;
-
- case 2:
- fprintf(stderr, " > cannot find any process to use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
- break;
-
- case 3:
- fprintf(stderr, " > cannot find any process to use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
- break;
-
- case 4:
- fprintf(stderr, " > cannot find any process to use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
- break;
-
- case 5:
- fprintf(stderr, " > cannot find any process to use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
- break;
- }
- }
-}
-
-static inline kernel_uint_t remove_exited_child_from_parent(kernel_uint_t *field, kernel_uint_t *pfield) {
- kernel_uint_t absorbed = 0;
-
- if(*field > *pfield) {
- absorbed += *pfield;
- *field -= *pfield;
- *pfield = 0;
- }
- else {
- absorbed += *field;
- *pfield -= *field;
- *field = 0;
- }
-
- return absorbed;
-}
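-
-// Worked example: if an exited child still carries utime = 100 and its
-// parent's cutime is 60, the call absorbs 60 (the parent's cutime drops to
-// 0 and the child keeps 40 for an ancestor higher up); if the parent's
-// cutime were 150 instead, all 100 would be absorbed and the parent would
-// keep 50.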
-
-static inline void process_exited_processes() {
- struct pid_stat *p;
-
- for(p = root_of_pids; p ; p = p->next) {
- if(p->updated || !p->stat_collected_usec)
- continue;
-
- kernel_uint_t utime = (p->utime_raw + p->cutime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
- kernel_uint_t stime = (p->stime_raw + p->cstime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
- kernel_uint_t gtime = (p->gtime_raw + p->cgtime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
- kernel_uint_t minflt = (p->minflt_raw + p->cminflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
- kernel_uint_t majflt = (p->majflt_raw + p->cmajflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
-
- if(utime + stime + gtime + minflt + majflt == 0)
- continue;
-
- if(unlikely(debug_enabled)) {
- debug_log("Absorb %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")"
- , p->comm
- , p->pid
- , p->updated?"running":"exited"
- , utime
- , stime
- , gtime
- , minflt
- , majflt
- );
- debug_print_process_tree(p, "Searching parents");
- }
-
- struct pid_stat *pp;
- for(pp = p->parent; pp ; pp = pp->parent) {
- if(!pp->updated) continue;
-
- kernel_uint_t absorbed;
- absorbed = remove_exited_child_from_parent(&utime, &pp->cutime);
- if(unlikely(debug_enabled && absorbed))
- debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " utime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, utime);
-
- absorbed = remove_exited_child_from_parent(&stime, &pp->cstime);
- if(unlikely(debug_enabled && absorbed))
- debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " stime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, stime);
-
- absorbed = remove_exited_child_from_parent(&gtime, &pp->cgtime);
- if(unlikely(debug_enabled && absorbed))
- debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " gtime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, gtime);
-
- absorbed = remove_exited_child_from_parent(&minflt, &pp->cminflt);
- if(unlikely(debug_enabled && absorbed))
- debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " minflt (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, minflt);
-
- absorbed = remove_exited_child_from_parent(&majflt, &pp->cmajflt);
- if(unlikely(debug_enabled && absorbed))
- debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " majflt (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, majflt);
- }
-
- if(unlikely(utime + stime + gtime + minflt + majflt > 0)) {
- if(unlikely(debug_enabled)) {
- if(utime) debug_find_lost_child(p, utime, 3);
- if(stime) debug_find_lost_child(p, stime, 4);
- if(gtime) debug_find_lost_child(p, gtime, 5);
- if(minflt) debug_find_lost_child(p, minflt, 1);
- if(majflt) debug_find_lost_child(p, majflt, 2);
- }
-
- p->keep = true;
-
- debug_log(" > remaining resources - KEEP - for another loop: %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")"
- , p->comm
- , p->pid
- , p->updated?"running":"exited"
- , utime
- , stime
- , gtime
- , minflt
- , majflt
- );
-
- for(pp = p->parent; pp ; pp = pp->parent) {
- if(pp->updated) break;
- pp->keep = true;
-
- debug_log(" > - KEEP - parent for another loop: %s (%d %s)"
- , pp->comm
- , pp->pid
- , pp->updated?"running":"exited"
- );
- }
-
- p->utime_raw = utime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
- p->stime_raw = stime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
- p->gtime_raw = gtime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
- p->minflt_raw = minflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
- p->majflt_raw = majflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
- p->cutime_raw = p->cstime_raw = p->cgtime_raw = p->cminflt_raw = p->cmajflt_raw = 0;
-
- debug_log(" ");
- }
- else
- debug_log(" > totally absorbed - DONE - %s (%d %s)"
- , p->comm
- , p->pid
- , p->updated?"running":"exited"
- );
- }
-}
-
-static inline void link_all_processes_to_their_parents(void) {
- struct pid_stat *p, *pp;
-
- // link all children to their parents
- // and update children count on parents
- for(p = root_of_pids; p ; p = p->next) {
- // for each process found
-
- p->sortlist = 0;
- p->parent = NULL;
-
- if(unlikely(!p->ppid)) {
- //unnecessary code from apps_plugin.c
- //p->parent = NULL;
- continue;
- }
-
- pp = all_pids[p->ppid];
- if(likely(pp)) {
- p->parent = pp;
- pp->children_count++;
-
- if(unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
- debug_log_int("child %d (%s, %s) on target '%s' has parent %d (%s, %s). Parent: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->pid, p->comm, p->updated?"running":"exited", (p->target)?p->target->name:"UNSET", pp->pid, pp->comm, pp->updated?"running":"exited", pp->utime, pp->stime, pp->gtime, pp->minflt, pp->majflt, pp->cutime, pp->cstime, pp->cgtime, pp->cminflt, pp->cmajflt);
- }
- else {
- p->parent = NULL;
- netdata_log_error("pid %d %s states parent %d, but the later does not exist.", p->pid, p->comm, p->ppid);
- }
- }
-}
-
-// ----------------------------------------------------------------------------
-
-// 1. read all files in /proc
-// 2. for each numeric directory:
-// i. read /proc/pid/stat
-// ii. read /proc/pid/status
-// iii. read /proc/pid/io (requires root access)
-// iii. read the entries in directory /proc/pid/fd (requires root access)
-// for each entry:
-// a. find or create a struct file_descriptor
-// b. cleanup any old/unused file_descriptors
-
-// after all these, some pids may be linked to targets, while others may not
-
-// in case of errors, only 1 every 1000 errors is printed
-// to avoid filling up all disk space
-// if debug is enabled, all errors are printed
-
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
-static int compar_pid(const void *pid1, const void *pid2) {
-
- struct pid_stat *p1 = all_pids[*((pid_t *)pid1)];
- struct pid_stat *p2 = all_pids[*((pid_t *)pid2)];
-
- if(p1->sortlist > p2->sortlist)
- return -1;
- else
- return 1;
-}
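-
-// compar_pid() sorts pids by sortlist in descending order; leaves get the
-// lowest sortlist values in apply_apps_groups_targets_inheritance() and
-// init gets the highest, so a qsort() with it visits parents before their
-// children.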
-#endif
-
-static inline int collect_data_for_pid(pid_t pid, void *ptr) {
- if(unlikely(pid < 0 || pid > pid_max)) {
- netdata_log_error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max);
- return 0;
- }
-
- struct pid_stat *p = get_pid_entry(pid);
- if(unlikely(!p || p->read)) return 0;
- p->read = true;
-
- // debug_log("Reading process %d (%s), sortlist %d", p->pid, p->comm, p->sortlist);
-
- // --------------------------------------------------------------------
- // /proc/<pid>/stat
-
- if(unlikely(!managed_log(p, PID_LOG_STAT, read_proc_pid_stat(p, ptr))))
- // there is no reason to proceed if we cannot get its status
- return 0;
-
- // check its parent pid
- if(unlikely(p->ppid < 0 || p->ppid > pid_max)) {
- netdata_log_error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid);
- p->ppid = 0;
- }
-
- // --------------------------------------------------------------------
- // /proc/<pid>/io
-
- managed_log(p, PID_LOG_IO, read_proc_pid_io(p, ptr));
-
- // --------------------------------------------------------------------
- // /proc/<pid>/status
-
- if(unlikely(!managed_log(p, PID_LOG_STATUS, read_proc_pid_status(p, ptr))))
- // there is no reason to proceed if we cannot get its status
- return 0;
-
- // --------------------------------------------------------------------
- // /proc/<pid>/fd
-
- if(enable_file_charts) {
- managed_log(p, PID_LOG_FDS, read_pid_file_descriptors(p, ptr));
- managed_log(p, PID_LOG_LIMITS, read_proc_pid_limits(p, ptr));
- }
-
- // --------------------------------------------------------------------
- // done!
-
- if(unlikely(debug_enabled && include_exited_childs && all_pids_count && p->ppid && all_pids[p->ppid] && !all_pids[p->ppid]->read))
- debug_log("Read process %d (%s) sortlisted %d, but its parent %d (%s) sortlisted %d, is not read", p->pid, p->comm, p->sortlist, all_pids[p->ppid]->pid, all_pids[p->ppid]->comm, all_pids[p->ppid]->sortlist);
-
- // mark it as updated
- p->updated = true;
- p->keep = false;
- p->keeploops = 0;
-
- return 1;
-}
-
-static int collect_data_for_all_processes(void) {
- struct pid_stat *p = NULL;
-
-#ifndef __FreeBSD__
- // clear process state counter
- memset(proc_state_count, 0, sizeof proc_state_count);
-#else
- int i, procnum;
-
- static size_t procbase_size = 0;
- static struct kinfo_proc *procbase = NULL;
-
- size_t new_procbase_size;
-
- int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC };
- if (unlikely(sysctl(mib, 3, NULL, &new_procbase_size, NULL, 0))) {
- netdata_log_error("sysctl error: Can't get processes data size");
- return 0;
- }
-
-    // leave some headroom for processes that may be started
-    // while we are collecting
- new_procbase_size += 100 * sizeof(struct kinfo_proc);
-
- // increase the buffer if needed
- if(new_procbase_size > procbase_size) {
- procbase_size = new_procbase_size;
- procbase = reallocz(procbase, procbase_size);
- }
-
- // sysctl() gets from new_procbase_size the buffer size
- // and also returns to it the amount of data filled in
- new_procbase_size = procbase_size;
-
- // get the processes from the system
- if (unlikely(sysctl(mib, 3, procbase, &new_procbase_size, NULL, 0))) {
- netdata_log_error("sysctl error: Can't get processes data");
- return 0;
- }
-
- // based on the amount of data filled in
- // calculate the number of processes we got
- procnum = new_procbase_size / sizeof(struct kinfo_proc);
-
-#endif
-
- if(all_pids_count) {
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
- size_t slc = 0;
-#endif
- for(p = root_of_pids; p ; p = p->next) {
- p->read = false; // mark it as not read, so that collect_data_for_pid() will read it
- p->updated = false;
- p->merged = false;
- p->children_count = 0;
- p->parent = NULL;
-
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
- all_pids_sortlist[slc++] = p->pid;
-#endif
- }
-
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
- if(unlikely(slc != all_pids_count)) {
- netdata_log_error("Internal error: I was thinking I had %zu processes in my arrays, but it seems there are %zu.", all_pids_count, slc);
- all_pids_count = slc;
- }
-
- if(include_exited_childs) {
-            // Read parents before children.
-            // This prevents a situation where a child is found running
-            // but, by the time we read its parent, the child has exited
-            // and its parent has already accumulated its resources.
-
- qsort((void *)all_pids_sortlist, (size_t)all_pids_count, sizeof(pid_t), compar_pid);
-
- // we forward read all running processes
- // collect_data_for_pid() is smart enough,
- // not to read the same pid twice per iteration
- for(slc = 0; slc < all_pids_count; slc++) {
- collect_data_for_pid(all_pids_sortlist[slc], NULL);
- }
- }
-#endif
- }
-
-#ifdef __FreeBSD__
- for (i = 0 ; i < procnum ; ++i) {
- pid_t pid = procbase[i].ki_pid;
- collect_data_for_pid(pid, &procbase[i]);
- }
-#else
- static char uptime_filename[FILENAME_MAX + 1] = "";
- if(*uptime_filename == '\0')
- snprintfz(uptime_filename, FILENAME_MAX, "%s/proc/uptime", netdata_configured_host_prefix);
-
- global_uptime = (kernel_uint_t)(uptime_msec(uptime_filename) / MSEC_PER_SEC);
-
- char dirname[FILENAME_MAX + 1];
-
- snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
- DIR *dir = opendir(dirname);
- if(!dir) return 0;
-
- struct dirent *de = NULL;
-
- while((de = readdir(dir))) {
- char *endptr = de->d_name;
-
- if(unlikely(de->d_type != DT_DIR || de->d_name[0] < '0' || de->d_name[0] > '9'))
- continue;
-
- pid_t pid = (pid_t) strtoul(de->d_name, &endptr, 10);
-
- // make sure we read a valid number
- if(unlikely(endptr == de->d_name || *endptr != '\0'))
- continue;
-
- collect_data_for_pid(pid, NULL);
- }
- closedir(dir);
-#endif
-
- if(!all_pids_count)
- return 0;
-
-    // we need /proc/stat to normalize the cpu consumption of the exited children
- read_global_time();
-
- // build the process tree
- link_all_processes_to_their_parents();
-
-    // normally every process is linked to a parent by now;
-    // however, some processes may have exited while we were collecting
-    // values, so find the exited ones and hand the resources they
-    // accumulated over to their parents
- process_exited_processes();
- return 1;
-}
-
-// ----------------------------------------------------------------------------
-// update statistics on the targets
-
-// 1. link all children to their parents
-// 2. go from bottom to top, merging all children into their parents;
-//    this step links all parents without a target to the child's target, if any
-// 3. link all top level processes (the ones not merged) to the default target
-// 4. go from top to bottom, linking all children without a target to their parent's target
-// after this step, all processes have a target
-// [5. for each killed pid (updated = 0), remove its usage from its target]
-// 6. zero all apps_groups_targets
-// 7. concentrate all values on the apps_groups_targets
-// 8. remove all killed processes
-// 9. find the unique file count for each target
-// check: update_apps_groups_statistics()
-
-static void cleanup_exited_pids(void) {
- size_t c;
- struct pid_stat *p = NULL;
-
- for(p = root_of_pids; p ;) {
- if(!p->updated && (!p->keep || p->keeploops > 0)) {
- if(unlikely(debug_enabled && (p->keep || p->keeploops)))
- debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, p->comm);
-
- for(c = 0; c < p->fds_size; c++)
- if(p->fds[c].fd > 0) {
- file_descriptor_not_used(p->fds[c].fd);
- clear_pid_fd(&p->fds[c]);
- }
-
- pid_t r = p->pid;
- p = p->next;
- del_pid_entry(r);
- }
- else {
- if(unlikely(p->keep)) p->keeploops++;
- p->keep = false;
- p = p->next;
- }
- }
-}
-
-static void apply_apps_groups_targets_inheritance(void) {
- struct pid_stat *p = NULL;
-
- // children that do not have a target
- // inherit their target from their parent
- int found = 1, loops = 0;
- while(found) {
- if(unlikely(debug_enabled)) loops++;
- found = 0;
- for(p = root_of_pids; p ; p = p->next) {
- // if this process does not have a target,
- // and it has a parent
- // and its parent has a target
- // then, set the parent's target to this process
- if(unlikely(!p->target && p->parent && p->parent->target)) {
- p->target = p->parent->target;
- found++;
-
- if(debug_enabled || (p->target && p->target->debug_enabled))
- debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s).", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm);
- }
- }
- }
-
- // find all the procs with 0 childs and merge them to their parents
- // repeat, until nothing more can be done.
- int sortlist = 1;
- found = 1;
- while(found) {
- if(unlikely(debug_enabled)) loops++;
- found = 0;
-
- for(p = root_of_pids; p ; p = p->next) {
- if(unlikely(!p->sortlist && !p->children_count))
- p->sortlist = sortlist++;
-
- if(unlikely(
- !p->children_count // if this process does not have any children
- && !p->merged // and is not already merged
- && p->parent // and has a parent
- && p->parent->children_count // and its parent has children
- // and the target of this process and its parent is the same,
- // or the parent does not have a target
- && (p->target == p->parent->target || !p->parent->target)
- && p->ppid != INIT_PID // and its parent is not init
- )) {
- // mark it as merged
- p->parent->children_count--;
- p->merged = true;
-
- // the parent inherits the child's target, if it does not have a target itself
- if(unlikely(p->target && !p->parent->target)) {
- p->parent->target = p->target;
-
- if(debug_enabled || (p->target && p->target->debug_enabled))
- debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its child %d (%s).", p->target->name, p->parent->pid, p->parent->comm, p->pid, p->comm);
- }
-
- found++;
- }
- }
-
- debug_log("TARGET INHERITANCE: merged %d processes", found);
- }
-
-    // init always goes to the default target
-    if(all_pids[INIT_PID] && !all_pids[INIT_PID]->matched_by_config)
-        all_pids[INIT_PID]->target = apps_groups_default_target;
-
-    // pid 0 always goes to the default target
-    if(all_pids[0] && !all_pids[0]->matched_by_config)
-        all_pids[0]->target = apps_groups_default_target;
-
- // give a default target on all top level processes
- if(unlikely(debug_enabled)) loops++;
- for(p = root_of_pids; p ; p = p->next) {
- // if the process is not merged itself
- // then it is a top level process
- if(unlikely(!p->merged && !p->target))
- p->target = apps_groups_default_target;
-
- // make sure all processes have a sortlist
- if(unlikely(!p->sortlist))
- p->sortlist = sortlist++;
- }
-
- if(all_pids[1])
- all_pids[1]->sortlist = sortlist++;
-
- // give a target to all merged child processes
- found = 1;
- while(found) {
- if(unlikely(debug_enabled)) loops++;
- found = 0;
- for(p = root_of_pids; p ; p = p->next) {
- if(unlikely(!p->target && p->merged && p->parent && p->parent->target)) {
- p->target = p->parent->target;
- found++;
-
- if(debug_enabled || (p->target && p->target->debug_enabled))
- debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s) at phase 2.", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm);
- }
- }
- }
-
- debug_log("apply_apps_groups_targets_inheritance() made %d loops on the process tree", loops);
-}
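-
-// Worked example of the inheritance above: for a tree
-// init -> sshd (target "ssh") -> bash (no target) -> vim (no target),
-// the first phase gives bash and vim the "ssh" target of sshd; a childless,
-// targetless helper forked by vim is then merged upwards and picks up
-// "ssh" in the final phase, so every process ends up with exactly one
-// target. (The names and the "ssh" target are illustrative.)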
-
-static size_t zero_all_targets(struct target *root) {
- struct target *w;
- size_t count = 0;
-
- for (w = root; w ; w = w->next) {
- count++;
-
- w->minflt = 0;
- w->majflt = 0;
- w->utime = 0;
- w->stime = 0;
- w->gtime = 0;
- w->cminflt = 0;
- w->cmajflt = 0;
- w->cutime = 0;
- w->cstime = 0;
- w->cgtime = 0;
- w->num_threads = 0;
- // w->rss = 0;
- w->processes = 0;
-
- w->status_vmsize = 0;
- w->status_vmrss = 0;
- w->status_vmshared = 0;
- w->status_rssfile = 0;
- w->status_rssshmem = 0;
- w->status_vmswap = 0;
- w->status_voluntary_ctxt_switches = 0;
- w->status_nonvoluntary_ctxt_switches = 0;
-
- w->io_logical_bytes_read = 0;
- w->io_logical_bytes_written = 0;
- w->io_read_calls = 0;
- w->io_write_calls = 0;
- w->io_storage_bytes_read = 0;
- w->io_storage_bytes_written = 0;
- w->io_cancelled_write_bytes = 0;
-
- // zero file counters
- if(w->target_fds) {
- memset(w->target_fds, 0, sizeof(int) * w->target_fds_size);
- w->openfds.files = 0;
- w->openfds.pipes = 0;
- w->openfds.sockets = 0;
- w->openfds.inotifies = 0;
- w->openfds.eventfds = 0;
- w->openfds.timerfds = 0;
- w->openfds.signalfds = 0;
- w->openfds.eventpolls = 0;
- w->openfds.other = 0;
-
- w->max_open_files_percent = 0.0;
- }
-
- w->collected_starttime = 0;
- w->uptime_min = 0;
- w->uptime_sum = 0;
- w->uptime_max = 0;
-
- if(unlikely(w->root_pid)) {
- struct pid_on_target *pid_on_target_to_free, *pid_on_target = w->root_pid;
-
- while(pid_on_target) {
- pid_on_target_to_free = pid_on_target;
- pid_on_target = pid_on_target->next;
- freez(pid_on_target_to_free);
- }
-
- w->root_pid = NULL;
- }
- }
-
- return count;
-}
-
-static inline void reallocate_target_fds(struct target *w) {
- if(unlikely(!w))
- return;
-
- if(unlikely(!w->target_fds || w->target_fds_size < all_files_size)) {
- w->target_fds = reallocz(w->target_fds, sizeof(int) * all_files_size);
- memset(&w->target_fds[w->target_fds_size], 0, sizeof(int) * (all_files_size - w->target_fds_size));
- w->target_fds_size = all_files_size;
- }
-}
-
-static void aggregate_fd_type_on_openfds(FD_FILETYPE type, struct openfds *openfds) {
- switch(type) {
- case FILETYPE_FILE:
- openfds->files++;
- break;
-
- case FILETYPE_PIPE:
- openfds->pipes++;
- break;
-
- case FILETYPE_SOCKET:
- openfds->sockets++;
- break;
-
- case FILETYPE_INOTIFY:
- openfds->inotifies++;
- break;
-
- case FILETYPE_EVENTFD:
- openfds->eventfds++;
- break;
-
- case FILETYPE_TIMERFD:
- openfds->timerfds++;
- break;
-
- case FILETYPE_SIGNALFD:
- openfds->signalfds++;
- break;
-
- case FILETYPE_EVENTPOLL:
- openfds->eventpolls++;
- break;
-
- case FILETYPE_OTHER:
- openfds->other++;
- break;
- }
-}
-
-static inline void aggregate_fd_on_target(int fd, struct target *w) {
- if(unlikely(!w))
- return;
-
- if(unlikely(w->target_fds[fd])) {
- // it is already aggregated
- // just increase its usage counter
- w->target_fds[fd]++;
- return;
- }
-
- // increase its usage counter
- // so that we will not add it again
- w->target_fds[fd]++;
-
-    aggregate_fd_type_on_openfds(all_files[fd].type, &w->openfds);
-}
-
-static inline void aggregate_pid_fds_on_targets(struct pid_stat *p) {
-
- if(unlikely(!p->updated)) {
- // the process is not running
- return;
- }
-
- struct target *w = p->target, *u = p->user_target, *g = p->group_target;
-
- reallocate_target_fds(w);
- reallocate_target_fds(u);
- reallocate_target_fds(g);
-
- p->openfds.files = 0;
- p->openfds.pipes = 0;
- p->openfds.sockets = 0;
- p->openfds.inotifies = 0;
- p->openfds.eventfds = 0;
- p->openfds.timerfds = 0;
- p->openfds.signalfds = 0;
- p->openfds.eventpolls = 0;
- p->openfds.other = 0;
-
- long currentfds = 0;
- size_t c, size = p->fds_size;
- struct pid_fd *fds = p->fds;
- for(c = 0; c < size ;c++) {
- int fd = fds[c].fd;
-
- if(likely(fd <= 0 || fd >= all_files_size))
- continue;
-
- currentfds++;
-        aggregate_fd_type_on_openfds(all_files[fd].type, &p->openfds);
-
- aggregate_fd_on_target(fd, w);
- aggregate_fd_on_target(fd, u);
- aggregate_fd_on_target(fd, g);
- }
-}
-
-static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o) {
- (void)o;
-
- if(unlikely(!p->updated)) {
- // the process is not running
- return;
- }
-
- if(unlikely(!w)) {
- netdata_log_error("pid %d %s was left without a target!", p->pid, p->comm);
- return;
- }
-
- if(p->openfds_limits_percent > w->max_open_files_percent)
- w->max_open_files_percent = p->openfds_limits_percent;
-
- w->cutime += p->cutime;
- w->cstime += p->cstime;
- w->cgtime += p->cgtime;
- w->cminflt += p->cminflt;
- w->cmajflt += p->cmajflt;
-
- w->utime += p->utime;
- w->stime += p->stime;
- w->gtime += p->gtime;
- w->minflt += p->minflt;
- w->majflt += p->majflt;
-
- // w->rss += p->rss;
-
- w->status_vmsize += p->status_vmsize;
- w->status_vmrss += p->status_vmrss;
- w->status_vmshared += p->status_vmshared;
- w->status_rssfile += p->status_rssfile;
- w->status_rssshmem += p->status_rssshmem;
- w->status_vmswap += p->status_vmswap;
- w->status_voluntary_ctxt_switches += p->status_voluntary_ctxt_switches;
- w->status_nonvoluntary_ctxt_switches += p->status_nonvoluntary_ctxt_switches;
-
- w->io_logical_bytes_read += p->io_logical_bytes_read;
- w->io_logical_bytes_written += p->io_logical_bytes_written;
- w->io_read_calls += p->io_read_calls;
- w->io_write_calls += p->io_write_calls;
- w->io_storage_bytes_read += p->io_storage_bytes_read;
- w->io_storage_bytes_written += p->io_storage_bytes_written;
- w->io_cancelled_write_bytes += p->io_cancelled_write_bytes;
-
- w->processes++;
- w->num_threads += p->num_threads;
-
- if(!w->collected_starttime || p->collected_starttime < w->collected_starttime) w->collected_starttime = p->collected_starttime;
- if(!w->uptime_min || p->uptime < w->uptime_min) w->uptime_min = p->uptime;
- w->uptime_sum += p->uptime;
- if(!w->uptime_max || w->uptime_max < p->uptime) w->uptime_max = p->uptime;
-
- if(unlikely(debug_enabled || w->debug_enabled)) {
- debug_log_int("aggregating '%s' pid %d on target '%s' utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->comm, p->pid, w->name, p->utime, p->stime, p->gtime, p->cutime, p->cstime, p->cgtime, p->minflt, p->majflt, p->cminflt, p->cmajflt);
-
- struct pid_on_target *pid_on_target = mallocz(sizeof(struct pid_on_target));
- pid_on_target->pid = p->pid;
- pid_on_target->next = w->root_pid;
- w->root_pid = pid_on_target;
- }
-}
-
-static inline void post_aggregate_targets(struct target *root) {
- struct target *w;
- for (w = root; w ; w = w->next) {
- if(w->collected_starttime) {
- if (!w->starttime || w->collected_starttime < w->starttime) {
- w->starttime = w->collected_starttime;
- }
- } else {
- w->starttime = 0;
- }
- }
-}
-
-static void calculate_netdata_statistics(void) {
-
- apply_apps_groups_targets_inheritance();
-
- zero_all_targets(users_root_target);
- zero_all_targets(groups_root_target);
- apps_groups_targets_count = zero_all_targets(apps_groups_root_target);
-
-    // this has to be done before the cleanup
- struct pid_stat *p = NULL;
- struct target *w = NULL, *o = NULL;
-
- // concentrate everything on the targets
- for(p = root_of_pids; p ; p = p->next) {
-
- // --------------------------------------------------------------------
- // apps_groups target
-
- aggregate_pid_on_target(p->target, p, NULL);
-
-
- // --------------------------------------------------------------------
- // user target
-
- o = p->user_target;
- if(likely(p->user_target && p->user_target->uid == p->uid))
- w = p->user_target;
- else {
- if(unlikely(debug_enabled && p->user_target))
- debug_log("pid %d (%s) switched user from %u (%s) to %u.", p->pid, p->comm, p->user_target->uid, p->user_target->name, p->uid);
-
- w = p->user_target = get_users_target(p->uid);
- }
-
- aggregate_pid_on_target(w, p, o);
-
-
- // --------------------------------------------------------------------
- // user group target
-
- o = p->group_target;
- if(likely(p->group_target && p->group_target->gid == p->gid))
- w = p->group_target;
- else {
- if(unlikely(debug_enabled && p->group_target))
- debug_log("pid %d (%s) switched group from %u (%s) to %u.", p->pid, p->comm, p->group_target->gid, p->group_target->name, p->gid);
-
- w = p->group_target = get_groups_target(p->gid);
- }
-
- aggregate_pid_on_target(w, p, o);
-
-
- // --------------------------------------------------------------------
- // aggregate all file descriptors
-
- if(enable_file_charts)
- aggregate_pid_fds_on_targets(p);
- }
-
- post_aggregate_targets(apps_groups_root_target);
- post_aggregate_targets(users_root_target);
- post_aggregate_targets(groups_root_target);
-
- cleanup_exited_pids();
-}
-
-// ----------------------------------------------------------------------------
-// update chart dimensions
-
-static inline void send_BEGIN(const char *type, const char *name, const char *metric, usec_t usec) {
- fprintf(stdout, "BEGIN %s.%s_%s %" PRIu64 "\n", type, name, metric, usec);
-}
-
-static inline void send_SET(const char *name, kernel_uint_t value) {
- fprintf(stdout, "SET %s = " KERNEL_UINT_FORMAT "\n", name, value);
-}
-
-static inline void send_END(void) {
- fprintf(stdout, "END\n\n");
-}
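-
-// illustrative sketch (not from the original source): for type "apps", a target
-// with clean_name "myapp" and metric "cpu_utilization", the helpers above emit
-// plugin protocol text like:
-//
-//   BEGIN apps.myapp_cpu_utilization 1000000
-//   SET user = 123
-//   SET system = 45
-//   END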
-
-void send_resource_usage_to_netdata(usec_t dt) {
- static struct timeval last = { 0, 0 };
- static struct rusage me_last;
-
- struct timeval now;
- struct rusage me;
-
- usec_t cpuuser;
- usec_t cpusyst;
-
- if(!last.tv_sec) {
- now_monotonic_timeval(&last);
- getrusage(RUSAGE_SELF, &me_last);
-
- cpuuser = 0;
- cpusyst = 0;
- }
- else {
- now_monotonic_timeval(&now);
- getrusage(RUSAGE_SELF, &me);
-
- cpuuser = me.ru_utime.tv_sec * USEC_PER_SEC + me.ru_utime.tv_usec;
- cpusyst = me.ru_stime.tv_sec * USEC_PER_SEC + me.ru_stime.tv_usec;
-
- memmove(&last, &now, sizeof(struct timeval));
- memmove(&me_last, &me, sizeof(struct rusage));
- }
-
- static char created_charts = 0;
- if(unlikely(!created_charts)) {
- created_charts = 1;
-
- fprintf(stdout,
- "CHART netdata.apps_cpu '' 'Apps Plugin CPU' 'milliseconds/s' apps.plugin netdata.apps_cpu stacked 140000 %1$d\n"
- "DIMENSION user '' incremental 1 1000\n"
- "DIMENSION system '' incremental 1 1000\n"
- "CHART netdata.apps_sizes '' 'Apps Plugin Files' 'files/s' apps.plugin netdata.apps_sizes line 140001 %1$d\n"
- "DIMENSION calls '' incremental 1 1\n"
- "DIMENSION files '' incremental 1 1\n"
- "DIMENSION filenames '' incremental 1 1\n"
- "DIMENSION inode_changes '' incremental 1 1\n"
- "DIMENSION link_changes '' incremental 1 1\n"
- "DIMENSION pids '' absolute 1 1\n"
- "DIMENSION fds '' absolute 1 1\n"
- "DIMENSION targets '' absolute 1 1\n"
- "DIMENSION new_pids 'new pids' incremental 1 1\n"
- , update_every
- );
-
- fprintf(stdout,
- "CHART netdata.apps_fix '' 'Apps Plugin Normalization Ratios' 'percentage' apps.plugin netdata.apps_fix line 140002 %1$d\n"
- "DIMENSION utime '' absolute 1 %2$llu\n"
- "DIMENSION stime '' absolute 1 %2$llu\n"
- "DIMENSION gtime '' absolute 1 %2$llu\n"
- "DIMENSION minflt '' absolute 1 %2$llu\n"
- "DIMENSION majflt '' absolute 1 %2$llu\n"
- , update_every
- , RATES_DETAIL
- );
-
- if(include_exited_childs)
- fprintf(stdout,
- "CHART netdata.apps_children_fix '' 'Apps Plugin Exited Children Normalization Ratios' 'percentage' apps.plugin netdata.apps_children_fix line 140003 %1$d\n"
- "DIMENSION cutime '' absolute 1 %2$llu\n"
- "DIMENSION cstime '' absolute 1 %2$llu\n"
- "DIMENSION cgtime '' absolute 1 %2$llu\n"
- "DIMENSION cminflt '' absolute 1 %2$llu\n"
- "DIMENSION cmajflt '' absolute 1 %2$llu\n"
- , update_every
- , RATES_DETAIL
- );
-
- }
-
- fprintf(stdout,
- "BEGIN netdata.apps_cpu %"PRIu64"\n"
- "SET user = %"PRIu64"\n"
- "SET system = %"PRIu64"\n"
- "END\n"
- "BEGIN netdata.apps_sizes %"PRIu64"\n"
- "SET calls = %zu\n"
- "SET files = %zu\n"
- "SET filenames = %zu\n"
- "SET inode_changes = %zu\n"
- "SET link_changes = %zu\n"
- "SET pids = %zu\n"
- "SET fds = %d\n"
- "SET targets = %zu\n"
- "SET new_pids = %zu\n"
- "END\n"
- , dt
- , cpuuser
- , cpusyst
- , dt
- , calls_counter
- , file_counter
- , filenames_allocated_counter
- , inodes_changed_counter
- , links_changed_counter
- , all_pids_count
- , all_files_len
- , apps_groups_targets_count
- , targets_assignment_counter
- );
-
- fprintf(stdout,
- "BEGIN netdata.apps_fix %"PRIu64"\n"
- "SET utime = %u\n"
- "SET stime = %u\n"
- "SET gtime = %u\n"
- "SET minflt = %u\n"
- "SET majflt = %u\n"
- "END\n"
- , dt
- , (unsigned int)(utime_fix_ratio * 100 * RATES_DETAIL)
- , (unsigned int)(stime_fix_ratio * 100 * RATES_DETAIL)
- , (unsigned int)(gtime_fix_ratio * 100 * RATES_DETAIL)
- , (unsigned int)(minflt_fix_ratio * 100 * RATES_DETAIL)
- , (unsigned int)(majflt_fix_ratio * 100 * RATES_DETAIL)
- );
-
- if(include_exited_childs)
- fprintf(stdout,
- "BEGIN netdata.apps_children_fix %"PRIu64"\n"
- "SET cutime = %u\n"
- "SET cstime = %u\n"
- "SET cgtime = %u\n"
- "SET cminflt = %u\n"
- "SET cmajflt = %u\n"
- "END\n"
- , dt
- , (unsigned int)(cutime_fix_ratio * 100 * RATES_DETAIL)
- , (unsigned int)(cstime_fix_ratio * 100 * RATES_DETAIL)
- , (unsigned int)(cgtime_fix_ratio * 100 * RATES_DETAIL)
- , (unsigned int)(cminflt_fix_ratio * 100 * RATES_DETAIL)
- , (unsigned int)(cmajflt_fix_ratio * 100 * RATES_DETAIL)
- );
-}
-
-static void normalize_utilization(struct target *root) {
- struct target *w;
-
-    // children processing introduces spikes;
-    // here we try to eliminate them by disabling children processing, either for
-    // specific dimensions or entirely. Of course, either way, we disable it for a single iteration only.
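-    //
-    // illustrative example (hypothetical numbers, not from the source): if the
-    // system reports global_utime + global_stime = 100 for this iteration, the
-    // targets sum to utime + stime = 80, and exited children sum to
-    // cutime + cstime = 60, the children ratios become (100 - 80) / 60 = 0.33,
-    // so only a third of the children time is charged to the targets below.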
-
- kernel_uint_t max_time = get_system_cpus() * time_factor * RATES_DETAIL;
- kernel_uint_t utime = 0, cutime = 0, stime = 0, cstime = 0, gtime = 0, cgtime = 0, minflt = 0, cminflt = 0, majflt = 0, cmajflt = 0;
-
- if(global_utime > max_time) global_utime = max_time;
- if(global_stime > max_time) global_stime = max_time;
- if(global_gtime > max_time) global_gtime = max_time;
-
- for(w = root; w ; w = w->next) {
- if(w->target || (!w->processes && !w->exposed)) continue;
-
- utime += w->utime;
- stime += w->stime;
- gtime += w->gtime;
- cutime += w->cutime;
- cstime += w->cstime;
- cgtime += w->cgtime;
-
- minflt += w->minflt;
- majflt += w->majflt;
- cminflt += w->cminflt;
- cmajflt += w->cmajflt;
- }
-
- if(global_utime || global_stime || global_gtime) {
- if(global_utime + global_stime + global_gtime > utime + cutime + stime + cstime + gtime + cgtime) {
- // everything we collected fits
- utime_fix_ratio =
- stime_fix_ratio =
- gtime_fix_ratio =
- cutime_fix_ratio =
- cstime_fix_ratio =
- cgtime_fix_ratio = 1.0; //(NETDATA_DOUBLE)(global_utime + global_stime) / (NETDATA_DOUBLE)(utime + cutime + stime + cstime);
- }
- else if((global_utime + global_stime > utime + stime) && (cutime || cstime)) {
- // children resources are too high
- // lower only the children resources
- utime_fix_ratio =
- stime_fix_ratio =
- gtime_fix_ratio = 1.0;
- cutime_fix_ratio =
- cstime_fix_ratio =
- cgtime_fix_ratio = (NETDATA_DOUBLE)((global_utime + global_stime) - (utime + stime)) / (NETDATA_DOUBLE)(cutime + cstime);
- }
- else if(utime || stime) {
- // even running processes are unrealistic
- // zero the children resources
- // lower the running processes resources
- utime_fix_ratio =
- stime_fix_ratio =
- gtime_fix_ratio = (NETDATA_DOUBLE)(global_utime + global_stime) / (NETDATA_DOUBLE)(utime + stime);
- cutime_fix_ratio =
- cstime_fix_ratio =
- cgtime_fix_ratio = 0.0;
- }
- else {
- utime_fix_ratio =
- stime_fix_ratio =
- gtime_fix_ratio =
- cutime_fix_ratio =
- cstime_fix_ratio =
- cgtime_fix_ratio = 0.0;
- }
- }
- else {
- utime_fix_ratio =
- stime_fix_ratio =
- gtime_fix_ratio =
- cutime_fix_ratio =
- cstime_fix_ratio =
- cgtime_fix_ratio = 0.0;
- }
-
- if(utime_fix_ratio > 1.0) utime_fix_ratio = 1.0;
- if(cutime_fix_ratio > 1.0) cutime_fix_ratio = 1.0;
- if(stime_fix_ratio > 1.0) stime_fix_ratio = 1.0;
- if(cstime_fix_ratio > 1.0) cstime_fix_ratio = 1.0;
- if(gtime_fix_ratio > 1.0) gtime_fix_ratio = 1.0;
- if(cgtime_fix_ratio > 1.0) cgtime_fix_ratio = 1.0;
-
- // if(utime_fix_ratio < 0.0) utime_fix_ratio = 0.0;
- // if(cutime_fix_ratio < 0.0) cutime_fix_ratio = 0.0;
- // if(stime_fix_ratio < 0.0) stime_fix_ratio = 0.0;
- // if(cstime_fix_ratio < 0.0) cstime_fix_ratio = 0.0;
- // if(gtime_fix_ratio < 0.0) gtime_fix_ratio = 0.0;
- // if(cgtime_fix_ratio < 0.0) cgtime_fix_ratio = 0.0;
-
-    // TODO
-    // we use cpu time to normalize page faults;
-    // the problem is that, to find the proper max values for page faults,
-    // we would have to parse /proc/vmstat, which is too expensive to parse
-    // again (netdata already does it)
-    //
-    // a better solution could be to somehow have netdata
-    // do this normalization for us
-
- if(utime || stime || gtime)
- majflt_fix_ratio =
- minflt_fix_ratio = (NETDATA_DOUBLE)(utime * utime_fix_ratio + stime * stime_fix_ratio + gtime * gtime_fix_ratio) / (NETDATA_DOUBLE)(utime + stime + gtime);
- else
- minflt_fix_ratio =
- majflt_fix_ratio = 1.0;
-
- if(cutime || cstime || cgtime)
- cmajflt_fix_ratio =
- cminflt_fix_ratio = (NETDATA_DOUBLE)(cutime * cutime_fix_ratio + cstime * cstime_fix_ratio + cgtime * cgtime_fix_ratio) / (NETDATA_DOUBLE)(cutime + cstime + cgtime);
- else
- cminflt_fix_ratio =
- cmajflt_fix_ratio = 1.0;
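-
-    // illustrative example (hypothetical numbers, not from the source): with
-    // utime=60, stime=40, gtime=0, utime_fix_ratio=1.0 and stime_fix_ratio=0.5,
-    // page faults are scaled by (60*1.0 + 40*0.5 + 0) / (60 + 40 + 0) = 0.8.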
-
- // the report
-
- debug_log(
- "SYSTEM: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " "
- "COLLECTED: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " cu=" KERNEL_UINT_FORMAT " cs=" KERNEL_UINT_FORMAT " cg=" KERNEL_UINT_FORMAT " "
- "DELTA: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " "
- "FIX: u=%0.2f s=%0.2f g=%0.2f cu=%0.2f cs=%0.2f cg=%0.2f "
- "FINALLY: u=" KERNEL_UINT_FORMAT " s=" KERNEL_UINT_FORMAT " g=" KERNEL_UINT_FORMAT " cu=" KERNEL_UINT_FORMAT " cs=" KERNEL_UINT_FORMAT " cg=" KERNEL_UINT_FORMAT " "
- , global_utime
- , global_stime
- , global_gtime
- , utime
- , stime
- , gtime
- , cutime
- , cstime
- , cgtime
- , utime + cutime - global_utime
- , stime + cstime - global_stime
- , gtime + cgtime - global_gtime
- , utime_fix_ratio
- , stime_fix_ratio
- , gtime_fix_ratio
- , cutime_fix_ratio
- , cstime_fix_ratio
- , cgtime_fix_ratio
- , (kernel_uint_t)(utime * utime_fix_ratio)
- , (kernel_uint_t)(stime * stime_fix_ratio)
- , (kernel_uint_t)(gtime * gtime_fix_ratio)
- , (kernel_uint_t)(cutime * cutime_fix_ratio)
- , (kernel_uint_t)(cstime * cstime_fix_ratio)
- , (kernel_uint_t)(cgtime * cgtime_fix_ratio)
- );
-}
-
-static void send_collected_data_to_netdata(struct target *root, const char *type, usec_t dt) {
- struct target *w;
-
- for (w = root; w ; w = w->next) {
- if (unlikely(!w->exposed))
- continue;
-
- send_BEGIN(type, w->clean_name, "processes", dt);
- send_SET("processes", w->processes);
- send_END();
-
- send_BEGIN(type, w->clean_name, "threads", dt);
- send_SET("threads", w->num_threads);
- send_END();
-
- if (unlikely(!w->processes && !w->is_other))
- continue;
-
- send_BEGIN(type, w->clean_name, "cpu_utilization", dt);
- send_SET("user", (kernel_uint_t)(w->utime * utime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cutime * cutime_fix_ratio)) : 0ULL));
- send_SET("system", (kernel_uint_t)(w->stime * stime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cstime * cstime_fix_ratio)) : 0ULL));
- send_END();
-
-#ifndef __FreeBSD__
- if (enable_guest_charts) {
- send_BEGIN(type, w->clean_name, "cpu_guest_utilization", dt);
- send_SET("guest", (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cgtime * cgtime_fix_ratio)) : 0ULL));
- send_END();
- }
-
- send_BEGIN(type, w->clean_name, "cpu_context_switches", dt);
- send_SET("voluntary", w->status_voluntary_ctxt_switches);
- send_SET("involuntary", w->status_nonvoluntary_ctxt_switches);
- send_END();
-
- send_BEGIN(type, w->clean_name, "mem_private_usage", dt);
- send_SET("mem", (w->status_vmrss > w->status_vmshared)?(w->status_vmrss - w->status_vmshared) : 0ULL);
- send_END();
-#endif
-
- send_BEGIN(type, w->clean_name, "mem_usage", dt);
- send_SET("rss", w->status_vmrss);
- send_END();
-
- send_BEGIN(type, w->clean_name, "vmem_usage", dt);
- send_SET("vmem", w->status_vmsize);
- send_END();
-
- send_BEGIN(type, w->clean_name, "mem_page_faults", dt);
- send_SET("minor", (kernel_uint_t)(w->minflt * minflt_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cminflt * cminflt_fix_ratio)) : 0ULL));
- send_SET("major", (kernel_uint_t)(w->majflt * majflt_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cmajflt * cmajflt_fix_ratio)) : 0ULL));
- send_END();
-
-#ifndef __FreeBSD__
- send_BEGIN(type, w->clean_name, "swap_usage", dt);
- send_SET("swap", w->status_vmswap);
- send_END();
-#endif
-
-#ifndef __FreeBSD__
- if (w->processes == 0) {
- send_BEGIN(type, w->clean_name, "uptime", dt);
- send_SET("uptime", 0);
- send_END();
-
- if (enable_detailed_uptime_charts) {
- send_BEGIN(type, w->clean_name, "uptime_summary", dt);
- send_SET("min", 0);
- send_SET("avg", 0);
- send_SET("max", 0);
- send_END();
- }
- } else {
- send_BEGIN(type, w->clean_name, "uptime", dt);
- send_SET("uptime", (global_uptime > w->starttime) ? (global_uptime - w->starttime) : 0);
- send_END();
-
- if (enable_detailed_uptime_charts) {
- send_BEGIN(type, w->clean_name, "uptime_summary", dt);
- send_SET("min", w->uptime_min);
- send_SET("avg", w->processes > 0 ? w->uptime_sum / w->processes : 0);
- send_SET("max", w->uptime_max);
- send_END();
- }
- }
-#endif
-
- send_BEGIN(type, w->clean_name, "disk_physical_io", dt);
- send_SET("reads", w->io_storage_bytes_read);
- send_SET("writes", w->io_storage_bytes_written);
- send_END();
-
-#ifndef __FreeBSD__
- send_BEGIN(type, w->clean_name, "disk_logical_io", dt);
- send_SET("reads", w->io_logical_bytes_read);
- send_SET("writes", w->io_logical_bytes_written);
- send_END();
-#endif
- if (enable_file_charts) {
- send_BEGIN(type, w->clean_name, "fds_open_limit", dt);
- send_SET("limit", w->max_open_files_percent * 100.0);
- send_END();
-
- send_BEGIN(type, w->clean_name, "fds_open", dt);
- send_SET("files", w->openfds.files);
- send_SET("sockets", w->openfds.sockets);
-            send_SET("pipes", w->openfds.pipes);
- send_SET("inotifies", w->openfds.inotifies);
- send_SET("event", w->openfds.eventfds);
- send_SET("timer", w->openfds.timerfds);
- send_SET("signal", w->openfds.signalfds);
- send_SET("eventpolls", w->openfds.eventpolls);
- send_SET("other", w->openfds.other);
- send_END();
- }
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// generate the charts
-
-static void send_charts_updates_to_netdata(struct target *root, const char *type, const char *lbl_name, const char *title)
-{
- struct target *w;
-
- if (debug_enabled) {
- for (w = root; w; w = w->next) {
- if (unlikely(!w->target && w->processes)) {
- struct pid_on_target *pid_on_target;
- fprintf(stderr, "apps.plugin: target '%s' has aggregated %u process(es):", w->name, w->processes);
- for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
- fprintf(stderr, " %d", pid_on_target->pid);
- }
- fputc('\n', stderr);
- }
- }
- }
-
- for (w = root; w; w = w->next) {
- if (likely(w->exposed || (!w->processes && !w->is_other)))
- continue;
-
- w->exposed = 1;
-
- fprintf(stdout, "CHART %s.%s_cpu_utilization '' '%s CPU utilization (100%% = 1 core)' 'percentage' cpu %s.cpu_utilization stacked 20001 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION user '' absolute 1 %llu\n", time_factor * RATES_DETAIL / 100LLU);
- fprintf(stdout, "DIMENSION system '' absolute 1 %llu\n", time_factor * RATES_DETAIL / 100LLU);
-
-#ifndef __FreeBSD__
- if (enable_guest_charts) {
-            fprintf(stdout, "CHART %s.%s_cpu_guest_utilization '' '%s CPU guest utilization (100%% = 1 core)' 'percentage' cpu %s.cpu_guest_utilization line 20005 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION guest '' absolute 1 %llu\n", time_factor * RATES_DETAIL / 100LLU);
- }
-
- fprintf(stdout, "CHART %s.%s_cpu_context_switches '' '%s CPU context switches' 'switches/s' cpu %s.cpu_context_switches stacked 20010 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION voluntary '' absolute 1 %llu\n", RATES_DETAIL);
- fprintf(stdout, "DIMENSION involuntary '' absolute 1 %llu\n", RATES_DETAIL);
-
- fprintf(stdout, "CHART %s.%s_mem_private_usage '' '%s memory usage without shared' 'MiB' mem %s.mem_private_usage area 20050 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION mem '' absolute %ld %ld\n", 1L, 1024L);
-#endif
-
- fprintf(stdout, "CHART %s.%s_mem_usage '' '%s memory RSS usage' 'MiB' mem %s.mem_usage area 20055 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION rss '' absolute %ld %ld\n", 1L, 1024L);
-
- fprintf(stdout, "CHART %s.%s_mem_page_faults '' '%s memory page faults' 'pgfaults/s' mem %s.mem_page_faults stacked 20060 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION major '' absolute 1 %llu\n", RATES_DETAIL);
- fprintf(stdout, "DIMENSION minor '' absolute 1 %llu\n", RATES_DETAIL);
-
- fprintf(stdout, "CHART %s.%s_vmem_usage '' '%s virtual memory size' 'MiB' mem %s.vmem_usage line 20065 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION vmem '' absolute %ld %ld\n", 1L, 1024L);
-
-#ifndef __FreeBSD__
- fprintf(stdout, "CHART %s.%s_swap_usage '' '%s swap usage' 'MiB' mem %s.swap_usage area 20065 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION swap '' absolute %ld %ld\n", 1L, 1024L);
-#endif
-
-#ifndef __FreeBSD__
- fprintf(stdout, "CHART %s.%s_disk_physical_io '' '%s disk physical IO' 'KiB/s' disk %s.disk_physical_io area 20100 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION reads '' absolute 1 %llu\n", 1024LLU * RATES_DETAIL);
- fprintf(stdout, "DIMENSION writes '' absolute -1 %llu\n", 1024LLU * RATES_DETAIL);
-
- fprintf(stdout, "CHART %s.%s_disk_logical_io '' '%s disk logical IO' 'KiB/s' disk %s.disk_logical_io area 20105 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION reads '' absolute 1 %llu\n", 1024LLU * RATES_DETAIL);
- fprintf(stdout, "DIMENSION writes '' absolute -1 %llu\n", 1024LLU * RATES_DETAIL);
-#else
- fprintf(stdout, "CHART %s.%s_disk_physical_io '' '%s disk physical IO' 'blocks/s' disk %s.disk_physical_block_io area 20100 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION reads '' absolute 1 %llu\n", RATES_DETAIL);
- fprintf(stdout, "DIMENSION writes '' absolute -1 %llu\n", RATES_DETAIL);
-#endif
-
- fprintf(stdout, "CHART %s.%s_processes '' '%s processes' 'processes' processes %s.processes line 20150 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION processes '' absolute 1 1\n");
-
- fprintf(stdout, "CHART %s.%s_threads '' '%s threads' 'threads' processes %s.threads line 20155 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION threads '' absolute 1 1\n");
-
- if (enable_file_charts) {
- fprintf(stdout, "CHART %s.%s_fds_open_limit '' '%s open file descriptors limit' '%%' fds %s.fds_open_limit line 20200 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION limit '' absolute 1 100\n");
-
-            fprintf(stdout, "CHART %s.%s_fds_open '' '%s open file descriptors' 'fds' fds %s.fds_open stacked 20210 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION files '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION sockets '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION pipes '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION inotifies '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION event '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION timer '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION signal '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION eventpolls '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION other '' absolute 1 1\n");
- }
-
-#ifndef __FreeBSD__
- fprintf(stdout, "CHART %s.%s_uptime '' '%s uptime' 'seconds' uptime %s.uptime line 20250 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION uptime '' absolute 1 1\n");
-
- if (enable_detailed_uptime_charts) {
- fprintf(stdout, "CHART %s.%s_uptime_summary '' '%s uptime summary' 'seconds' uptime %s.uptime_summary area 20255 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION min '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION avg '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION max '' absolute 1 1\n");
- }
-#endif
- }
-}
-
-#ifndef __FreeBSD__
-static void send_proc_states_count(usec_t dt)
-{
- static bool chart_added = false;
- // create chart for count of processes in different states
- if (!chart_added) {
- fprintf(
- stdout,
- "CHART system.processes_state '' 'System Processes State' 'processes' processes system.processes_state line %d %d\n",
- NETDATA_CHART_PRIO_SYSTEM_PROCESS_STATES,
- update_every);
- for (proc_state i = PROC_STATUS_RUNNING; i < PROC_STATUS_END; i++) {
- fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", proc_states[i]);
- }
- chart_added = true;
- }
-
- // send process state count
- fprintf(stdout, "BEGIN system.processes_state %" PRIu64 "\n", dt);
- for (proc_state i = PROC_STATUS_RUNNING; i < PROC_STATUS_END; i++) {
- send_SET(proc_states[i], proc_state_count[i]);
- }
- send_END();
-}
-#endif
-
-// ----------------------------------------------------------------------------
-// parse command line arguments
-
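-// reading /proc/1/io (the I/O counters of init) normally requires elevated
-// privileges (e.g. root, or capabilities such as CAP_DAC_READ_SEARCH /
-// CAP_SYS_PTRACE granted to the plugin), so a successful read is used below
-// as a proxy check that apps.plugin has sufficient access rights.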
-int check_proc_1_io() {
- int ret = 0;
-
- procfile *ff = procfile_open("/proc/1/io", NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(!ff) goto cleanup;
-
- ff = procfile_readall(ff);
- if(!ff) goto cleanup;
-
- ret = 1;
-
-cleanup:
- procfile_close(ff);
- return ret;
-}
-
-static void parse_args(int argc, char **argv)
-{
- int i, freq = 0;
-
- for(i = 1; i < argc; i++) {
- if(!freq) {
- int n = (int)str2l(argv[i]);
- if(n > 0) {
- freq = n;
- continue;
- }
- }
-
- if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
- printf("apps.plugin %s\n", VERSION);
- exit(0);
- }
-
- if(strcmp("test-permissions", argv[i]) == 0 || strcmp("-t", argv[i]) == 0) {
- if(!check_proc_1_io()) {
- perror("Tried to read /proc/1/io and it failed");
- exit(1);
- }
- printf("OK\n");
- exit(0);
- }
-
- if(strcmp("debug", argv[i]) == 0) {
- debug_enabled = 1;
-#ifndef NETDATA_INTERNAL_CHECKS
- fprintf(stderr, "apps.plugin has been compiled without debugging\n");
-#endif
- continue;
- }
-
-#ifndef __FreeBSD__
- if(strcmp("fds-cache-secs", argv[i]) == 0) {
- if(argc <= i + 1) {
- fprintf(stderr, "Parameter 'fds-cache-secs' requires a number as argument.\n");
- exit(1);
- }
- i++;
- max_fds_cache_seconds = str2i(argv[i]);
- if(max_fds_cache_seconds < 0) max_fds_cache_seconds = 0;
- continue;
- }
-#endif
-
- if(strcmp("no-childs", argv[i]) == 0 || strcmp("without-childs", argv[i]) == 0) {
- include_exited_childs = 0;
- continue;
- }
-
- if(strcmp("with-childs", argv[i]) == 0) {
- include_exited_childs = 1;
- continue;
- }
-
- if(strcmp("with-guest", argv[i]) == 0) {
- enable_guest_charts = 1;
- continue;
- }
-
- if(strcmp("no-guest", argv[i]) == 0 || strcmp("without-guest", argv[i]) == 0) {
- enable_guest_charts = 0;
- continue;
- }
-
- if(strcmp("with-files", argv[i]) == 0) {
- enable_file_charts = 1;
- continue;
- }
-
- if(strcmp("no-files", argv[i]) == 0 || strcmp("without-files", argv[i]) == 0) {
- enable_file_charts = 0;
- continue;
- }
-
- if(strcmp("no-users", argv[i]) == 0 || strcmp("without-users", argv[i]) == 0) {
- enable_users_charts = 0;
- continue;
- }
-
- if(strcmp("no-groups", argv[i]) == 0 || strcmp("without-groups", argv[i]) == 0) {
- enable_groups_charts = 0;
- continue;
- }
-
- if(strcmp("with-detailed-uptime", argv[i]) == 0) {
- enable_detailed_uptime_charts = 1;
- continue;
- }
- if(strcmp("with-function-cmdline", argv[i]) == 0) {
- enable_function_cmdline = 1;
- continue;
- }
-
- if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
- fprintf(stderr,
- "\n"
- " netdata apps.plugin %s\n"
- " Copyright (C) 2016-2017 Costa Tsaousis <costa@tsaousis.gr>\n"
- " Released under GNU General Public License v3 or later.\n"
- " All rights reserved.\n"
- "\n"
- " This program is a data collector plugin for netdata.\n"
- "\n"
- " Available command line options:\n"
- "\n"
- " SECONDS set the data collection frequency\n"
- "\n"
- " debug enable debugging (lot of output)\n"
- "\n"
- " with-function-cmdline enable reporting the complete command line for processes\n"
- " it includes the command and passed arguments\n"
- " it may include sensitive data such as passwords and tokens\n"
- " enabling this could be a security risk\n"
- "\n"
- " with-childs\n"
- " without-childs enable / disable aggregating exited\n"
- " children resources into parents\n"
- " (default is enabled)\n"
- "\n"
- " with-guest\n"
- " without-guest enable / disable reporting guest charts\n"
- " (default is disabled)\n"
- "\n"
- " with-files\n"
- " without-files enable / disable reporting files, sockets, pipes\n"
- " (default is enabled)\n"
- "\n"
- " without-users disable reporting per user charts\n"
- "\n"
- " without-groups disable reporting per user group charts\n"
- "\n"
- " with-detailed-uptime enable reporting min/avg/max uptime charts\n"
- "\n"
-#ifndef __FreeBSD__
-            " fds-cache-secs N      cache the files of processes for N seconds\n"
- " caching is adaptive per file (when a file\n"
- " is found, it starts at 0 and while the file\n"
- " remains open, it is incremented up to the\n"
- " max given)\n"
- " (default is %d seconds)\n"
- "\n"
-#endif
- " version or -v or -V print program version and exit\n"
- "\n"
- , VERSION
-#ifndef __FreeBSD__
- , max_fds_cache_seconds
-#endif
- );
- exit(1);
- }
-
- netdata_log_error("Cannot understand option %s", argv[i]);
- exit(1);
- }
-
- if(freq > 0) update_every = freq;
-
- if(read_apps_groups_conf(user_config_dir, "groups")) {
- netdata_log_info("Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'", user_config_dir, stock_config_dir);
-
- if(read_apps_groups_conf(stock_config_dir, "groups")) {
- netdata_log_error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.", stock_config_dir);
- exit(1);
- }
- else
- netdata_log_info("Loaded config file '%s/apps_groups.conf'", stock_config_dir);
- }
- else
- netdata_log_info("Loaded config file '%s/apps_groups.conf'", user_config_dir);
-}
-
-static int am_i_running_as_root() {
- uid_t uid = getuid(), euid = geteuid();
-
- if(uid == 0 || euid == 0) {
- if(debug_enabled) netdata_log_info("I am running with escalated privileges, uid = %u, euid = %u.", uid, euid);
- return 1;
- }
-
- if(debug_enabled) netdata_log_info("I am not running with escalated privileges, uid = %u, euid = %u.", uid, euid);
- return 0;
-}
-
-#ifdef HAVE_CAPABILITY
-static int check_capabilities() {
- cap_t caps = cap_get_proc();
- if(!caps) {
- netdata_log_error("Cannot get current capabilities.");
- return 0;
- }
- else if(debug_enabled)
- netdata_log_info("Received my capabilities from the system.");
-
- int ret = 1;
-
- cap_flag_value_t cfv = CAP_CLEAR;
- if(cap_get_flag(caps, CAP_DAC_READ_SEARCH, CAP_EFFECTIVE, &cfv) == -1) {
- netdata_log_error("Cannot find if CAP_DAC_READ_SEARCH is effective.");
- ret = 0;
- }
- else {
- if(cfv != CAP_SET) {
- netdata_log_error("apps.plugin should run with CAP_DAC_READ_SEARCH.");
- ret = 0;
- }
- else if(debug_enabled)
- netdata_log_info("apps.plugin runs with CAP_DAC_READ_SEARCH.");
- }
-
- cfv = CAP_CLEAR;
- if(cap_get_flag(caps, CAP_SYS_PTRACE, CAP_EFFECTIVE, &cfv) == -1) {
- netdata_log_error("Cannot find if CAP_SYS_PTRACE is effective.");
- ret = 0;
- }
- else {
- if(cfv != CAP_SET) {
- netdata_log_error("apps.plugin should run with CAP_SYS_PTRACE.");
- ret = 0;
- }
- else if(debug_enabled)
- netdata_log_info("apps.plugin runs with CAP_SYS_PTRACE.");
- }
-
- cap_free(caps);
-
- return ret;
-}
-#else
-static int check_capabilities() {
- return 0;
-}
-#endif
-
-static netdata_mutex_t apps_and_stdout_mutex = NETDATA_MUTEX_INITIALIZER;
-
-#define PROCESS_FILTER_CATEGORY "category:"
-#define PROCESS_FILTER_USER "user:"
-#define PROCESS_FILTER_GROUP "group:"
-#define PROCESS_FILTER_PROCESS "process:"
-#define PROCESS_FILTER_PID "pid:"
-#define PROCESS_FILTER_UID "uid:"
-#define PROCESS_FILTER_GID "gid:"
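-
-// illustrative request (hypothetical values): "processes category:sql user:netdata pid:1234"
-// combines three filters; each prefix above is matched with strncmp() in
-// function_processes() below.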
-
-static struct target *find_target_by_name(struct target *base, const char *name) {
- struct target *t;
- for(t = base; t ; t = t->next) {
- if (strcmp(t->name, name) == 0)
- return t;
- }
-
- return NULL;
-}
-
-static kernel_uint_t MemTotal = 0;
-
-static void get_MemTotal(void) {
-#ifdef __FreeBSD__
- // TODO - fix this for FreeBSD
- return;
-#else
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/meminfo", netdata_configured_host_prefix);
-
- procfile *ff = procfile_open(filename, ": \t", PROCFILE_FLAG_DEFAULT);
- if(!ff)
- return;
-
- ff = procfile_readall(ff);
- if(!ff)
- return;
-
- size_t line, lines = procfile_lines(ff);
-
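-    // the line being searched for looks like (illustrative values):
-    //   MemTotal:       16384256 kB
-    // procfile splits it on ": \t" into the 3 words checked below.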
- for(line = 0; line < lines ;line++) {
- size_t words = procfile_linewords(ff, line);
- if(words == 3 && strcmp(procfile_lineword(ff, line, 0), "MemTotal") == 0 && strcmp(procfile_lineword(ff, line, 2), "kB") == 0) {
- kernel_uint_t n = str2ull(procfile_lineword(ff, line, 1), NULL);
- if(n) MemTotal = n;
- break;
- }
- }
-
- procfile_close(ff);
-#endif
-}
-
-static void apps_plugin_function_processes_help(const char *transaction) {
- BUFFER *wb = buffer_create(0, NULL);
- buffer_sprintf(wb, "%s",
- "apps.plugin / processes\n"
- "\n"
- "Function `processes` presents all the currently running processes of the system.\n"
- "\n"
- "The following filters are supported:\n"
- "\n"
- " category:NAME\n"
- " Shows only processes that are assigned the category `NAME` in apps_groups.conf\n"
- "\n"
- " user:NAME\n"
- " Shows only processes that are running as user name `NAME`.\n"
- "\n"
- " group:NAME\n"
- " Shows only processes that are running as group name `NAME`.\n"
- "\n"
- " process:NAME\n"
-            "   Shows only processes whose command is `NAME`, or whose parent's command is `NAME`.\n"
- "\n"
- " pid:NUMBER\n"
-            "   Shows only processes whose PID is `NUMBER`, or whose parent's PID is `NUMBER`.\n"
- "\n"
- " uid:NUMBER\n"
-            "   Shows only processes whose UID is `NUMBER`.\n"
- "\n"
- " gid:NUMBER\n"
-            "   Shows only processes whose GID is `NUMBER`.\n"
- "\n"
-            "Filters can be combined. Each filter can be given only once.\n"
- );
-
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb);
- buffer_free(wb);
-}
-
-#define add_value_field_llu_with_max(wb, key, value) do { \
- unsigned long long _tmp = (value); \
- key ## _max = (rows == 0) ? (_tmp) : MAX(key ## _max, _tmp); \
- buffer_json_add_array_item_uint64(wb, _tmp); \
-} while(0)
-
-#define add_value_field_ndd_with_max(wb, key, value) do { \
- NETDATA_DOUBLE _tmp = (value); \
- key ## _max = (rows == 0) ? (_tmp) : MAX(key ## _max, _tmp); \
- buffer_json_add_array_item_double(wb, _tmp); \
-} while(0)
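-
-// illustrative expansion (not from the source): add_value_field_llu_with_max(wb, Threads, v)
-// pastes the key into Threads_max via `key ## _max`, seeds the max with v on the
-// first row (rows == 0), keeps the running MAX() afterwards, and appends v to the
-// JSON array being built.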
-
-static void function_processes(const char *transaction, char *function __maybe_unused, int timeout __maybe_unused, bool *cancelled __maybe_unused) {
- struct pid_stat *p;
-
- char *words[PLUGINSD_MAX_WORDS] = { NULL };
- size_t num_words = quoted_strings_splitter_pluginsd(function, words, PLUGINSD_MAX_WORDS);
-
- struct target *category = NULL, *user = NULL, *group = NULL;
- const char *process_name = NULL;
- pid_t pid = 0;
- uid_t uid = 0;
- gid_t gid = 0;
-
- bool filter_pid = false, filter_uid = false, filter_gid = false;
-
- for(int i = 1; i < PLUGINSD_MAX_WORDS ;i++) {
- const char *keyword = get_word(words, num_words, i);
- if(!keyword) break;
-
- if(!category && strncmp(keyword, PROCESS_FILTER_CATEGORY, strlen(PROCESS_FILTER_CATEGORY)) == 0) {
- category = find_target_by_name(apps_groups_root_target, &keyword[strlen(PROCESS_FILTER_CATEGORY)]);
- if(!category) {
- pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST,
- "No category with that name found.");
- return;
- }
- }
- else if(!user && strncmp(keyword, PROCESS_FILTER_USER, strlen(PROCESS_FILTER_USER)) == 0) {
- user = find_target_by_name(users_root_target, &keyword[strlen(PROCESS_FILTER_USER)]);
- if(!user) {
- pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST,
- "No user with that name found.");
- return;
- }
- }
-        else if(!group && strncmp(keyword, PROCESS_FILTER_GROUP, strlen(PROCESS_FILTER_GROUP)) == 0) {
- group = find_target_by_name(groups_root_target, &keyword[strlen(PROCESS_FILTER_GROUP)]);
- if(!group) {
- pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST,
- "No group with that name found.");
- return;
- }
- }
- else if(!process_name && strncmp(keyword, PROCESS_FILTER_PROCESS, strlen(PROCESS_FILTER_PROCESS)) == 0) {
- process_name = &keyword[strlen(PROCESS_FILTER_PROCESS)];
- }
- else if(!pid && strncmp(keyword, PROCESS_FILTER_PID, strlen(PROCESS_FILTER_PID)) == 0) {
- pid = str2i(&keyword[strlen(PROCESS_FILTER_PID)]);
- filter_pid = true;
- }
- else if(!uid && strncmp(keyword, PROCESS_FILTER_UID, strlen(PROCESS_FILTER_UID)) == 0) {
- uid = str2i(&keyword[strlen(PROCESS_FILTER_UID)]);
- filter_uid = true;
- }
- else if(!gid && strncmp(keyword, PROCESS_FILTER_GID, strlen(PROCESS_FILTER_GID)) == 0) {
- gid = str2i(&keyword[strlen(PROCESS_FILTER_GID)]);
- filter_gid = true;
- }
- else if(strcmp(keyword, "help") == 0) {
- apps_plugin_function_processes_help(transaction);
- return;
- }
- else {
- char msg[PLUGINSD_LINE_MAX];
- snprintfz(msg, PLUGINSD_LINE_MAX, "Invalid parameter '%s'", keyword);
- pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST, msg);
- return;
- }
- }
-
- time_t expires = now_realtime_sec() + update_every;
-
- unsigned int cpu_divisor = time_factor * RATES_DETAIL / 100;
- unsigned int memory_divisor = 1024;
- unsigned int io_divisor = 1024 * RATES_DETAIL;
-
- BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS);
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_time_t(wb, "update_every", update_every);
- buffer_json_member_add_string(wb, "help", APPS_PLUGIN_PROCESSES_FUNCTION_DESCRIPTION);
- buffer_json_member_add_array(wb, "data");
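-
-    // schematically, the response being built looks like (illustrative):
-    //   { "status": 200, "type": "table", "update_every": N, "help": "...",
-    //     "data": [ [pid, cmd, ...], ... ], "columns": { ... }, ... }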
-
- NETDATA_DOUBLE
- UserCPU_max = 0.0
- , SysCPU_max = 0.0
- , GuestCPU_max = 0.0
- , CUserCPU_max = 0.0
- , CSysCPU_max = 0.0
- , CGuestCPU_max = 0.0
- , CPU_max = 0.0
- , VMSize_max = 0.0
- , RSS_max = 0.0
- , Shared_max = 0.0
- , Swap_max = 0.0
- , Memory_max = 0.0
- , FDsLimitPercent_max = 0.0
- ;
-
- unsigned long long
- Processes_max = 0
- , Threads_max = 0
- , VoluntaryCtxtSwitches_max = 0
- , NonVoluntaryCtxtSwitches_max = 0
- , Uptime_max = 0
- , MinFlt_max = 0
- , CMinFlt_max = 0
- , TMinFlt_max = 0
- , MajFlt_max = 0
- , CMajFlt_max = 0
- , TMajFlt_max = 0
- , PReads_max = 0
- , PWrites_max = 0
- , RCalls_max = 0
- , WCalls_max = 0
- , Files_max = 0
- , Pipes_max = 0
- , Sockets_max = 0
- , iNotiFDs_max = 0
- , EventFDs_max = 0
- , TimerFDs_max = 0
- , SigFDs_max = 0
- , EvPollFDs_max = 0
- , OtherFDs_max = 0
- , FDs_max = 0
- ;
-
-#ifndef __FreeBSD__
- unsigned long long
- LReads_max = 0
- , LWrites_max = 0
- ;
-#endif
-
-    int rows = 0;
- for(p = root_of_pids; p ; p = p->next) {
- if(!p->updated)
- continue;
-
- if(category && p->target != category)
- continue;
-
- if(user && p->user_target != user)
- continue;
-
- if(group && p->group_target != group)
- continue;
-
-        if(process_name && strcmp(p->comm, process_name) != 0 && (!p->parent || strcmp(p->parent->comm, process_name) != 0))
- continue;
-
- if(filter_pid && p->pid != pid && p->ppid != pid)
- continue;
-
- if(filter_uid && p->uid != uid)
- continue;
-
- if(filter_gid && p->gid != gid)
- continue;
-
- rows++;
-
- buffer_json_add_array_item_array(wb); // for each pid
-
- // IMPORTANT!
-        // THE ORDER SHOULD BE THE SAME AS THE FIELDS!
-
- // pid
- buffer_json_add_array_item_uint64(wb, p->pid);
-
- // cmd
- buffer_json_add_array_item_string(wb, p->comm);
-
- // cmdline
- if (enable_function_cmdline) {
- buffer_json_add_array_item_string(wb, (p->cmdline && *p->cmdline) ? p->cmdline : p->comm);
- }
-
- // ppid
- buffer_json_add_array_item_uint64(wb, p->ppid);
-
- // category
- buffer_json_add_array_item_string(wb, p->target ? p->target->name : "-");
-
- // user
- buffer_json_add_array_item_string(wb, p->user_target ? p->user_target->name : "-");
-
- // uid
- buffer_json_add_array_item_uint64(wb, p->uid);
-
- // group
- buffer_json_add_array_item_string(wb, p->group_target ? p->group_target->name : "-");
-
- // gid
- buffer_json_add_array_item_uint64(wb, p->gid);
-
- // CPU utilization %
- add_value_field_ndd_with_max(wb, CPU, (NETDATA_DOUBLE)(p->utime + p->stime + p->gtime + p->cutime + p->cstime + p->cgtime) / cpu_divisor);
- add_value_field_ndd_with_max(wb, UserCPU, (NETDATA_DOUBLE)(p->utime) / cpu_divisor);
- add_value_field_ndd_with_max(wb, SysCPU, (NETDATA_DOUBLE)(p->stime) / cpu_divisor);
- add_value_field_ndd_with_max(wb, GuestCPU, (NETDATA_DOUBLE)(p->gtime) / cpu_divisor);
- add_value_field_ndd_with_max(wb, CUserCPU, (NETDATA_DOUBLE)(p->cutime) / cpu_divisor);
- add_value_field_ndd_with_max(wb, CSysCPU, (NETDATA_DOUBLE)(p->cstime) / cpu_divisor);
- add_value_field_ndd_with_max(wb, CGuestCPU, (NETDATA_DOUBLE)(p->cgtime) / cpu_divisor);
-
- add_value_field_llu_with_max(wb, VoluntaryCtxtSwitches, p->status_voluntary_ctxt_switches / RATES_DETAIL);
- add_value_field_llu_with_max(wb, NonVoluntaryCtxtSwitches, p->status_nonvoluntary_ctxt_switches / RATES_DETAIL);
-
- // memory MiB
- if(MemTotal)
- add_value_field_ndd_with_max(wb, Memory, (NETDATA_DOUBLE)p->status_vmrss * 100.0 / (NETDATA_DOUBLE)MemTotal);
-
- add_value_field_ndd_with_max(wb, RSS, (NETDATA_DOUBLE)p->status_vmrss / memory_divisor);
- add_value_field_ndd_with_max(wb, Shared, (NETDATA_DOUBLE)p->status_vmshared / memory_divisor);
- add_value_field_ndd_with_max(wb, VMSize, (NETDATA_DOUBLE)p->status_vmsize / memory_divisor);
- add_value_field_ndd_with_max(wb, Swap, (NETDATA_DOUBLE)p->status_vmswap / memory_divisor);
-
- // Physical I/O
- add_value_field_llu_with_max(wb, PReads, p->io_storage_bytes_read / io_divisor);
- add_value_field_llu_with_max(wb, PWrites, p->io_storage_bytes_written / io_divisor);
-
- // Logical I/O
-#ifndef __FreeBSD__
- add_value_field_llu_with_max(wb, LReads, p->io_logical_bytes_read / io_divisor);
- add_value_field_llu_with_max(wb, LWrites, p->io_logical_bytes_written / io_divisor);
-#endif
-
- // I/O calls
- add_value_field_llu_with_max(wb, RCalls, p->io_read_calls / RATES_DETAIL);
- add_value_field_llu_with_max(wb, WCalls, p->io_write_calls / RATES_DETAIL);
-
- // minor page faults
- add_value_field_llu_with_max(wb, MinFlt, p->minflt / RATES_DETAIL);
- add_value_field_llu_with_max(wb, CMinFlt, p->cminflt / RATES_DETAIL);
- add_value_field_llu_with_max(wb, TMinFlt, (p->minflt + p->cminflt) / RATES_DETAIL);
-
- // major page faults
- add_value_field_llu_with_max(wb, MajFlt, p->majflt / RATES_DETAIL);
- add_value_field_llu_with_max(wb, CMajFlt, p->cmajflt / RATES_DETAIL);
- add_value_field_llu_with_max(wb, TMajFlt, (p->majflt + p->cmajflt) / RATES_DETAIL);
-
- // open file descriptors
- add_value_field_ndd_with_max(wb, FDsLimitPercent, p->openfds_limits_percent);
- add_value_field_llu_with_max(wb, FDs, pid_openfds_sum(p));
- add_value_field_llu_with_max(wb, Files, p->openfds.files);
- add_value_field_llu_with_max(wb, Pipes, p->openfds.pipes);
- add_value_field_llu_with_max(wb, Sockets, p->openfds.sockets);
- add_value_field_llu_with_max(wb, iNotiFDs, p->openfds.inotifies);
- add_value_field_llu_with_max(wb, EventFDs, p->openfds.eventfds);
- add_value_field_llu_with_max(wb, TimerFDs, p->openfds.timerfds);
- add_value_field_llu_with_max(wb, SigFDs, p->openfds.signalfds);
- add_value_field_llu_with_max(wb, EvPollFDs, p->openfds.eventpolls);
- add_value_field_llu_with_max(wb, OtherFDs, p->openfds.other);
-
-
- // processes, threads, uptime
- add_value_field_llu_with_max(wb, Processes, p->children_count);
- add_value_field_llu_with_max(wb, Threads, p->num_threads);
- add_value_field_llu_with_max(wb, Uptime, p->uptime);
-
- buffer_json_array_close(wb); // for each pid
- }
-
- buffer_json_array_close(wb); // data
- buffer_json_member_add_object(wb, "columns");
-
- {
- int field_id = 0;
-
- // IMPORTANT!
-        // THE ORDER SHOULD BE THE SAME AS THE VALUES!
- // wb, key, name, visible, type, visualization, transform, decimal_points, units, max, sort, sortable, sticky, unique_key, pointer_to, summary, range
- buffer_rrdf_table_add_field(wb, field_id++, "PID", "Process ID", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY |
- RRDF_FIELD_OPTS_UNIQUE_KEY, NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "Cmd", "Process Name", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- if (enable_function_cmdline) {
- buffer_rrdf_table_add_field(wb, field_id++, "CmdLine", "Command Line", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0,
- NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_NONE, NULL);
- }
-
- buffer_rrdf_table_add_field(wb, field_id++, "PPID", "Parent Process ID", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL,
- NAN, RRDF_FIELD_SORT_ASCENDING, "PID", RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Category", "Category (apps_groups.conf)", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE,
- RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "User", "User Owner", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Uid", "User ID", RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE,
- RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Group", "Group Owner", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Gid", "Group ID", RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE,
- RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_NONE, NULL);
-
- // CPU utilization
- buffer_rrdf_table_add_field(wb, field_id++, "CPU", "Total CPU Time (100% = 1 core)",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", CPU_max, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_VISIBLE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "UserCPU", "User CPU time (100% = 1 core)",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", UserCPU_max,
- RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "SysCPU", "System CPU Time (100% = 1 core)",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", SysCPU_max,
- RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "GuestCPU", "Guest CPU Time (100% = 1 core)",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", GuestCPU_max,
- RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "CUserCPU", "Children User CPU Time (100% = 1 core)",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", CUserCPU_max, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "CSysCPU", "Children System CPU Time (100% = 1 core)",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", CSysCPU_max, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "CGuestCPU", "Children Guest CPU Time (100% = 1 core)",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", CGuestCPU_max, RRDF_FIELD_SORT_DESCENDING,
- NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL);
-
- // CPU context switches
- buffer_rrdf_table_add_field(wb, field_id++, "vCtxSwitch", "Voluntary Context Switches",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "switches/s",
- VoluntaryCtxtSwitches_max, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "iCtxSwitch", "Involuntary Context Switches",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "switches/s",
- NonVoluntaryCtxtSwitches_max, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL);
-
- // memory
- if (MemTotal)
- buffer_rrdf_table_add_field(wb, field_id++, "Memory", "Memory Percentage", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", 100.0, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_VISIBLE, NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "Resident", "Resident Set Size", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "MiB", RSS_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_VISIBLE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Shared", "Shared Pages", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2,
- "MiB", Shared_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_VISIBLE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Virtual", "Virtual Memory Size", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "MiB", VMSize_max, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_VISIBLE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Swap", "Swap Memory", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2,
- "MiB",
- Swap_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
-
- // Physical I/O
- buffer_rrdf_table_add_field(wb, field_id++, "PReads", "Physical I/O Reads", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "KiB/s", PReads_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_VISIBLE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "PWrites", "Physical I/O Writes", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "KiB/s", PWrites_max, RRDF_FIELD_SORT_DESCENDING,
- NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_VISIBLE, NULL);
-
- // Logical I/O
-#ifndef __FreeBSD__
- buffer_rrdf_table_add_field(wb, field_id++, "LReads", "Logical I/O Reads", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "KiB/s", LReads_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "LWrites", "Logical I/O Writes", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "KiB/s", LWrites_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
-#endif
-
- // I/O calls
- buffer_rrdf_table_add_field(wb, field_id++, "RCalls", "I/O Read Calls", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2,
- "calls/s", RCalls_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "WCalls", "I/O Write Calls", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2,
- "calls/s", WCalls_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
-
- // minor page faults
- buffer_rrdf_table_add_field(wb, field_id++, "MinFlt", "Minor Page Faults/s", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "pgflts/s", MinFlt_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "CMinFlt", "Children Minor Page Faults/s",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", CMinFlt_max, RRDF_FIELD_SORT_DESCENDING,
- NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "TMinFlt", "Total Minor Page Faults/s",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", TMinFlt_max, RRDF_FIELD_SORT_DESCENDING,
- NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
-
- // major page faults
- buffer_rrdf_table_add_field(wb, field_id++, "MajFlt", "Major Page Faults/s", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "pgflts/s", MajFlt_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "CMajFlt", "Children Major Page Faults/s",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", CMajFlt_max, RRDF_FIELD_SORT_DESCENDING,
- NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "TMajFlt", "Total Major Page Faults/s",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", TMajFlt_max, RRDF_FIELD_SORT_DESCENDING,
- NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
-
- // open file descriptors
- buffer_rrdf_table_add_field(wb, field_id++, "FDsLimitPercent", "Percentage of Open Descriptors vs Limits",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", FDsLimitPercent_max, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "FDs", "All Open File Descriptors",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", FDs_max, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Files", "Open Files", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0,
- "fds",
- Files_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Pipes", "Open Pipes", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0,
- "fds",
- Pipes_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Sockets", "Open Sockets", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0,
- "fds", Sockets_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "iNotiFDs", "Open iNotify Descriptors",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", iNotiFDs_max, RRDF_FIELD_SORT_DESCENDING,
- NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "EventFDs", "Open Event Descriptors",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", EventFDs_max, RRDF_FIELD_SORT_DESCENDING,
- NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "TimerFDs", "Open Timer Descriptors",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", TimerFDs_max, RRDF_FIELD_SORT_DESCENDING,
- NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "SigFDs", "Open Signal Descriptors",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", SigFDs_max, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "EvPollFDs", "Open Event Poll Descriptors",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", EvPollFDs_max,
- RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "OtherFDs", "Other Open Descriptors",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", OtherFDs_max, RRDF_FIELD_SORT_DESCENDING,
- NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
-
- // processes, threads, uptime
- buffer_rrdf_table_add_field(wb, field_id++, "Processes", "Processes", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0,
- "processes", Processes_max, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Threads", "Threads", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0,
- "threads", Threads_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Uptime", "Uptime in seconds", RRDF_FIELD_TYPE_DURATION,
- RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_DURATION_S, 2,
- "seconds", Uptime_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_MAX,
- RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_VISIBLE, NULL);
- }
- buffer_json_object_close(wb); // columns
-
- buffer_json_member_add_string(wb, "default_sort_column", "CPU");
-
- buffer_json_member_add_object(wb, "charts");
- {
- // CPU chart
- buffer_json_member_add_object(wb, "CPU");
- {
- buffer_json_member_add_string(wb, "name", "CPU Utilization");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "UserCPU");
- buffer_json_add_array_item_string(wb, "SysCPU");
- buffer_json_add_array_item_string(wb, "GuestCPU");
- buffer_json_add_array_item_string(wb, "CUserCPU");
- buffer_json_add_array_item_string(wb, "CSysCPU");
- buffer_json_add_array_item_string(wb, "CGuestCPU");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- buffer_json_member_add_object(wb, "CPUCtxSwitches");
- {
- buffer_json_member_add_string(wb, "name", "CPU Context Switches");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "vCtxSwitch");
- buffer_json_add_array_item_string(wb, "iCtxSwitch");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // Memory chart
- buffer_json_member_add_object(wb, "Memory");
- {
- buffer_json_member_add_string(wb, "name", "Memory");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Virtual");
- buffer_json_add_array_item_string(wb, "Resident");
- buffer_json_add_array_item_string(wb, "Shared");
- buffer_json_add_array_item_string(wb, "Swap");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- if(MemTotal) {
- // Memory chart
- buffer_json_member_add_object(wb, "MemoryPercent");
- {
- buffer_json_member_add_string(wb, "name", "Memory Percentage");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Memory");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
-
-#ifndef __FreeBSD__
- // I/O Reads chart
- buffer_json_member_add_object(wb, "Reads");
- {
- buffer_json_member_add_string(wb, "name", "I/O Reads");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "LReads");
- buffer_json_add_array_item_string(wb, "PReads");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // I/O Writes chart
- buffer_json_member_add_object(wb, "Writes");
- {
- buffer_json_member_add_string(wb, "name", "I/O Writes");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "LWrites");
- buffer_json_add_array_item_string(wb, "PWrites");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // Logical I/O chart
- buffer_json_member_add_object(wb, "LogicalIO");
- {
- buffer_json_member_add_string(wb, "name", "Logical I/O");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "LReads");
- buffer_json_add_array_item_string(wb, "LWrites");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-#endif
-
- // Physical I/O chart
- buffer_json_member_add_object(wb, "PhysicalIO");
- {
- buffer_json_member_add_string(wb, "name", "Physical I/O");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "PReads");
- buffer_json_add_array_item_string(wb, "PWrites");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // I/O Calls chart
- buffer_json_member_add_object(wb, "IOCalls");
- {
- buffer_json_member_add_string(wb, "name", "I/O Calls");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "RCalls");
- buffer_json_add_array_item_string(wb, "WCalls");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // Minor Page Faults chart
- buffer_json_member_add_object(wb, "MinFlt");
- {
- buffer_json_member_add_string(wb, "name", "Minor Page Faults");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "MinFlt");
- buffer_json_add_array_item_string(wb, "CMinFlt");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // Major Page Faults chart
- buffer_json_member_add_object(wb, "MajFlt");
- {
- buffer_json_member_add_string(wb, "name", "Major Page Faults");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "MajFlt");
- buffer_json_add_array_item_string(wb, "CMajFlt");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // Threads chart
- buffer_json_member_add_object(wb, "Threads");
- {
- buffer_json_member_add_string(wb, "name", "Threads");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Threads");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // Processes chart
- buffer_json_member_add_object(wb, "Processes");
- {
- buffer_json_member_add_string(wb, "name", "Processes");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Processes");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // FDs chart
- buffer_json_member_add_object(wb, "FDs");
- {
- buffer_json_member_add_string(wb, "name", "File Descriptors");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Files");
- buffer_json_add_array_item_string(wb, "Pipes");
- buffer_json_add_array_item_string(wb, "Sockets");
- buffer_json_add_array_item_string(wb, "iNotiFDs");
- buffer_json_add_array_item_string(wb, "EventFDs");
- buffer_json_add_array_item_string(wb, "TimerFDs");
- buffer_json_add_array_item_string(wb, "SigFDs");
- buffer_json_add_array_item_string(wb, "EvPollFDs");
- buffer_json_add_array_item_string(wb, "OtherFDs");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // charts
-
- buffer_json_member_add_array(wb, "default_charts");
- {
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "CPU");
- buffer_json_add_array_item_string(wb, "Category");
- buffer_json_array_close(wb);
-
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "Memory");
- buffer_json_add_array_item_string(wb, "Category");
- buffer_json_array_close(wb);
- }
- buffer_json_array_close(wb);
-
- buffer_json_member_add_object(wb, "group_by");
- {
- // group by PID
- buffer_json_member_add_object(wb, "PID");
- {
- buffer_json_member_add_string(wb, "name", "Process Tree by PID");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "PPID");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by Category
- buffer_json_member_add_object(wb, "Category");
- {
- buffer_json_member_add_string(wb, "name", "Process Tree by Category");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Category");
- buffer_json_add_array_item_string(wb, "PPID");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by User
- buffer_json_member_add_object(wb, "User");
- {
- buffer_json_member_add_string(wb, "name", "Process Tree by User");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "User");
- buffer_json_add_array_item_string(wb, "PPID");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by Group
- buffer_json_member_add_object(wb, "Group");
- {
- buffer_json_member_add_string(wb, "name", "Process Tree by Group");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Group");
- buffer_json_add_array_item_string(wb, "PPID");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // group_by
-
- buffer_json_member_add_time_t(wb, "expires", expires);
- buffer_json_finalize(wb);
-
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", expires, wb);
-
- buffer_free(wb);
-}
-
-static bool apps_plugin_exit = false;
-
-int main(int argc, char **argv) {
- clocks_init();
- nd_log_initialize_for_external_plugins("apps.plugin");
-
- pagesize = (size_t)sysconf(_SC_PAGESIZE);
-
- bool send_resource_usage = true;
- {
- const char *s = getenv("NETDATA_INTERNALS_MONITORING");
- if(s && *s && strcmp(s, "NO") == 0)
- send_resource_usage = false;
- }
-
- // since apps.plugin runs as root, prevent it from opening symbolic links
- procfile_open_flags = O_RDONLY|O_NOFOLLOW;
-
- netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
- if(verify_netdata_host_prefix(true) == -1) exit(1);
-
- user_config_dir = getenv("NETDATA_USER_CONFIG_DIR");
- if(user_config_dir == NULL) {
- // netdata_log_info("NETDATA_CONFIG_DIR is not passed from netdata");
- user_config_dir = CONFIG_DIR;
- }
- // else netdata_log_info("Found NETDATA_USER_CONFIG_DIR='%s'", user_config_dir);
-
- stock_config_dir = getenv("NETDATA_STOCK_CONFIG_DIR");
- if(stock_config_dir == NULL) {
- // netdata_log_info("NETDATA_CONFIG_DIR is not passed from netdata");
- stock_config_dir = LIBCONFIG_DIR;
- }
- // else netdata_log_info("Found NETDATA_USER_CONFIG_DIR='%s'", user_config_dir);
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(debug_flags != 0) {
- struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
- if(setrlimit(RLIMIT_CORE, &rl) != 0)
- netdata_log_info("Cannot request unlimited core dumps for debugging... Proceeding anyway...");
-#ifdef HAVE_SYS_PRCTL_H
- prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
-#endif
- }
-#endif /* NETDATA_INTERNAL_CHECKS */
-
- procfile_adaptive_initial_allocation = 1;
-
- get_system_HZ();
-#ifdef __FreeBSD__
- time_factor = 1000000ULL / RATES_DETAIL; // FreeBSD uses usecs
-#else
- time_factor = system_hz; // Linux uses clock ticks
-#endif
-
- get_system_pid_max();
- get_system_cpus_uncached();
-
- parse_args(argc, argv);
-
- if(!check_capabilities() && !am_i_running_as_root() && !check_proc_1_io()) {
- uid_t uid = getuid(), euid = geteuid();
-#ifdef HAVE_CAPABILITY
- netdata_log_error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. "
- "Without these, apps.plugin cannot report disk I/O utilization of other processes. "
- "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; "
- "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; "
- , uid, euid, argv[0], argv[0], argv[0]
- );
-#else
- netdata_log_error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. "
- "Without these, apps.plugin cannot report disk I/O utilization of other processes. "
- "Your system does not support capabilities. "
- "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; "
- , uid, euid, argv[0], argv[0]
- );
-#endif
- }
-
- netdata_log_info("started on pid %d", getpid());
-
- snprintfz(all_user_ids.filename, FILENAME_MAX, "%s/etc/passwd", netdata_configured_host_prefix);
- debug_log("passwd file: '%s'", all_user_ids.filename);
-
- snprintfz(all_group_ids.filename, FILENAME_MAX, "%s/etc/group", netdata_configured_host_prefix);
- debug_log("group file: '%s'", all_group_ids.filename);
-
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
- all_pids_sortlist = callocz(sizeof(pid_t), (size_t)pid_max + 1);
-#endif
-
- all_pids = callocz(sizeof(struct pid_stat *), (size_t) pid_max + 1);
-
- // ------------------------------------------------------------------------
- // the event loop for functions
-
- struct functions_evloop_globals *wg =
- functions_evloop_init(1, "APPS", &apps_and_stdout_mutex, &apps_plugin_exit);
-
- functions_evloop_add_function(wg, "processes", function_processes, PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT);
-
- // ------------------------------------------------------------------------
-
- netdata_mutex_lock(&apps_and_stdout_mutex);
- APPS_PLUGIN_GLOBAL_FUNCTIONS();
-
- usec_t step = update_every * USEC_PER_SEC;
- global_iterations_counter = 1;
- heartbeat_t hb;
- heartbeat_init(&hb);
- for(; !apps_plugin_exit ; global_iterations_counter++) {
- netdata_mutex_unlock(&apps_and_stdout_mutex);
-
-#ifdef NETDATA_PROFILING
-#warning "compiling for profiling"
- static int profiling_count=0;
- profiling_count++;
- if(unlikely(profiling_count > 2000)) exit(0);
- usec_t dt = update_every * USEC_PER_SEC;
-#else
- usec_t dt = heartbeat_next(&hb, step);
-#endif
- netdata_mutex_lock(&apps_and_stdout_mutex);
-
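-        // bail out if the agent has closed our stdout pipe (POLLERR reported by the check below)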
- struct pollfd pollfd = { .fd = fileno(stdout), .events = POLLERR };
- if (unlikely(poll(&pollfd, 1, 0) < 0)) {
- netdata_mutex_unlock(&apps_and_stdout_mutex);
- fatal("Cannot check if a pipe is available");
- }
- if (unlikely(pollfd.revents & POLLERR)) {
- netdata_mutex_unlock(&apps_and_stdout_mutex);
- fatal("Received error on read pipe.");
- }
-
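-        // refresh the system's MemTotal periodically (every 10 iterations)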
- if(global_iterations_counter % 10 == 0)
- get_MemTotal();
-
- if(!collect_data_for_all_processes()) {
- netdata_log_error("Cannot collect /proc data for running processes. Disabling apps.plugin...");
- printf("DISABLE\n");
- netdata_mutex_unlock(&apps_and_stdout_mutex);
- exit(1);
- }
-
- calculate_netdata_statistics();
- normalize_utilization(apps_groups_root_target);
-
- if(send_resource_usage)
- send_resource_usage_to_netdata(dt);
-
-#ifndef __FreeBSD__
- send_proc_states_count(dt);
-#endif
-
- send_charts_updates_to_netdata(apps_groups_root_target, "app", "app_group", "Apps");
- send_collected_data_to_netdata(apps_groups_root_target, "app", dt);
-
- if (enable_users_charts) {
- send_charts_updates_to_netdata(users_root_target, "user", "user", "Users");
- send_collected_data_to_netdata(users_root_target, "user", dt);
- }
-
- if (enable_groups_charts) {
- send_charts_updates_to_netdata(groups_root_target, "usergroup", "user_group", "User Groups");
- send_collected_data_to_netdata(groups_root_target, "usergroup", dt);
- }
-
- fflush(stdout);
-
- show_guest_time_old = show_guest_time;
-
- debug_log("done Loop No %zu", global_iterations_counter);
- }
- netdata_mutex_unlock(&apps_and_stdout_mutex);
-}
diff --git a/collectors/apps.plugin/integrations/applications.md b/collectors/apps.plugin/integrations/applications.md
deleted file mode 100644
index e5219fcc2..000000000
--- a/collectors/apps.plugin/integrations/applications.md
+++ /dev/null
@@ -1,114 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/apps.plugin/integrations/applications.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/apps.plugin/metadata.yaml"
-sidebar_label: "Applications"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Processes and System Services"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Applications
-
-
-<img src="https://netdata.cloud/img/applications.svg" width="150"/>
-
-
-Plugin: apps.plugin
-Module: apps
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Applications for optimal software performance and resource usage.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per applications group
-
-These metrics refer to the application group.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| app_group | The name of the group defined in the configuration. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| app.cpu_utilization | user, system | percentage |
-| app.cpu_guest_utilization | guest | percentage |
-| app.cpu_context_switches | voluntary, involuntary | switches/s |
-| app.mem_usage | rss | MiB |
-| app.mem_private_usage | mem | MiB |
-| app.vmem_usage | vmem | MiB |
-| app.mem_page_faults | minor, major | pgfaults/s |
-| app.swap_usage | swap | MiB |
-| app.disk_physical_io | reads, writes | KiB/s |
-| app.disk_logical_io | reads, writes | KiB/s |
-| app.processes | processes | processes |
-| app.threads | threads | threads |
-| app.fds_open_limit | limit | percentage |
-| app.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |
-| app.uptime | uptime | seconds |
-| app.uptime_summary | min, avg, max | seconds |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/apps.plugin/integrations/user_groups.md b/collectors/apps.plugin/integrations/user_groups.md
deleted file mode 100644
index 4ccbfc95f..000000000
--- a/collectors/apps.plugin/integrations/user_groups.md
+++ /dev/null
@@ -1,114 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/apps.plugin/integrations/user_groups.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/apps.plugin/metadata.yaml"
-sidebar_label: "User Groups"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Processes and System Services"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# User Groups
-
-
-<img src="https://netdata.cloud/img/user.svg" width="150"/>
-
-
-Plugin: apps.plugin
-Module: groups
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration monitors resource utilization per user group.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per user group
-
-These metrics refer to the user group.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| user_group | The name of the user group. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| usergroup.cpu_utilization | user, system | percentage |
-| usergroup.cpu_guest_utilization | guest | percentage |
-| usergroup.cpu_context_switches | voluntary, involuntary | switches/s |
-| usergroup.mem_usage | rss | MiB |
-| usergroup.mem_private_usage | mem | MiB |
-| usergroup.vmem_usage | vmem | MiB |
-| usergroup.mem_page_faults | minor, major | pgfaults/s |
-| usergroup.swap_usage | swap | MiB |
-| usergroup.disk_physical_io | reads, writes | KiB/s |
-| usergroup.disk_logical_io | reads, writes | KiB/s |
-| usergroup.processes | processes | processes |
-| usergroup.threads | threads | threads |
-| usergroup.fds_open_limit | limit | percentage |
-| usergroup.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |
-| usergroup.uptime | uptime | seconds |
-| usergroup.uptime_summary | min, avg, max | seconds |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/apps.plugin/integrations/users.md b/collectors/apps.plugin/integrations/users.md
deleted file mode 100644
index c151fd8a2..000000000
--- a/collectors/apps.plugin/integrations/users.md
+++ /dev/null
@@ -1,114 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/apps.plugin/integrations/users.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/apps.plugin/metadata.yaml"
-sidebar_label: "Users"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Processes and System Services"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Users
-
-
-<img src="https://netdata.cloud/img/users.svg" width="150"/>
-
-
-Plugin: apps.plugin
-Module: users
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration monitors resource utilization per user.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per user
-
-These metrics refer to the user.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| user | The name of the user. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| user.cpu_utilization | user, system | percentage |
-| user.cpu_guest_utilization | guest | percentage |
-| user.cpu_context_switches | voluntary, involuntary | switches/s |
-| user.mem_usage | rss | MiB |
-| user.mem_private_usage | mem | MiB |
-| user.vmem_usage | vmem | MiB |
-| user.mem_page_faults | minor, major | pgfaults/s |
-| user.swap_usage | swap | MiB |
-| user.disk_physical_io | reads, writes | KiB/s |
-| user.disk_logical_io | reads, writes | KiB/s |
-| user.processes | processes | processes |
-| user.threads | threads | threads |
-| user.fds_open_limit | limit | percentage |
-| user.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |
-| user.uptime | uptime | seconds |
-| user.uptime_summary | min, avg, max | seconds |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/apps.plugin/metadata.yaml b/collectors/apps.plugin/metadata.yaml
deleted file mode 100644
index f5f22853a..000000000
--- a/collectors/apps.plugin/metadata.yaml
+++ /dev/null
@@ -1,554 +0,0 @@
-plugin_name: apps.plugin
-modules:
- # removed system.processes_state
- - meta:
- plugin_name: apps.plugin
- module_name: apps
- monitored_instance:
- name: Applications
- link: ""
- categories:
- - data-collection.processes-and-system-services
- icon_filename: "applications.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - applications
- - processes
- - os
- - host monitoring
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor Applications for optimal software performance and resource usage."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: applications group
- description: These metrics refer to the application group.
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.cpu_utilization
- description: Apps CPU utilization (100% = 1 core)
- unit: percentage
- chart_type: stacked
- dimensions:
- - name: user
- - name: system
- - name: app.cpu_guest_utilization
- description: Apps CPU guest utilization (100% = 1 core)
- unit: percentage
- chart_type: line
- dimensions:
- - name: guest
- - name: app.cpu_context_switches
- description: Apps CPU context switches
- unit: switches/s
- chart_type: stacked
- dimensions:
- - name: voluntary
- - name: involuntary
- - name: app.mem_usage
- description: Apps memory RSS usage
- unit: MiB
- chart_type: line
- dimensions:
- - name: rss
- - name: app.mem_private_usage
- description: Apps memory usage without shared
- unit: MiB
- chart_type: stacked
- dimensions:
- - name: mem
- - name: app.vmem_usage
- description: Apps virtual memory size
- unit: MiB
- chart_type: line
- dimensions:
- - name: vmem
- - name: app.mem_page_faults
- description: Apps memory page faults
- unit: pgfaults/s
- chart_type: stacked
- dimensions:
- - name: minor
- - name: major
- - name: app.swap_usage
- description: Apps swap usage
- unit: MiB
- chart_type: area
- dimensions:
- - name: swap
- - name: app.disk_physical_io
- description: Apps disk physical IO
- unit: KiB/s
- chart_type: area
- dimensions:
- - name: reads
- - name: writes
- - name: app.disk_logical_io
- description: Apps disk logical IO
- unit: KiB/s
- chart_type: area
- dimensions:
- - name: reads
- - name: writes
- - name: app.processes
- description: Apps processes
- unit: processes
- chart_type: line
- dimensions:
- - name: processes
- - name: app.threads
- description: Apps threads
- unit: threads
- chart_type: line
- dimensions:
- - name: threads
- - name: app.fds_open_limit
- description: Apps open file descriptors limit
- unit: percentage
- chart_type: line
- dimensions:
- - name: limit
- - name: app.fds_open
- description: Apps open file descriptors
- unit: fds
- chart_type: stacked
- dimensions:
- - name: files
- - name: sockets
- - name: pipes
- - name: inotifies
- - name: event
- - name: timer
- - name: signal
- - name: eventpolls
- - name: other
- - name: app.uptime
- description: Apps uptime
- unit: seconds
- chart_type: line
- dimensions:
- - name: uptime
- - name: app.uptime_summary
- description: Apps uptime summary
- unit: seconds
- chart_type: area
- dimensions:
- - name: min
- - name: avg
- - name: max
- - meta:
- plugin_name: apps.plugin
- module_name: groups
- monitored_instance:
- name: User Groups
- link: ""
- categories:
- - data-collection.processes-and-system-services
- icon_filename: "user.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - groups
- - processes
- - user auditing
- - authorization
- - os
- - host monitoring
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration monitors resource utilization on a user groups context."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: user group
- description: These metrics refer to the user group.
- labels:
- - name: user_group
- description: The name of the user group.
- metrics:
- - name: usergroup.cpu_utilization
- description: User Groups CPU utilization (100% = 1 core)
- unit: percentage
- chart_type: stacked
- dimensions:
- - name: user
- - name: system
- - name: usergroup.cpu_guest_utilization
- description: User Groups CPU guest utilization (100% = 1 core)
- unit: percentage
- chart_type: line
- dimensions:
- - name: guest
- - name: usergroup.cpu_context_switches
- description: User Groups CPU context switches
- unit: switches/s
- chart_type: stacked
- dimensions:
- - name: voluntary
- - name: involuntary
- - name: usergroup.mem_usage
- description: User Groups memory RSS usage
- unit: MiB
- chart_type: area
- dimensions:
- - name: rss
- - name: usergroup.mem_private_usage
- description: User Groups memory usage without shared
- unit: MiB
- chart_type: area
- dimensions:
- - name: mem
- - name: usergroup.vmem_usage
- description: User Groups virtual memory size
- unit: MiB
- chart_type: line
- dimensions:
- - name: vmem
- - name: usergroup.mem_page_faults
- description: User Groups memory page faults
- unit: pgfaults/s
- chart_type: stacked
- dimensions:
- - name: minor
- - name: major
- - name: usergroup.swap_usage
- description: User Groups swap usage
- unit: MiB
- chart_type: area
- dimensions:
- - name: swap
- - name: usergroup.disk_physical_io
- description: User Groups disk physical IO
- unit: KiB/s
- chart_type: area
- dimensions:
- - name: reads
- - name: writes
- - name: usergroup.disk_logical_io
- description: User Groups disk logical IO
- unit: KiB/s
- chart_type: area
- dimensions:
- - name: reads
- - name: writes
- - name: usergroup.processes
- description: User Groups processes
- unit: processes
- chart_type: line
- dimensions:
- - name: processes
- - name: usergroup.threads
- description: User Groups threads
- unit: threads
- chart_type: line
- dimensions:
- - name: threads
- - name: usergroup.fds_open_limit
- description: User Groups open file descriptors limit
- unit: percentage
- chart_type: line
- dimensions:
- - name: limit
- - name: usergroup.fds_open
- description: User Groups open file descriptors
- unit: fds
- chart_type: stacked
- dimensions:
- - name: files
- - name: sockets
- - name: pipes
- - name: inotifies
- - name: event
- - name: timer
- - name: signal
- - name: eventpolls
- - name: other
- - name: usergroup.uptime
- description: User Groups uptime
- unit: seconds
- chart_type: line
- dimensions:
- - name: uptime
- - name: usergroup.uptime_summary
- description: User Groups uptime summary
- unit: seconds
- chart_type: area
- dimensions:
- - name: min
- - name: avg
- - name: max
- - meta:
- plugin_name: apps.plugin
- module_name: users
- monitored_instance:
- name: Users
- link: ""
- categories:
- - data-collection.processes-and-system-services
- icon_filename: "users.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - users
- - processes
- - os
- - host monitoring
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration monitors resource utilization on a user context."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: user
- description: These metrics refer to the user.
- labels:
- - name: user
- description: The name of the user.
- metrics:
- - name: user.cpu_utilization
- description: User CPU utilization (100% = 1 core)
- unit: percentage
- chart_type: stacked
- dimensions:
- - name: user
- - name: system
- - name: user.cpu_guest_utilization
- description: User CPU guest utilization (100% = 1 core)
- unit: percentage
- chart_type: line
- dimensions:
- - name: guest
- - name: user.cpu_context_switches
- description: User CPU context switches
- unit: switches/s
- chart_type: stacked
- dimensions:
- - name: voluntary
- - name: involuntary
- - name: user.mem_usage
- description: User memory RSS usage
- unit: MiB
- chart_type: area
- dimensions:
- - name: rss
- - name: user.mem_private_usage
- description: User memory usage without shared
- unit: MiB
- chart_type: area
- dimensions:
- - name: mem
- - name: user.vmem_usage
- description: User virtual memory size
- unit: MiB
- chart_type: line
- dimensions:
- - name: vmem
- - name: user.mem_page_faults
- description: User memory page faults
- unit: pgfaults/s
- chart_type: stacked
- dimensions:
- - name: minor
- - name: major
- - name: user.swap_usage
- description: User swap usage
- unit: MiB
- chart_type: area
- dimensions:
- - name: swap
- - name: user.disk_physical_io
- description: User disk physical IO
- unit: KiB/s
- chart_type: area
- dimensions:
- - name: reads
- - name: writes
- - name: user.disk_logical_io
- description: User disk logical IO
- unit: KiB/s
- chart_type: area
- dimensions:
- - name: reads
- - name: writes
- - name: user.processes
- description: User processes
- unit: processes
- chart_type: line
- dimensions:
- - name: processes
- - name: user.threads
- description: User threads
- unit: threads
- chart_type: line
- dimensions:
- - name: threads
- - name: user.fds_open_limit
- description: User open file descriptors limit
- unit: percentage
- chart_type: line
- dimensions:
- - name: limit
- - name: user.fds_open
- description: User open file descriptors
- unit: fds
- chart_type: stacked
- dimensions:
- - name: files
- - name: sockets
- - name: pipes
- - name: inotifies
- - name: event
- - name: timer
- - name: signal
- - name: eventpolls
- - name: other
- - name: user.uptime
- description: User uptime
- unit: seconds
- chart_type: line
- dimensions:
- - name: uptime
- - name: user.uptime_summary
- description: User uptime summary
- unit: seconds
- chart_type: area
- dimensions:
- - name: min
- - name: avg
- - name: max
diff --git a/collectors/cgroups.plugin/Makefile.am b/collectors/cgroups.plugin/Makefile.am
deleted file mode 100644
index 0f6062420..000000000
--- a/collectors/cgroups.plugin/Makefile.am
+++ /dev/null
@@ -1,23 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-CLEANFILES = \
- cgroup-name.sh \
- cgroup-network-helper.sh \
- $(NULL)
-
-include $(top_srcdir)/build/subst.inc
-SUFFIXES = .in
-
-dist_plugins_SCRIPTS = \
- cgroup-name.sh \
- cgroup-network-helper.sh \
- $(NULL)
-
-dist_noinst_DATA = \
- cgroup-name.sh.in \
- cgroup-network-helper.sh.in \
- README.md \
- $(NULL)
diff --git a/collectors/cgroups.plugin/README.md b/collectors/cgroups.plugin/README.md
deleted file mode 100644
index ba6a20e5e..000000000
--- a/collectors/cgroups.plugin/README.md
+++ /dev/null
@@ -1,302 +0,0 @@
-<!--
-title: "Monitor Cgroups (cgroups.plugin)"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/README.md"
-sidebar_label: "Monitor Cgroups"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/Virtualized environments/Containers"
--->
-
-# Monitor Cgroups (cgroups.plugin)
-
-You can monitor containers and virtual machines using **cgroups**.
-
-cgroups (or control groups) are a Linux kernel feature that provides accounting and resource usage limiting for
-processes. When cgroups are bundled with namespaces (i.e. isolation), they form what we usually call **containers**.
-
-cgroups are hierarchical, meaning that cgroups can contain child cgroups, which can contain more cgroups, and so on.
-All accounting is reported (and all resource usage limits are applied) hierarchically as well.
-
-To visualize cgroup metrics, Netdata provides configuration for cherry-picking the cgroups of interest. By default
-(without any configuration) Netdata picks up **systemd services**, all kinds of **containers** (lxc, docker, etc.)
-and **virtual machines** spawned by managers that register them with cgroups (qemu, libvirt, etc.).
-
-## Configuring Netdata for cgroups
-
-In general, no additional settings are required. Netdata discovers all available cgroups on the host system and
-collects their metrics.
-
-### How Netdata finds the available cgroups
-
-Linux exposes resource usage reporting and provides dynamic configuration for cgroups, using virtual files (usually)
-under `/sys/fs/cgroup`. Netdata reads `/proc/self/mountinfo` to detect the exact mount point of cgroups. Netdata also
-allows manual configuration of this mount point, using these settings:
-
-```text
-[plugin:cgroups]
- check for new cgroups every = 10
- path to /sys/fs/cgroup/cpuacct = /sys/fs/cgroup/cpuacct
- path to /sys/fs/cgroup/blkio = /sys/fs/cgroup/blkio
- path to /sys/fs/cgroup/memory = /sys/fs/cgroup/memory
- path to /sys/fs/cgroup/devices = /sys/fs/cgroup/devices
-```
-
-Netdata rescans these directories for added or removed cgroups every `check for new cgroups every` seconds.
-
-### Hierarchical search for cgroups
-
-Since cgroups are hierarchical, for each of the directories shown above, Netdata walks through the subdirectories
-recursively searching for cgroups (each subdirectory is another cgroup).
-
-To provide a sane default for this setting, Netdata uses the following pattern list (patterns starting with `!` give a
-negative match and their order is important: the first matching a path will be used):
-
-```text
-[plugin:cgroups]
- search for cgroups in subpaths matching = !*/init.scope !*-qemu !/init.scope !/system !/systemd !/user !/user.slice *
-```
-
-So, we disable checking for **child cgroups** in systemd internal
-cgroups ([systemd services are monitored by Netdata](#monitoring-systemd-services)), user cgroups (normally used for
-desktop and remote user sessions), qemu virtual machines (child cgroups of virtual machines) and `init.scope`. All
-others are enabled.
-
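-For example, with the list above (a minimal illustration of the first-match-wins rule):
-
-```text
-/user.slice     -> matches !/user.slice, not searched
-/system         -> matches !/system, not searched
-/machine.slice  -> matches only the final *, searched recursively
-```
-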
-### Unified cgroups (cgroups v2) support
-
-Netdata automatically detects the cgroups version. If detection fails, Netdata assumes v1.
-To switch to v2 manually, add:
-
-```text
-[plugin:cgroups]
- use unified cgroups = yes
- path to unified cgroups = /sys/fs/cgroup
-```
-
-Unified cgroups use the same name pattern matching as v1 cgroups. `cgroup_enable_systemd_services_detailed_memory` is
-currently unsupported when using unified cgroups.
-
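-A quick way to check which version a host uses (an illustration only; this is not how Netdata itself detects it):
-
-```sh
-# prints "cgroup2fs" on a unified (v2) host, typically "tmpfs" on a v1 host
-stat -fc %T /sys/fs/cgroup
-```
-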
-### Enabled cgroups
-
-To provide a sane default, Netdata uses the
-following [pattern list](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md):
-
-- Checks the pattern against the path of the cgroup
-
- ```text
- [plugin:cgroups]
- enable by default cgroups matching = !*/init.scope *.scope !*/vcpu* !*/emulator !*.mount !*.partition !*.service !*.slice !*.swap !*.user !/ !/docker !/libvirt !/lxc !/lxc/*/ns !/lxc/*/ns/* !/machine !/qemu !/system !/systemd !/user *
- ```
-
-- Checks the pattern against the name of the cgroup (as you see it on the dashboard)
-
- ```text
- [plugin:cgroups]
- enable by default cgroups names matching = *
- ```
-
-Renaming is configured with the following options:
-
-```text
-[plugin:cgroups]
- run script to rename cgroups matching = *.scope *docker* *lxc* *qemu* !/ !*.mount !*.partition !*.service !*.slice !*.swap !*.user *
- script to get cgroup names = /usr/libexec/netdata/plugins.d/cgroup-name.sh
-```
-
-The whole point of the additional pattern list is to limit the number of times the script is called. Without this
-pattern list, the script might be called thousands of times, depending on the number of cgroups available in the system.
-
-The above pattern list is matched against the path of the cgroup. For matched cgroups, Netdata calls the
-script [cgroup-name.sh](https://raw.githubusercontent.com/netdata/netdata/master/collectors/cgroups.plugin/cgroup-name.sh)
-to get their names. This script queries `docker`, `kubectl`, or `podman`, or applies heuristics, to find a suitable
-name for the cgroup.
-
-#### Note on Podman container names
-
-Podman's security model is a lot more restrictive than Docker's, so Netdata will not be able to detect container names
-out of the box unless they were started by the same user as Netdata itself.
-
-If Podman is used in "rootful" mode, it's also possible to use `podman system service` to grant Netdata access to
-container names. To do this, ensure `podman system service` is running and Netdata has access
-to `/run/podman/podman.sock` (the default permissions as specified by upstream are `0600`, with owner `root`, so you
-will have to adjust the configuration).
-
-[Docker Socket Proxy (HAProxy)](https://github.com/Tecnativa/docker-socket-proxy) or [CetusGuard](https://github.com/hectorm/cetusguard)
-can also be used to give Netdata restricted access to the socket. Note that `PODMAN_HOST` in Netdata's environment should
-be set to the proxy's URL in this case.
-
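-A minimal sketch of the rootful setup described above (assuming the agent runs as the `netdata` user; the socket path
-is the upstream default):
-
-```sh
-# run the Podman API service on the default rootful socket
-sudo podman system service --time=0 unix:///run/podman/podman.sock &
-
-# the socket is root-owned with mode 0600; grant the netdata user access to it
-sudo setfacl -m u:netdata:rw /run/podman/podman.sock
-```
-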
-### Charts with zero metrics
-
-By default, Netdata enables monitoring metrics only when they are not zero; if they are constantly zero, they are
-ignored. Metrics that start having values after Netdata is started will be detected, and their charts will be
-automatically added to the dashboard (a refresh of the dashboard is needed for them to appear, though). To enable a
-chart permanently, set `yes` instead of `auto`. For example:
-
-```text
-[plugin:cgroups]
- enable memory (used mem including cache) = yes
-```
-
-You can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero
-metrics for all internal Netdata plugins.
-
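-For example, in `netdata.conf`:
-
-```text
-[global]
-  enable zero metrics = yes
-```
-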
-### Alerts
-
-CPU and memory limits are watched and used to raise alerts. Memory usage for every cgroup is checked against the `ram`
-and `ram+swap` limits. CPU usage for every cgroup is checked against `cpuset.cpus` and the `cpu.cfs_period_us` + `cpu.cfs_quota_us` pair assigned for the cgroup. Configuration for the alerts is available in the `health.d/cgroups.conf`
-file.
-
-## Monitoring systemd services
-
-Netdata monitors **systemd services**. Example:
-
-![image](https://cloud.githubusercontent.com/assets/2662304/21964372/20cd7b84-db53-11e6-98a2-b9c986b082c0.png)
-
-Support per distribution:
-
-| system | charts shown | `/sys/fs/cgroup` tree | comments |
-|:----------------:|:------------:|:------------------------------------:|:--------------------------|
-| Arch Linux | YES | | |
-| Gentoo | NO | | can be enabled, see below |
-| Ubuntu 16.04 LTS | YES | | |
-| Ubuntu 16.10 | YES | [here](http://pastebin.com/PiWbQEXy) | |
-| Fedora 25 | YES | [here](http://pastebin.com/ax0373wF) | |
-| Debian 8 | NO | | can be enabled, see below |
-| AMI | NO | [here](http://pastebin.com/FrxmptjL) | not a systemd system |
-| CentOS 7.3.1611 | NO | [here](http://pastebin.com/SpzgezAg) | can be enabled, see below |
-
-### Monitored systemd service metrics
-
-- CPU utilization
-- Used memory
-- RSS memory
-- Mapped memory
-- Cache memory
-- Writeback memory
-- Memory minor page faults
-- Memory major page faults
-- Memory charging activity
-- Memory uncharging activity
-- Memory limit failures
-- Swap memory used
-- Disk read bandwidth
-- Disk write bandwidth
-- Disk read operations
-- Disk write operations
-- Throttle disk read bandwidth
-- Throttle disk write bandwidth
-- Throttle disk read operations
-- Throttle disk write operations
-- Queued disk read operations
-- Queued disk write operations
-- Merged disk read operations
-- Merged disk write operations
-
-### How to enable cgroup accounting on systemd systems where it is disabled by default
-
-You can verify that accounting is disabled by running `systemd-cgtop`: the program will show resources only for the
-cgroup `/`, while all services will show nothing.
-
-To enable cgroup accounting, execute this:
-
-```sh
-sed -e 's|^#Default\(.*\)Accounting=.*$|Default\1Accounting=yes|g' /etc/systemd/system.conf >/tmp/system.conf
-```
-
-To see the changes it made, run this:
-
-```sh
-# diff /etc/systemd/system.conf /tmp/system.conf
-40,44c40,44
-< #DefaultCPUAccounting=no
-< #DefaultIOAccounting=no
-< #DefaultBlockIOAccounting=no
-< #DefaultMemoryAccounting=no
-< #DefaultTasksAccounting=yes
----
-> DefaultCPUAccounting=yes
-> DefaultIOAccounting=yes
-> DefaultBlockIOAccounting=yes
-> DefaultMemoryAccounting=yes
-> DefaultTasksAccounting=yes
-```
-
-If you are happy with the changes, run:
-
-```sh
-# copy the file to the right location
-sudo cp /tmp/system.conf /etc/systemd/system.conf
-
-# restart systemd to take it into account
-sudo systemctl daemon-reexec
-```
-
-(`systemctl daemon-reload` does not reload `system.conf`, so you have to
-execute `systemctl daemon-reexec`).
-
-Now, when you run `systemd-cgtop`, services will start reporting usage (if they do not, restart any service to wake them up). Refresh your Netdata dashboard, and you will have the charts too.
-
-If memory accounting is missing, you will need to enable it in your kernel by appending the following kernel boot
-options and rebooting:
-
-```sh
-cgroup_enable=memory swapaccount=1
-```
-
-You can add the above directly to the `linux` line in your `/boot/grub/grub.cfg`, or append them to
-`GRUB_CMDLINE_LINUX` in `/etc/default/grub` (in which case you will have to run `update-grub` before rebooting). On
-DigitalOcean Debian images you may have to set it in `/etc/default/grub.d/50-cloudimg-settings.cfg`.
-
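-A minimal sketch of the `/etc/default/grub` route (any existing value of `GRUB_CMDLINE_LINUX` on your system must be
-preserved; only the two options are appended):
-
-```sh
-# /etc/default/grub -- append the options to the existing value
-GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
-
-# regenerate the grub configuration, then reboot
-sudo update-grub
-sudo reboot
-```
-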
-Which systemd services are monitored by Netdata is determined by the following pattern list:
-
-```text
-[plugin:cgroups]
- cgroups to match as systemd services = !/system.slice/*/*.service /system.slice/*.service
-```
-
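-For example, with the default list above (the unit names are hypothetical):
-
-```text
-/system.slice/nginx.service                          -> monitored as a systemd service
-/system.slice/system-getty.slice/getty@tty1.service  -> excluded by !/system.slice/*/*.service
-```
-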
-- - -
-
-## Monitoring ephemeral containers
-
-Netdata monitors containers automatically when it is installed on the host, or when it is installed in a container that
-has access to the `/proc` and `/sys` filesystems of the host.
-
-Network interfaces and cgroups (containers) are self-cleaned. When a network interface or container stops, Netdata might log
-a few errors in `error.log` complaining about files it cannot find, but immediately:
-
-1. It will detect this is a removed container or network interface
-2. It will freeze/pause all alerts for them
-3. It will mark their charts as obsolete
-4. Obsolete charts are not offered on new dashboard sessions (so hit F5 and the charts are gone)
-5. Existing dashboard sessions will continue to see them, but of course they will not refresh
-6. Obsolete charts will be removed from memory 1 hour after the last user viewed them (configurable
-   with `[global].cleanup obsolete charts after seconds = 3600` in `netdata.conf`)
-7. When obsolete charts are removed from memory they are also deleted from disk (configurable
-   with `[global].delete obsolete charts files = yes`; both settings are shown below)
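-
-Both settings live in the `[global]` section of `netdata.conf`:
-
-```text
-[global]
-  cleanup obsolete charts after seconds = 3600
-  delete obsolete charts files = yes
-```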
-
-### Monitored container metrics
-
-- CPU usage
-- CPU usage within the limits
-- CPU usage per core
-- Memory usage
-- Writeback memory
-- Memory activity
-- Memory page faults
-- Used memory
-- Used RAM within the limits
-- Memory utilization
-- Memory limit failures
-- I/O bandwidth (all disks)
-- Serviced I/O operations (all disks)
-- Throttle I/O bandwidth (all disks)
-- Throttle serviced I/O operations (all disks)
-- Queued I/O operations (all disks)
-- Merged I/O operations (all disks)
-- CPU pressure
-- Memory pressure
-- Memory full pressure
-- I/O pressure
-- I/O full pressure
-
-Network interfaces are monitored by means of
-the [proc plugin](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md#monitored-network-interface-metrics).
diff --git a/collectors/cgroups.plugin/cgroup-charts.c b/collectors/cgroups.plugin/cgroup-charts.c
deleted file mode 100644
index a89e8ac45..000000000
--- a/collectors/cgroups.plugin/cgroup-charts.c
+++ /dev/null
@@ -1,1526 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "cgroup-internals.h"
-
-void update_cpu_utilization_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_cpu;
-
- if (unlikely(!cg->st_cpu)) {
- char *title;
- char *context;
- int prio;
-
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services CPU utilization (100%% = 1 core)";
- context = "systemd.service.cpu.utilization";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD;
- } else {
- title = k8s_is_kubepod(cg) ? "CPU Usage (100%% = 1000 mCPU)" : "CPU Usage (100%% = 1 core)";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu" : "cgroup.cpu";
- prio = cgroup_containers_chart_priority;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_cpu = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "cpu",
- NULL,
- "cpu",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_STACKED);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
-
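-        // cgroups v1 report CPU time in clock ticks (system_hz); unified (v2) cgroups report microseconds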
- if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- cg->st_cpu_rd_user = rrddim_add(chart, "user", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
- cg->st_cpu_rd_system = rrddim_add(chart, "system", NULL, 100, system_hz, RRD_ALGORITHM_INCREMENTAL);
- } else {
- cg->st_cpu_rd_user = rrddim_add(chart, "user", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
- cg->st_cpu_rd_system = rrddim_add(chart, "system", NULL, 100, 1000000, RRD_ALGORITHM_INCREMENTAL);
- }
- }
-
- rrddim_set_by_pointer(chart, cg->st_cpu_rd_user, (collected_number)cg->cpuacct_stat.user);
- rrddim_set_by_pointer(chart, cg->st_cpu_rd_system, (collected_number)cg->cpuacct_stat.system);
- rrdset_done(chart);
-}
-
-void update_cpu_utilization_limit_chart(struct cgroup *cg, NETDATA_DOUBLE cpu_limit) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- RRDSET *chart = cg->st_cpu_limit;
-
- if (unlikely(!cg->st_cpu_limit)) {
- char *title = "CPU Usage within the limits";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_limit" : "cgroup.cpu_limit";
- int prio = cgroup_containers_chart_priority - 1;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_cpu_limit = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "cpu_limit",
- NULL,
- "cpu",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
-
- if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED))
- rrddim_add(chart, "used", NULL, 1, system_hz, RRD_ALGORITHM_ABSOLUTE);
- else
- rrddim_add(chart, "used", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
- cg->prev_cpu_usage = (NETDATA_DOUBLE)(cg->cpuacct_stat.user + cg->cpuacct_stat.system) * 100;
- }
-
- NETDATA_DOUBLE cpu_usage = 0;
- cpu_usage = (NETDATA_DOUBLE)(cg->cpuacct_stat.user + cg->cpuacct_stat.system) * 100;
- NETDATA_DOUBLE cpu_used = 100 * (cpu_usage - cg->prev_cpu_usage) / (cpu_limit * cgroup_update_every);
-
- rrdset_isnot_obsolete___safe_from_collector_thread(chart);
-
- rrddim_set(chart, "used", (cpu_used > 0) ? (collected_number)cpu_used : 0);
-
- cg->prev_cpu_usage = cpu_usage;
-
- rrdsetvar_custom_chart_variable_set(cg->st_cpu, cg->chart_var_cpu_limit, cpu_limit);
- rrdset_done(chart);
-}
-
-void update_cpu_throttled_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- RRDSET *chart = cg->st_cpu_nr_throttled;
-
- if (unlikely(!cg->st_cpu_nr_throttled)) {
- char *title = "CPU Throttled Runnable Periods";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.throttled" : "cgroup.throttled";
- int prio = cgroup_containers_chart_priority + 10;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_cpu_nr_throttled = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "throttled",
- NULL,
- "cpu",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- rrddim_add(chart, "throttled", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(chart, "throttled", (collected_number)cg->cpuacct_cpu_throttling.nr_throttled_perc);
- rrdset_done(chart);
-}
-
-void update_cpu_throttled_duration_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- RRDSET *chart = cg->st_cpu_throttled_time;
-
- if (unlikely(!cg->st_cpu_throttled_time)) {
- char *title = "CPU Throttled Time Duration";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.throttled_duration" : "cgroup.throttled_duration";
- int prio = cgroup_containers_chart_priority + 15;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_cpu_throttled_time = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "throttled_duration",
- NULL,
- "cpu",
- context,
- title,
- "ms",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- rrddim_add(chart, "duration", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(chart, "duration", (collected_number)cg->cpuacct_cpu_throttling.throttled_time);
- rrdset_done(chart);
-}
-
-void update_cpu_shares_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- RRDSET *chart = cg->st_cpu_shares;
-
- if (unlikely(!cg->st_cpu_shares)) {
- char *title = "CPU Time Relative Share";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_shares" : "cgroup.cpu_shares";
- int prio = cgroup_containers_chart_priority + 20;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_cpu_shares = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "cpu_shares",
- NULL,
- "cpu",
- context,
- title,
- "shares",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- rrddim_add(chart, "shares", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(chart, "shares", (collected_number)cg->cpuacct_cpu_shares.shares);
- rrdset_done(chart);
-}
-
-void update_cpu_per_core_usage_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- char id[RRD_ID_LENGTH_MAX + 1];
- unsigned int i;
-
- if (unlikely(!cg->st_cpu_per_core)) {
- char *title = k8s_is_kubepod(cg) ? "CPU Usage (100%% = 1000 mCPU) Per Core" : "CPU Usage (100%% = 1 core) Per Core";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_per_core" : "cgroup.cpu_per_core";
- int prio = cgroup_containers_chart_priority + 100;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- cg->st_cpu_per_core = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "cpu_per_core",
- NULL,
- "cpu",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_STACKED);
-
- rrdset_update_rrdlabels(cg->st_cpu_per_core, cg->chart_labels);
-
- for (i = 0; i < cg->cpuacct_usage.cpus; i++) {
- snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
- rrddim_add(cg->st_cpu_per_core, id, NULL, 100, 1000000000, RRD_ALGORITHM_INCREMENTAL);
- }
- }
-
- for (i = 0; i < cg->cpuacct_usage.cpus; i++) {
- snprintfz(id, RRD_ID_LENGTH_MAX, "cpu%u", i);
- rrddim_set(cg->st_cpu_per_core, id, (collected_number)cg->cpuacct_usage.cpu_percpu[i]);
- }
- rrdset_done(cg->st_cpu_per_core);
-}
-
-void update_mem_usage_detailed_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_mem;
-
- if (unlikely(!cg->st_mem)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Memory";
- context = "systemd.service.memory.ram.usage";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 15;
- } else {
- title = "Memory Usage";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem" : "cgroup.mem";
- prio = cgroup_containers_chart_priority + 220;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
-
- chart = cg->st_mem = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "mem",
- NULL,
- "mem",
- context,
- title,
- "MiB",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_STACKED);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
-
- if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- rrddim_add(chart, "cache", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart, "rss", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-
- if (cg->memory.detailed_has_swap)
- rrddim_add(chart, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_add(chart, "rss_huge", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart, "mapped_file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- } else {
- rrddim_add(chart, "anon", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart, "kernel_stack", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart, "slab", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart, "sock", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart, "anon_thp", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart, "file", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
- }
-
- if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- rrddim_set(chart, "cache", (collected_number)cg->memory.total_cache);
- collected_number rss = (collected_number)(cg->memory.total_rss - cg->memory.total_rss_huge);
- if (rss < 0)
- rss = 0;
- rrddim_set(chart, "rss", rss);
- if (cg->memory.detailed_has_swap)
- rrddim_set(chart, "swap", (collected_number)cg->memory.total_swap);
- rrddim_set(chart, "rss_huge", (collected_number)cg->memory.total_rss_huge);
- rrddim_set(chart, "mapped_file", (collected_number)cg->memory.total_mapped_file);
- } else {
- rrddim_set(chart, "anon", (collected_number)cg->memory.anon);
- rrddim_set(chart, "kernel_stack", (collected_number)cg->memory.kernel_stack);
- rrddim_set(chart, "slab", (collected_number)cg->memory.slab);
- rrddim_set(chart, "sock", (collected_number)cg->memory.sock);
- rrddim_set(chart, "anon_thp", (collected_number)cg->memory.anon_thp);
- rrddim_set(chart, "file", (collected_number)cg->memory.total_mapped_file);
- }
- rrdset_done(chart);
-}
-
-void update_mem_writeback_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_writeback;
-
- if (unlikely(!cg->st_writeback)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Writeback Memory";
- context = "systemd.service.memory.writeback";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 20;
- } else {
- title = "Writeback Memory";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.writeback" : "cgroup.writeback";
- prio = cgroup_containers_chart_priority + 300;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_writeback = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "writeback",
- NULL,
- "mem",
- context,
- title,
- "MiB",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_AREA);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- if (cg->memory.detailed_has_dirty)
- rrddim_add(chart, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart, "writeback", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- if (cg->memory.detailed_has_dirty)
- rrddim_set(chart, "dirty", (collected_number)cg->memory.total_dirty);
- rrddim_set(chart, "writeback", (collected_number)cg->memory.total_writeback);
- rrdset_done(chart);
-}
-
-void update_mem_activity_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_mem_activity;
-
- if (unlikely(!cg->st_mem_activity)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Memory Paging IO";
- context = "systemd.service.memory.paging.io";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 30;
- } else {
- title = "Memory Activity";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_activity" : "cgroup.mem_activity";
- prio = cgroup_containers_chart_priority + 400;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_mem_activity = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "mem_activity",
- NULL,
- "mem",
- context,
- title,
- "MiB/s",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- // FIXME: systemd just in, out
- rrddim_add(chart, "pgpgin", "in", system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(chart, "pgpgout", "out", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(chart, "pgpgin", (collected_number)cg->memory.total_pgpgin);
- rrddim_set(chart, "pgpgout", (collected_number)cg->memory.total_pgpgout);
- rrdset_done(chart);
-}
-
-void update_mem_pgfaults_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_pgfaults;
-
- if (unlikely(!cg->st_pgfaults)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Memory Page Faults";
- context = "systemd.service.memory.paging.faults";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 25;
- } else {
- title = "Memory Page Faults";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.pgfaults" : "cgroup.pgfaults";
- prio = cgroup_containers_chart_priority + 500;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_pgfaults = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "pgfaults",
- NULL,
- "mem",
- context,
- title,
- "MiB/s",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- rrddim_add(chart, "pgfault", NULL, system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(chart, "pgmajfault", "swap", -system_page_size, 1024 * 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(chart, "pgfault", (collected_number)cg->memory.total_pgfault);
- rrddim_set(chart, "pgmajfault", (collected_number)cg->memory.total_pgmajfault);
- rrdset_done(chart);
-}
-
-void update_mem_usage_limit_chart(struct cgroup *cg, unsigned long long memory_limit) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- RRDSET *chart = cg->st_mem_usage_limit;
-
- if (unlikely(!cg->st_mem_usage_limit)) {
- char *title = "Used RAM within the limits";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_usage_limit" : "cgroup.mem_usage_limit";
- int prio = cgroup_containers_chart_priority + 200;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_mem_usage_limit = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "mem_usage_limit",
- NULL,
- "mem",
- context,
- title,
- "MiB",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_STACKED);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
-
- rrddim_add(chart, "available", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrdset_isnot_obsolete___safe_from_collector_thread(chart);
-
- rrddim_set(chart, "available", (collected_number)(memory_limit - cg->memory.usage_in_bytes));
- rrddim_set(chart, "used", (collected_number)cg->memory.usage_in_bytes);
- rrdset_done(chart);
-}
-
-void update_mem_utilization_chart(struct cgroup *cg, unsigned long long memory_limit) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- RRDSET *chart = cg->st_mem_utilization;
-
- if (unlikely(!cg->st_mem_utilization)) {
- char *title = "Memory Utilization";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_utilization" : "cgroup.mem_utilization";
- int prio = cgroup_containers_chart_priority + 199;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_mem_utilization = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "mem_utilization",
- NULL,
- "mem",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_AREA);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
-
- rrddim_add(chart, "utilization", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrdset_isnot_obsolete___safe_from_collector_thread(chart);
- collected_number util = (collected_number)(cg->memory.usage_in_bytes * 100 / memory_limit);
- rrddim_set(chart, "utilization", util);
- rrdset_done(chart);
-}
-
-void update_mem_failcnt_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_mem_failcnt;
-
- if (unlikely(!cg->st_mem_failcnt)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Memory Limit Failures";
- context = "systemd.service.memory.failcnt";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 10;
- } else {
- title = "Memory Limit Failures";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_failcnt" : "cgroup.mem_failcnt";
- prio = cgroup_containers_chart_priority + 250;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_mem_failcnt = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "mem_failcnt",
- NULL,
- "mem",
- context,
- title,
- "count",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- rrddim_add(chart, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(chart, "failures", (collected_number)cg->memory.failcnt);
- rrdset_done(chart);
-}
-
-void update_mem_usage_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_mem_usage;
-
- if (unlikely(!cg->st_mem_usage)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Used Memory";
- context = "systemd.service.memory.usage";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 5;
- } else {
- title = "Used Memory";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.mem_usage" : "cgroup.mem_usage";
- prio = cgroup_containers_chart_priority + 210;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_mem_usage = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "mem_usage",
- NULL,
- "mem",
- context,
- title,
- "MiB",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_STACKED);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
-
- cg->st_mem_rd_ram = rrddim_add(chart, "ram", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- cg->st_mem_rd_swap = rrddim_add(chart, "swap", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(chart, cg->st_mem_rd_ram, (collected_number)cg->memory.usage_in_bytes);
-
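- // with cgroups v1, memsw usage includes RAM, so report as swap only the excess over RAM plus inactive file cache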
- if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- rrddim_set_by_pointer(
- chart,
- cg->st_mem_rd_swap,
- cg->memory.msw_usage_in_bytes > (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) ?
- (collected_number)(cg->memory.msw_usage_in_bytes -
- (cg->memory.usage_in_bytes + cg->memory.total_inactive_file)) :
- 0);
- } else {
- rrddim_set_by_pointer(chart, cg->st_mem_rd_swap, (collected_number)cg->memory.msw_usage_in_bytes);
- }
-
- rrdset_done(chart);
-}
-
-void update_io_serviced_bytes_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_io;
-
- if (unlikely(!cg->st_io)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Disk Read/Write Bandwidth";
- context = "systemd.service.disk.io";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 35;
- } else {
- title = "I/O Bandwidth (all disks)";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.io" : "cgroup.io";
- prio = cgroup_containers_chart_priority + 1200;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_io = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "io",
- NULL,
- "disk",
- context,
- title,
- "KiB/s",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_AREA);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- cg->st_io_rd_read = rrddim_add(chart, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
- cg->st_io_rd_written = rrddim_add(cg->st_io, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(chart, cg->st_io_rd_read, (collected_number)cg->io_service_bytes.Read);
- rrddim_set_by_pointer(chart, cg->st_io_rd_written, (collected_number)cg->io_service_bytes.Write);
- rrdset_done(chart);
-}
-
-void update_io_serviced_ops_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_serviced_ops;
-
- if (unlikely(!cg->st_serviced_ops)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Disk Read/Write Operations";
- context = "systemd.service.disk.iops";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 40;
- } else {
- title = "Serviced I/O Operations (all disks)";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.serviced_ops" : "cgroup.serviced_ops";
- prio = cgroup_containers_chart_priority + 1200;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_serviced_ops = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "serviced_ops",
- NULL,
- "disk",
- context,
- title,
- "operations/s",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- rrddim_add(chart, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(chart, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(chart, "read", (collected_number)cg->io_serviced.Read);
- rrddim_set(chart, "write", (collected_number)cg->io_serviced.Write);
- rrdset_done(chart);
-}
-
-void update_throttle_io_serviced_bytes_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_throttle_io;
-
- if (unlikely(!cg->st_throttle_io)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Throttle Disk Read/Write Bandwidth";
- context = "systemd.service.disk.throttle.io";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 45;
- } else {
- title = "Throttle I/O Bandwidth (all disks)";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.throttle_io" : "cgroup.throttle_io";
- prio = cgroup_containers_chart_priority + 1200;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_throttle_io = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "throttle_io",
- NULL,
- "disk",
- context,
- title,
- "KiB/s",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_AREA);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
-
- cg->st_throttle_io_rd_read = rrddim_add(chart, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
- cg->st_throttle_io_rd_written = rrddim_add(chart, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(chart, cg->st_throttle_io_rd_read, (collected_number)cg->throttle_io_service_bytes.Read);
- rrddim_set_by_pointer(chart, cg->st_throttle_io_rd_written, (collected_number)cg->throttle_io_service_bytes.Write);
- rrdset_done(chart);
-}
-
-void update_throttle_io_serviced_ops_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_throttle_serviced_ops;
-
- if (unlikely(!cg->st_throttle_serviced_ops)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Throttle Disk Read/Write Operations";
- context = "systemd.service.disk.throttle.iops";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 50;
- } else {
- title = "Throttle Serviced I/O Operations (all disks)";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.throttle_serviced_ops" : "cgroup.throttle_serviced_ops";
- prio = cgroup_containers_chart_priority + 1200;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_throttle_serviced_ops = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "throttle_serviced_ops",
- NULL,
- "disk",
- context,
- title,
- "operations/s",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- rrddim_add(chart, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(chart, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(chart, "read", (collected_number)cg->throttle_io_serviced.Read);
- rrddim_set(chart, "write", (collected_number)cg->throttle_io_serviced.Write);
- rrdset_done(chart);
-}
-
-void update_io_queued_ops_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_queued_ops;
-
- if (unlikely(!cg->st_queued_ops)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Queued Disk Read/Write Operations";
- context = "systemd.service.disk.queued_iops";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 55;
- } else {
- title = "Queued I/O Operations (all disks)";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.queued_ops" : "cgroup.queued_ops";
- prio = cgroup_containers_chart_priority + 2000;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_queued_ops = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "queued_ops",
- NULL,
- "disk",
- context,
- title,
- "operations",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- rrddim_add(chart, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(chart, "write", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(chart, "read", (collected_number)cg->io_queued.Read);
- rrddim_set(chart, "write", (collected_number)cg->io_queued.Write);
- rrdset_done(chart);
-}
-
-void update_io_merged_ops_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_merged_ops;
-
- if (unlikely(!cg->st_merged_ops)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Merged Disk Read/Write Operations";
- context = "systemd.service.disk.merged_iops";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 60;
- } else {
- title = "Merged I/O Operations (all disks)";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.merged_ops" : "cgroup.merged_ops";
- prio = cgroup_containers_chart_priority + 2100;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_merged_ops = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "merged_ops",
- NULL,
- "disk",
- context,
- title,
- "operations/s",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- rrddim_add(chart, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(chart, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(chart, "read", (collected_number)cg->io_merged.Read);
- rrddim_set(chart, "write", (collected_number)cg->io_merged.Write);
- rrdset_done(chart);
-}
-
-void update_cpu_some_pressure_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->cpu_pressure;
- struct pressure_charts *pcs = &res->some;
- RRDSET *chart = pcs->share_time.st;
-
- if (unlikely(!pcs->share_time.st)) {
- char *title = "CPU some pressure";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_some_pressure" : "cgroup.cpu_some_pressure";
- int prio = cgroup_containers_chart_priority + 2200;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "cpu_some_pressure",
- NULL,
- "cpu",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
- rrdset_done(chart);
-}
-
-void update_cpu_some_pressure_stall_time_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->cpu_pressure;
- struct pressure_charts *pcs = &res->some;
- RRDSET *chart = pcs->total_time.st;
-
- if (unlikely(!pcs->total_time.st)) {
- char *title = "CPU some pressure stall time";
- char *context =
- k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_some_pressure_stall_time" : "cgroup.cpu_some_pressure_stall_time";
- int prio = cgroup_containers_chart_priority + 2220;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "cpu_some_pressure_stall_time",
- NULL,
- "cpu",
- context,
- title,
- "ms",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
- rrdset_done(chart);
-}
-
-void update_cpu_full_pressure_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->cpu_pressure;
- struct pressure_charts *pcs = &res->full;
- RRDSET *chart = pcs->share_time.st;
-
- if (unlikely(!pcs->share_time.st)) {
- char *title = "CPU full pressure";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_full_pressure" : "cgroup.cpu_full_pressure";
- int prio = cgroup_containers_chart_priority + 2240;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "cpu_full_pressure",
- NULL,
- "cpu",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
- rrdset_done(chart);
-}
-
-void update_cpu_full_pressure_stall_time_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->cpu_pressure;
- struct pressure_charts *pcs = &res->full;
- RRDSET *chart = pcs->total_time.st;
-
- if (unlikely(!pcs->total_time.st)) {
- char *title = "CPU full pressure stall time";
- char *context =
- k8s_is_kubepod(cg) ? "k8s.cgroup.cpu_full_pressure_stall_time" : "cgroup.cpu_full_pressure_stall_time";
- int prio = cgroup_containers_chart_priority + 2260;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "cpu_full_pressure_stall_time",
- NULL,
- "cpu",
- context,
- title,
- "ms",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
- rrdset_done(chart);
-}
-
-void update_mem_some_pressure_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->memory_pressure;
- struct pressure_charts *pcs = &res->some;
- RRDSET *chart = pcs->share_time.st;
-
- if (unlikely(!pcs->share_time.st)) {
- char *title = "Memory some pressure";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.memory_some_pressure" : "cgroup.memory_some_pressure";
- int prio = cgroup_containers_chart_priority + 2300;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "mem_some_pressure",
- NULL,
- "mem",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
- rrdset_done(chart);
-}
-
-void update_mem_some_pressure_stall_time_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->memory_pressure;
- struct pressure_charts *pcs = &res->some;
- RRDSET *chart = pcs->total_time.st;
-
- if (unlikely(!pcs->total_time.st)) {
- char *title = "Memory some pressure stall time";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.memory_some_pressure_stall_time" :
- "cgroup.memory_some_pressure_stall_time";
- int prio = cgroup_containers_chart_priority + 2320;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "memory_some_pressure_stall_time",
- NULL,
- "mem",
- context,
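- // express the usage increase since the previous iteration as a percentage of the configured limit over the update interval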
- title,
- "ms",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
- rrdset_done(chart);
-}
-
-void update_mem_full_pressure_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->memory_pressure;
- struct pressure_charts *pcs = &res->full;
- RRDSET *chart = pcs->share_time.st;
-
- if (unlikely(!pcs->share_time.st)) {
- char *title = "Memory full pressure";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.memory_full_pressure" : "cgroup.memory_full_pressure";
- int prio = cgroup_containers_chart_priority + 2340;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "mem_full_pressure",
- NULL,
- "mem",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
- rrdset_done(chart);
-}
-
-void update_mem_full_pressure_stall_time_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->memory_pressure;
- struct pressure_charts *pcs = &res->full;
- RRDSET *chart = pcs->total_time.st;
-
- if (unlikely(!pcs->total_time.st)) {
- char *title = "Memory full pressure stall time";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.memory_full_pressure_stall_time" :
- "cgroup.memory_full_pressure_stall_time";
- int prio = cgroup_containers_chart_priority + 2360;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "memory_full_pressure_stall_time",
- NULL,
- "mem",
- context,
- title,
- "ms",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
- rrdset_done(chart);
-}
-
-void update_irq_some_pressure_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->irq_pressure;
- struct pressure_charts *pcs = &res->some;
- RRDSET *chart = pcs->share_time.st;
-
- if (unlikely(!pcs->share_time.st)) {
- char *title = "IRQ some pressure";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.irq_some_pressure" : "cgroup.irq_some_pressure";
- int prio = cgroup_containers_chart_priority + 2310;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "irq_some_pressure",
- NULL,
- "interrupts",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
- rrdset_done(chart);
-}
-
-void update_irq_some_pressure_stall_time_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->irq_pressure;
- struct pressure_charts *pcs = &res->some;
- RRDSET *chart = pcs->total_time.st;
-
- if (unlikely(!pcs->total_time.st)) {
- char *title = "IRQ some pressure stall time";
- char *context =
- k8s_is_kubepod(cg) ? "k8s.cgroup.irq_some_pressure_stall_time" : "cgroup.irq_some_pressure_stall_time";
- int prio = cgroup_containers_chart_priority + 2330;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "irq_some_pressure_stall_time",
- NULL,
- "interrupts",
- context,
- title,
- "ms",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
- rrdset_done(chart);
-}
-
-void update_irq_full_pressure_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->irq_pressure;
- struct pressure_charts *pcs = &res->full;
- RRDSET *chart = pcs->share_time.st;
-
- if (unlikely(!pcs->share_time.st)) {
- char *title = "IRQ full pressure";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.irq_full_pressure" : "cgroup.irq_full_pressure";
- int prio = cgroup_containers_chart_priority + 2350;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "irq_full_pressure",
- NULL,
- "interrupts",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
- rrdset_done(chart);
-}
-
-void update_irq_full_pressure_stall_time_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->irq_pressure;
- struct pressure_charts *pcs = &res->full;
- RRDSET *chart = pcs->total_time.st;
-
- if (unlikely(!pcs->total_time.st)) {
- char *title = "IRQ full pressure stall time";
- char *context =
- k8s_is_kubepod(cg) ? "k8s.cgroup.irq_full_pressure_stall_time" : "cgroup.irq_full_pressure_stall_time";
- int prio = cgroup_containers_chart_priority + 2370;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "irq_full_pressure_stall_time",
- NULL,
- "interrupts",
- context,
- title,
- "ms",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
- rrdset_done(chart);
-}
-
-void update_io_some_pressure_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->io_pressure;
- struct pressure_charts *pcs = &res->some;
- RRDSET *chart = pcs->share_time.st;
-
- if (unlikely(!pcs->share_time.st)) {
- char *title = "I/O some pressure";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.io_some_pressure" : "cgroup.io_some_pressure";
- int prio = cgroup_containers_chart_priority + 2400;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "io_some_pressure",
- NULL,
- "disk",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "some 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "some 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "some 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
- rrdset_done(chart);
-}
-
-void update_io_some_pressure_stall_time_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->io_pressure;
- struct pressure_charts *pcs = &res->some;
- RRDSET *chart = pcs->total_time.st;
-
- if (unlikely(!pcs->total_time.st)) {
- char *title = "I/O some pressure stall time";
- char *context =
- k8s_is_kubepod(cg) ? "k8s.cgroup.io_some_pressure_stall_time" : "cgroup.io_some_pressure_stall_time";
- int prio = cgroup_containers_chart_priority + 2420;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "io_some_pressure_stall_time",
- NULL,
- "disk",
- context,
- title,
- "ms",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
- rrdset_done(chart);
-}
-
-void update_io_full_pressure_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->io_pressure;
- struct pressure_charts *pcs = &res->full;
- RRDSET *chart = pcs->share_time.st;
-
- if (unlikely(!pcs->share_time.st)) {
- char *title = "I/O full pressure";
- char *context = k8s_is_kubepod(cg) ? "k8s.cgroup.io_full_pressure" : "cgroup.io_full_pressure";
- int prio = cgroup_containers_chart_priority + 2440;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->share_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "io_full_pressure",
- NULL,
- "disk",
- context,
- title,
- "percentage",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->share_time.rd10 = rrddim_add(chart, "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 = rrddim_add(chart, "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 = rrddim_add(chart, "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(chart, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
- rrddim_set_by_pointer(chart, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
- rrdset_done(chart);
-}
-
-void update_io_full_pressure_stall_time_chart(struct cgroup *cg) {
- if (is_cgroup_systemd_service(cg))
- return;
-
- struct pressure *res = &cg->io_pressure;
- struct pressure_charts *pcs = &res->full;
- RRDSET *chart = pcs->total_time.st;
-
- if (unlikely(!pcs->total_time.st)) {
- char *title = "I/O full pressure stall time";
- char *context =
- k8s_is_kubepod(cg) ? "k8s.cgroup.io_full_pressure_stall_time" : "cgroup.io_full_pressure_stall_time";
- int prio = cgroup_containers_chart_priority + 2460;
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = pcs->total_time.st = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "io_full_pressure_stall_time",
- NULL,
- "disk",
- context,
- title,
- "ms",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- pcs->total_time.rdtotal = rrddim_add(chart, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(chart, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
- rrdset_done(chart);
-}
-
-void update_pids_current_chart(struct cgroup *cg) {
- RRDSET *chart = cg->st_pids;
-
- if (unlikely(!cg->st_pids)) {
- char *title;
- char *context;
- int prio;
- if (is_cgroup_systemd_service(cg)) {
- title = "Systemd Services Number of Processes";
- context = "systemd.service.pids.current";
- prio = NETDATA_CHART_PRIO_CGROUPS_SYSTEMD + 70;
- } else {
- title = "Number of processes";
- context = k8s_is_kubepod(cg) ? "k8s.cgroup.pids_current" : "cgroup.pids_current";
- prio = cgroup_containers_chart_priority + 2150;
- }
-
- char buff[RRD_ID_LENGTH_MAX + 1];
- chart = cg->st_pids = rrdset_create_localhost(
- cgroup_chart_type(buff, cg),
- "pids_current",
- NULL,
- "pids",
- context,
- title,
- "pids",
- PLUGIN_CGROUPS_NAME,
- is_cgroup_systemd_service(cg) ? PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME : PLUGIN_CGROUPS_MODULE_CGROUPS_NAME,
- prio,
- cgroup_update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_update_rrdlabels(chart, cg->chart_labels);
- cg->st_pids_rd_pids_current = rrddim_add(chart, "pids", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(chart, cg->st_pids_rd_pids_current, (collected_number)cg->pids.pids_current);
- rrdset_done(chart);
-}
diff --git a/collectors/cgroups.plugin/cgroup-discovery.c b/collectors/cgroups.plugin/cgroup-discovery.c
deleted file mode 100644
index ede35ed8a..000000000
--- a/collectors/cgroups.plugin/cgroup-discovery.c
+++ /dev/null
@@ -1,1245 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "cgroup-internals.h"
-
-// discovery cgroup thread worker jobs
-#define WORKER_DISCOVERY_INIT 0
-#define WORKER_DISCOVERY_FIND 1
-#define WORKER_DISCOVERY_PROCESS 2
-#define WORKER_DISCOVERY_PROCESS_RENAME 3
-#define WORKER_DISCOVERY_PROCESS_NETWORK 4
-#define WORKER_DISCOVERY_PROCESS_FIRST_TIME 5
-#define WORKER_DISCOVERY_UPDATE 6
-#define WORKER_DISCOVERY_CLEANUP 7
-#define WORKER_DISCOVERY_COPY 8
-#define WORKER_DISCOVERY_SHARE 9
-#define WORKER_DISCOVERY_LOCK 10
-
-#if WORKER_UTILIZATION_MAX_JOB_TYPES < 11
-#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 11
-#endif
-
-struct cgroup *discovered_cgroup_root = NULL;
-
-char cgroup_chart_id_prefix[] = "cgroup_";
-char services_chart_id_prefix[] = "systemd_";
-char *cgroups_rename_script = NULL;
-
-
-// ----------------------------------------------------------------------------
-
-static inline void free_pressure(struct pressure *res) {
- if (res->some.share_time.st) rrdset_is_obsolete___safe_from_collector_thread(res->some.share_time.st);
- if (res->some.total_time.st) rrdset_is_obsolete___safe_from_collector_thread(res->some.total_time.st);
- if (res->full.share_time.st) rrdset_is_obsolete___safe_from_collector_thread(res->full.share_time.st);
- if (res->full.total_time.st) rrdset_is_obsolete___safe_from_collector_thread(res->full.total_time.st);
- freez(res->filename);
-}
-
-static inline void cgroup_free_network_interfaces(struct cgroup *cg) {
- while(cg->interfaces) {
- struct cgroup_network_interface *i = cg->interfaces;
- cg->interfaces = i->next;
-
- // delete the registration of proc_net_dev rename
- netdev_rename_device_del(i->host_device);
-
- freez((void *)i->host_device);
- freez((void *)i->container_device);
- freez((void *)i);
- }
-}
-
-static inline void cgroup_free(struct cgroup *cg) {
- netdata_log_debug(D_CGROUP, "Removing cgroup '%s' with chart id '%s' (was %s and %s)", cg->id, cg->chart_id, (cg->enabled)?"enabled":"disabled", (cg->available)?"available":"not available");
-
- cgroup_netdev_delete(cg);
-
- if(cg->st_cpu) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu);
- if(cg->st_cpu_limit) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_limit);
- if(cg->st_cpu_per_core) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_per_core);
- if(cg->st_cpu_nr_throttled) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_nr_throttled);
- if(cg->st_cpu_throttled_time) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_throttled_time);
- if(cg->st_cpu_shares) rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_shares);
- if(cg->st_mem) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem);
- if(cg->st_writeback) rrdset_is_obsolete___safe_from_collector_thread(cg->st_writeback);
- if(cg->st_mem_activity) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_activity);
- if(cg->st_pgfaults) rrdset_is_obsolete___safe_from_collector_thread(cg->st_pgfaults);
- if(cg->st_mem_usage) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_usage);
- if(cg->st_mem_usage_limit) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_usage_limit);
- if(cg->st_mem_utilization) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_utilization);
- if(cg->st_mem_failcnt) rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_failcnt);
- if(cg->st_io) rrdset_is_obsolete___safe_from_collector_thread(cg->st_io);
- if(cg->st_serviced_ops) rrdset_is_obsolete___safe_from_collector_thread(cg->st_serviced_ops);
- if(cg->st_throttle_io) rrdset_is_obsolete___safe_from_collector_thread(cg->st_throttle_io);
- if(cg->st_throttle_serviced_ops) rrdset_is_obsolete___safe_from_collector_thread(cg->st_throttle_serviced_ops);
- if(cg->st_queued_ops) rrdset_is_obsolete___safe_from_collector_thread(cg->st_queued_ops);
- if(cg->st_merged_ops) rrdset_is_obsolete___safe_from_collector_thread(cg->st_merged_ops);
- if(cg->st_pids) rrdset_is_obsolete___safe_from_collector_thread(cg->st_pids);
-
- freez(cg->filename_cpuset_cpus);
- freez(cg->filename_cpu_cfs_period);
- freez(cg->filename_cpu_cfs_quota);
- freez(cg->filename_memory_limit);
- freez(cg->filename_memoryswap_limit);
-
- cgroup_free_network_interfaces(cg);
-
- freez(cg->cpuacct_usage.cpu_percpu);
-
- freez(cg->cpuacct_stat.filename);
- freez(cg->cpuacct_usage.filename);
- freez(cg->cpuacct_cpu_throttling.filename);
- freez(cg->cpuacct_cpu_shares.filename);
-
- arl_free(cg->memory.arl_base);
- freez(cg->memory.filename_detailed);
- freez(cg->memory.filename_failcnt);
- freez(cg->memory.filename_usage_in_bytes);
- freez(cg->memory.filename_msw_usage_in_bytes);
-
- freez(cg->io_service_bytes.filename);
- freez(cg->io_serviced.filename);
-
- freez(cg->throttle_io_service_bytes.filename);
- freez(cg->throttle_io_serviced.filename);
-
- freez(cg->io_merged.filename);
- freez(cg->io_queued.filename);
- freez(cg->pids.pids_current_filename);
-
- free_pressure(&cg->cpu_pressure);
- free_pressure(&cg->io_pressure);
- free_pressure(&cg->memory_pressure);
- free_pressure(&cg->irq_pressure);
-
- freez(cg->id);
- freez(cg->intermediate_id);
- freez(cg->chart_id);
- freez(cg->name);
-
- rrdlabels_destroy(cg->chart_labels);
-
- freez(cg);
-
- cgroup_root_count--;
-}
-
-// ----------------------------------------------------------------------------
-// add/remove/find cgroup objects
-
-#define CGROUP_CHARTID_LINE_MAX 1024
-
-static inline char *cgroup_chart_id_strdupz(const char *s) {
- if(!s || !*s) s = "/";
-
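- // drop the leading slash, except for the root cgroup itself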
- if(*s == '/' && s[1] != '\0') s++;
-
- char *r = strdupz(s);
- netdata_fix_chart_id(r);
-
- return r;
-}
-
-// TODO: move the code to cgroup_chart_id_strdupz() when the renaming script is fixed
-static inline void substitute_dots_in_id(char *s) {
- // dots are used to distinguish chart type and id in streaming, so we should replace them
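- // e.g. an id like "docker.service" becomes "docker-service"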
- for (char *d = s; *d; d++) {
- if (*d == '.')
- *d = '-';
- }
-}
-
-// ----------------------------------------------------------------------------
-// parse k8s labels
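-// e.g. a line "myname key1=value1,key2=value2" yields the name "myname" plus two labels (illustrative format)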
-
-char *cgroup_parse_resolved_name_and_labels(RRDLABELS *labels, char *data) {
- // the first word, up to the first space is the name
- char *name = strsep_skip_consecutive_separators(&data, " ");
-
- // the rest are key=value pairs separated by comma
- while(data) {
- char *pair = strsep_skip_consecutive_separators(&data, ",");
- rrdlabels_add_pair(labels, pair, RRDLABEL_SRC_AUTO | RRDLABEL_SRC_K8S);
- }
-
- return name;
-}
-
-static inline void discovery_rename_cgroup(struct cgroup *cg) {
- if (!cg->pending_renames) {
- return;
- }
- cg->pending_renames--;
-
- netdata_log_debug(D_CGROUP, "looking for the name of cgroup '%s' with chart id '%s'", cg->id, cg->chart_id);
- netdata_log_debug(D_CGROUP, "executing command %s \"%s\" for cgroup '%s'", cgroups_rename_script, cg->intermediate_id, cg->chart_id);
- pid_t cgroup_pid;
-
- FILE *fp_child_input, *fp_child_output;
- (void)netdata_popen_raw_default_flags_and_environment(&cgroup_pid, &fp_child_input, &fp_child_output, cgroups_rename_script, cg->id, cg->intermediate_id);
- if (!fp_child_output) {
- collector_error("CGROUP: cannot popen(%s \"%s\", \"r\").", cgroups_rename_script, cg->intermediate_id);
- cg->pending_renames = 0;
- cg->processed = 1;
- return;
- }
-
- char buffer[CGROUP_CHARTID_LINE_MAX + 1];
- char *new_name = fgets(buffer, CGROUP_CHARTID_LINE_MAX, fp_child_output);
- int exit_code = netdata_pclose(fp_child_input, fp_child_output, cgroup_pid);
-
- switch (exit_code) {
- case 0:
- cg->pending_renames = 0;
- break;
-
- case 3:
- cg->pending_renames = 0;
- cg->processed = 1;
- break;
-
- default:
- break;
- }
-
- if (cg->pending_renames || cg->processed)
- return;
- if (!new_name || !*new_name || *new_name == '\n')
- return;
- if (!(new_name = trim(new_name)))
- return;
-
- if (!cg->chart_labels)
- cg->chart_labels = rrdlabels_create();
- // read the new labels and remove the obsolete ones
- rrdlabels_unmark_all(cg->chart_labels);
- char *name = cgroup_parse_resolved_name_and_labels(cg->chart_labels, new_name);
- rrdlabels_remove_all_unmarked(cg->chart_labels);
-
- freez(cg->name);
- cg->name = strdupz(name);
-
- freez(cg->chart_id);
- cg->chart_id = cgroup_chart_id_strdupz(name);
-
- substitute_dots_in_id(cg->chart_id);
- cg->hash_chart_id = simple_hash(cg->chart_id);
-}
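-// Exit-code protocol of the rename script, as handled above:
-//   0 - name resolved; stop retrying.
-//   3 - stop retrying and mark the cgroup as processed (skip it).
-//   any other code - retry on a later discovery run while pending_renames > 0.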
-
-static void is_cgroup_procs_exist(netdata_ebpf_cgroup_shm_body_t *out, char *id) {
- struct stat buf;
-
- snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_cpuset_base, id);
- if (likely(stat(out->path, &buf) == 0)) {
- return;
- }
-
- snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_blkio_base, id);
- if (likely(stat(out->path, &buf) == 0)) {
- return;
- }
-
- snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_memory_base, id);
- if (likely(stat(out->path, &buf) == 0)) {
- return;
- }
-
- snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_devices_base, id);
- if (likely(stat(out->path, &buf) == 0)) {
- return;
- }
-
- out->path[0] = '\0';
- out->enabled = 0;
-}
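-// Probes the v1 hierarchies in a fixed order (cpuset, blkio, memory, devices) and
-// keeps the first existing cgroup.procs path; if none exists, sharing this cgroup
-// with eBPF is disabled.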
-
-static inline void convert_cgroup_to_systemd_service(struct cgroup *cg) {
- char buffer[CGROUP_CHARTID_LINE_MAX + 1];
- cg->options |= CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE;
- strncpyz(buffer, cg->id, CGROUP_CHARTID_LINE_MAX);
- char *s = buffer;
-
- // skip to the last slash
- size_t len = strlen(s);
- while (len--) {
- if (unlikely(s[len] == '/')) {
- break;
- }
- }
- if (len) {
- s = &s[len + 1];
- }
-
- // remove extension
- len = strlen(s);
- while (len--) {
- if (unlikely(s[len] == '.')) {
- break;
- }
- }
- if (len) {
- s[len] = '\0';
- }
-
- freez(cg->name);
- cg->name = strdupz(s);
-
- freez(cg->chart_id);
- cg->chart_id = cgroup_chart_id_strdupz(s);
- substitute_dots_in_id(cg->chart_id);
- cg->hash_chart_id = simple_hash(cg->chart_id);
-}
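-// e.g. id "/system.slice/docker.service" -> name "docker"
-// (the last path component with its ".service" extension removed).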
-
-static inline struct cgroup *discovery_cgroup_add(const char *id) {
- netdata_log_debug(D_CGROUP, "adding to list, cgroup with id '%s'", id);
-
- struct cgroup *cg = callocz(1, sizeof(struct cgroup));
-
- cg->id = strdupz(id);
- cg->hash = simple_hash(cg->id);
-
- cg->name = strdupz(id);
-
- cg->intermediate_id = cgroup_chart_id_strdupz(id);
-
- cg->chart_id = cgroup_chart_id_strdupz(id);
- substitute_dots_in_id(cg->chart_id);
- cg->hash_chart_id = simple_hash(cg->chart_id);
-
- if (cgroup_use_unified_cgroups) {
- cg->options |= CGROUP_OPTIONS_IS_UNIFIED;
- }
-
- if (!discovered_cgroup_root)
- discovered_cgroup_root = cg;
- else {
- struct cgroup *t;
- for (t = discovered_cgroup_root; t->discovered_next; t = t->discovered_next) {
- }
- t->discovered_next = cg;
- }
-
- return cg;
-}
-
-static inline struct cgroup *discovery_cgroup_find(const char *id) {
- netdata_log_debug(D_CGROUP, "searching for cgroup '%s'", id);
-
- uint32_t hash = simple_hash(id);
-
- struct cgroup *cg;
- for(cg = discovered_cgroup_root; cg ; cg = cg->discovered_next) {
- if(hash == cg->hash && strcmp(id, cg->id) == 0)
- break;
- }
-
- netdata_log_debug(D_CGROUP, "cgroup '%s' %s in memory", id, (cg)?"found":"not found");
- return cg;
-}
-
-static int calc_cgroup_depth(const char *id) {
- int depth = 0;
- const char *s;
- for (s = id; *s; s++) {
- depth += unlikely(*s == '/');
- }
- return depth;
-}
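-// e.g. "/kubepods/burstable/podX" has depth 3 (one per '/').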
-
-static inline void discovery_find_cgroup_in_dir_callback(const char *dir) {
- if (!dir || !*dir) {
- dir = "/";
- }
-
- netdata_log_debug(D_CGROUP, "examining cgroup dir '%s'", dir);
-
- struct cgroup *cg = discovery_cgroup_find(dir);
- if (cg) {
- cg->available = 1;
- return;
- }
-
- if (cgroup_root_count >= cgroup_root_max) {
- nd_log_limit_static_global_var(erl, 3600, 0);
- nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_WARNING, "CGROUP: maximum number of cgroups reached (%d). No more cgroups will be added.", cgroup_root_count);
- return;
- }
-
- if (cgroup_max_depth > 0) {
- int depth = calc_cgroup_depth(dir);
- if (depth > cgroup_max_depth) {
- nd_log_collector(NDLP_DEBUG, "CGROUP: '%s' is too deep (%d, while max is %d)", dir, depth, cgroup_max_depth);
- return;
- }
- }
-
- cg = discovery_cgroup_add(dir);
- cg->available = 1;
- cg->first_time_seen = 1;
- cg->function_ready = false;
- cgroup_root_count++;
-}
-
-static inline int discovery_find_dir_in_subdirs(const char *base, const char *this, void (*callback)(const char *)) {
- if(!this) this = base;
- netdata_log_debug(D_CGROUP, "searching for directories in '%s' (base '%s')", this?this:"", base);
-
- size_t dirlen = strlen(this), baselen = strlen(base);
-
- int ret = -1;
- int enabled = -1;
-
- const char *relative_path = &this[baselen];
- if(!*relative_path) relative_path = "/";
-
- DIR *dir = opendir(this);
- if(!dir) {
- collector_error("CGROUP: cannot read directory '%s'", base);
- return ret;
- }
- ret = 1;
-
- callback(relative_path);
-
- struct dirent *de = NULL;
- while((de = readdir(dir))) {
- if(de->d_type == DT_DIR
- && (
- (de->d_name[0] == '.' && de->d_name[1] == '\0')
- || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
- ))
- continue;
-
- if(de->d_type == DT_DIR) {
- if(enabled == -1) {
- const char *r = relative_path;
- if(*r == '\0') r = "/";
-
- // do not descend into directories we are not interested in
- enabled = matches_search_cgroup_paths(r);
- }
-
- if(enabled) {
- char *s = mallocz(dirlen + strlen(de->d_name) + 2);
- strcpy(s, this);
- strcat(s, "/");
- strcat(s, de->d_name);
- int ret2 = discovery_find_dir_in_subdirs(base, s, callback);
- if(ret2 > 0) ret += ret2;
- freez(s);
- }
- }
- }
-
- closedir(dir);
- return ret;
-}
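-// Returns -1 when 'this' cannot be opened, otherwise the number of directories
-// visited (including 'this'); the callback always receives the path relative to 'base'.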
-
-static inline void discovery_mark_as_unavailable_all_cgroups() {
- for (struct cgroup *cg = discovered_cgroup_root; cg; cg = cg->discovered_next) {
- cg->available = 0;
- }
-}
-
-static inline void discovery_update_filenames_cgroup_v1(struct cgroup *cg) {
- char filename[FILENAME_MAX + 1];
- struct stat buf;
-
- // CPU
- if (unlikely(cgroup_enable_cpuacct_stat && !cg->cpuacct_stat.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.stat", cgroup_cpuacct_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->cpuacct_stat.filename = strdupz(filename);
- cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat;
- snprintfz(filename, FILENAME_MAX, "%s%s/cpuset.cpus", cgroup_cpuset_base, cg->id);
- cg->filename_cpuset_cpus = strdupz(filename);
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.cfs_period_us", cgroup_cpuacct_base, cg->id);
- cg->filename_cpu_cfs_period = strdupz(filename);
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.cfs_quota_us", cgroup_cpuacct_base, cg->id);
- cg->filename_cpu_cfs_quota = strdupz(filename);
- }
- }
- // FIXME: remove usage_percpu
- if (unlikely(cgroup_enable_cpuacct_usage && !cg->cpuacct_usage.filename && !is_cgroup_systemd_service(cg))) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpuacct.usage_percpu", cgroup_cpuacct_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->cpuacct_usage.filename = strdupz(filename);
- cg->cpuacct_usage.enabled = cgroup_enable_cpuacct_usage;
- }
- }
- if (unlikely(
- cgroup_enable_cpuacct_cpu_throttling && !cg->cpuacct_cpu_throttling.filename &&
- !is_cgroup_systemd_service(cg))) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.stat", cgroup_cpuacct_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->cpuacct_cpu_throttling.filename = strdupz(filename);
- cg->cpuacct_cpu_throttling.enabled = cgroup_enable_cpuacct_cpu_throttling;
- }
- }
- if (unlikely(
- cgroup_enable_cpuacct_cpu_shares && !cg->cpuacct_cpu_shares.filename && !is_cgroup_systemd_service(cg))) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.shares", cgroup_cpuacct_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->cpuacct_cpu_shares.filename = strdupz(filename);
- cg->cpuacct_cpu_shares.enabled = cgroup_enable_cpuacct_cpu_shares;
- }
- }
-
- // Memory
- if (unlikely(
- (cgroup_enable_detailed_memory || cgroup_used_memory) && !cg->memory.filename_detailed &&
- (cgroup_used_memory || cgroup_enable_systemd_services_detailed_memory || !is_cgroup_systemd_service(cg)))) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_memory_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_detailed = strdupz(filename);
- cg->memory.enabled_detailed =
- (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES) ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_AUTO;
- }
- }
- if (unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.usage_in_bytes", cgroup_memory_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_usage_in_bytes = strdupz(filename);
- cg->memory.enabled_usage_in_bytes = cgroup_enable_memory;
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.limit_in_bytes", cgroup_memory_base, cg->id);
- cg->filename_memory_limit = strdupz(filename);
- }
- }
- if (unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.memsw.usage_in_bytes", cgroup_memory_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_msw_usage_in_bytes = strdupz(filename);
- cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap;
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.memsw.limit_in_bytes", cgroup_memory_base, cg->id);
- cg->filename_memoryswap_limit = strdupz(filename);
- }
- }
- if (unlikely(cgroup_enable_memory_failcnt && !cg->memory.filename_failcnt)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.failcnt", cgroup_memory_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_failcnt = strdupz(filename);
- cg->memory.enabled_failcnt = cgroup_enable_memory_failcnt;
- }
- }
-
- // Blkio
- if (unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes_recursive", cgroup_blkio_base, cg->id);
- if (unlikely(stat(filename, &buf) != -1)) {
- cg->io_service_bytes.filename = strdupz(filename);
- cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
- } else {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_service_bytes", cgroup_blkio_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_service_bytes.filename = strdupz(filename);
- cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
- }
- }
- }
- if (unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced_recursive", cgroup_blkio_base, cg->id);
- if (unlikely(stat(filename, &buf) != -1)) {
- cg->io_serviced.filename = strdupz(filename);
- cg->io_serviced.enabled = cgroup_enable_blkio_ops;
- } else {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_serviced", cgroup_blkio_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_serviced.filename = strdupz(filename);
- cg->io_serviced.enabled = cgroup_enable_blkio_ops;
- }
- }
- }
- if (unlikely(cgroup_enable_blkio_throttle_io && !cg->throttle_io_service_bytes.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes_recursive", cgroup_blkio_base, cg->id);
- if (unlikely(stat(filename, &buf) != -1)) {
- cg->throttle_io_service_bytes.filename = strdupz(filename);
- cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io;
- } else {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_service_bytes", cgroup_blkio_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->throttle_io_service_bytes.filename = strdupz(filename);
- cg->throttle_io_service_bytes.enabled = cgroup_enable_blkio_throttle_io;
- }
- }
- }
- if (unlikely(cgroup_enable_blkio_throttle_ops && !cg->throttle_io_serviced.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced_recursive", cgroup_blkio_base, cg->id);
- if (unlikely(stat(filename, &buf) != -1)) {
- cg->throttle_io_serviced.filename = strdupz(filename);
- cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops;
- } else {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.throttle.io_serviced", cgroup_blkio_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->throttle_io_serviced.filename = strdupz(filename);
- cg->throttle_io_serviced.enabled = cgroup_enable_blkio_throttle_ops;
- }
- }
- }
- if (unlikely(cgroup_enable_blkio_merged_ops && !cg->io_merged.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged_recursive", cgroup_blkio_base, cg->id);
- if (unlikely(stat(filename, &buf) != -1)) {
- cg->io_merged.filename = strdupz(filename);
- cg->io_merged.enabled = cgroup_enable_blkio_merged_ops;
- } else {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_merged", cgroup_blkio_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_merged.filename = strdupz(filename);
- cg->io_merged.enabled = cgroup_enable_blkio_merged_ops;
- }
- }
- }
- if (unlikely(cgroup_enable_blkio_queued_ops && !cg->io_queued.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued_recursive", cgroup_blkio_base, cg->id);
- if (unlikely(stat(filename, &buf) != -1)) {
- cg->io_queued.filename = strdupz(filename);
- cg->io_queued.enabled = cgroup_enable_blkio_queued_ops;
- } else {
- snprintfz(filename, FILENAME_MAX, "%s%s/blkio.io_queued", cgroup_blkio_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_queued.filename = strdupz(filename);
- cg->io_queued.enabled = cgroup_enable_blkio_queued_ops;
- }
- }
- }
-
- // Pids
- if (unlikely(!cg->pids.pids_current_filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/pids.current", cgroup_pids_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->pids.pids_current_filename = strdupz(filename);
- }
- }
-}
-
-static inline void discovery_update_filenames_cgroup_v2(struct cgroup *cg) {
- char filename[FILENAME_MAX + 1];
- struct stat buf;
-
- // CPU
- if (unlikely((cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_cpu_throttling) && !cg->cpuacct_stat.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.stat", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->cpuacct_stat.filename = strdupz(filename);
- cg->cpuacct_stat.enabled = cgroup_enable_cpuacct_stat;
- cg->cpuacct_cpu_throttling.enabled = cgroup_enable_cpuacct_cpu_throttling;
- cg->filename_cpuset_cpus = NULL;
- cg->filename_cpu_cfs_period = NULL;
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.max", cgroup_unified_base, cg->id);
- cg->filename_cpu_cfs_quota = strdupz(filename);
- }
- }
- if (unlikely(cgroup_enable_cpuacct_cpu_shares && !cg->cpuacct_cpu_shares.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.weight", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->cpuacct_cpu_shares.filename = strdupz(filename);
- cg->cpuacct_cpu_shares.enabled = cgroup_enable_cpuacct_cpu_shares;
- }
- }
-
- // Memory
- // FIXME: this if condition!
- if (unlikely(
- (cgroup_enable_detailed_memory || cgroup_used_memory) && !cg->memory.filename_detailed &&
- (cgroup_used_memory || cgroup_enable_systemd_services_detailed_memory || !is_cgroup_systemd_service(cg)))) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.stat", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_detailed = strdupz(filename);
- cg->memory.enabled_detailed =
- (cgroup_enable_detailed_memory == CONFIG_BOOLEAN_YES) ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_AUTO;
- }
- }
-
- if (unlikely(cgroup_enable_memory && !cg->memory.filename_usage_in_bytes)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.current", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_usage_in_bytes = strdupz(filename);
- cg->memory.enabled_usage_in_bytes = cgroup_enable_memory;
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.max", cgroup_unified_base, cg->id);
- cg->filename_memory_limit = strdupz(filename);
- }
- }
-
- if (unlikely(cgroup_enable_swap && !cg->memory.filename_msw_usage_in_bytes)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.swap.current", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->memory.filename_msw_usage_in_bytes = strdupz(filename);
- cg->memory.enabled_msw_usage_in_bytes = cgroup_enable_swap;
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.swap.max", cgroup_unified_base, cg->id);
- cg->filename_memoryswap_limit = strdupz(filename);
- }
- }
-
- // Blkio
- if (unlikely(cgroup_enable_blkio_io && !cg->io_service_bytes.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/io.stat", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_service_bytes.filename = strdupz(filename);
- cg->io_service_bytes.enabled = cgroup_enable_blkio_io;
- }
- }
-
- if (unlikely(cgroup_enable_blkio_ops && !cg->io_serviced.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/io.stat", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_serviced.filename = strdupz(filename);
- cg->io_serviced.enabled = cgroup_enable_blkio_ops;
- }
- }
-
- // PSI
- if (unlikely(cgroup_enable_pressure_cpu && !cg->cpu_pressure.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/cpu.pressure", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->cpu_pressure.filename = strdupz(filename);
- cg->cpu_pressure.some.enabled = cgroup_enable_pressure_cpu;
- cg->cpu_pressure.full.enabled = CONFIG_BOOLEAN_NO;
- }
- }
-
- if (unlikely((cgroup_enable_pressure_io_some || cgroup_enable_pressure_io_full) && !cg->io_pressure.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/io.pressure", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->io_pressure.filename = strdupz(filename);
- cg->io_pressure.some.enabled = cgroup_enable_pressure_io_some;
- cg->io_pressure.full.enabled = cgroup_enable_pressure_io_full;
- }
- }
-
- if (unlikely(
- (cgroup_enable_pressure_memory_some || cgroup_enable_pressure_memory_full) &&
- !cg->memory_pressure.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/memory.pressure", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->memory_pressure.filename = strdupz(filename);
- cg->memory_pressure.some.enabled = cgroup_enable_pressure_memory_some;
- cg->memory_pressure.full.enabled = cgroup_enable_pressure_memory_full;
- }
- }
-
- if (unlikely((cgroup_enable_pressure_irq_some || cgroup_enable_pressure_irq_full) && !cg->irq_pressure.filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/irq.pressure", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->irq_pressure.filename = strdupz(filename);
- cg->irq_pressure.some.enabled = cgroup_enable_pressure_irq_some;
- cg->irq_pressure.full.enabled = cgroup_enable_pressure_irq_full;
- }
- }
-
- // Pids
- if (unlikely(!cg->pids.pids_current_filename)) {
- snprintfz(filename, FILENAME_MAX, "%s%s/pids.current", cgroup_unified_base, cg->id);
- if (likely(stat(filename, &buf) != -1)) {
- cg->pids.pids_current_filename = strdupz(filename);
- }
- }
-}
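-// cgroup v2 files used above, with their v1 counterparts in parentheses:
-//   cpu.stat (cpuacct.stat + cpu.stat), cpu.max (cpu.cfs_quota_us / cpu.cfs_period_us),
-//   cpu.weight (cpu.shares), memory.current (memory.usage_in_bytes),
-//   memory.max (memory.limit_in_bytes), memory.swap.current (memory.memsw.usage_in_bytes),
-//   io.stat (blkio.io_service_bytes and blkio.io_serviced).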
-
-static inline void discovery_update_filenames_all_cgroups() {
- for (struct cgroup *cg = discovered_cgroup_root; cg; cg = cg->discovered_next) {
- if (unlikely(!cg->available || !cg->enabled || cg->pending_renames))
- continue;
-
- if (!cgroup_use_unified_cgroups)
- discovery_update_filenames_cgroup_v1(cg);
- else if (likely(cgroup_unified_exist))
- discovery_update_filenames_cgroup_v2(cg);
- }
-}
-
-static inline void discovery_cleanup_all_cgroups() {
- struct cgroup *cg = discovered_cgroup_root, *last = NULL;
-
- for(; cg ;) {
- if(!cg->available) {
- // enable the first duplicate cgroup
- {
- struct cgroup *t;
- for (t = discovered_cgroup_root; t; t = t->discovered_next) {
- if (t != cg && t->available && !t->enabled && t->options & CGROUP_OPTIONS_DISABLED_DUPLICATE &&
- (is_cgroup_systemd_service(t) == is_cgroup_systemd_service(cg)) &&
- t->hash_chart_id == cg->hash_chart_id && !strcmp(t->chart_id, cg->chart_id)) {
- netdata_log_debug(D_CGROUP, "Enabling duplicate of cgroup '%s' with id '%s', because the original with id '%s' stopped.", t->chart_id, t->id, cg->id);
- t->enabled = 1;
- t->options &= ~CGROUP_OPTIONS_DISABLED_DUPLICATE;
- break;
- }
- }
- }
-
- if(!last)
- discovered_cgroup_root = cg->discovered_next;
- else
- last->discovered_next = cg->discovered_next;
-
- cgroup_free(cg);
-
- if(!last)
- cg = discovered_cgroup_root;
- else
- cg = last->discovered_next;
- }
- else {
- last = cg;
- cg = cg->discovered_next;
- }
- }
-}
-
-static inline void discovery_copy_discovered_cgroups_to_reader() {
- netdata_log_debug(D_CGROUP, "copy discovered cgroups to the main group list");
-
- struct cgroup *cg;
-
- for (cg = discovered_cgroup_root; cg; cg = cg->discovered_next) {
- cg->next = cg->discovered_next;
- }
-
- cgroup_root = discovered_cgroup_root;
-}
-
-static inline void discovery_share_cgroups_with_ebpf() {
- struct cgroup *cg;
- int count;
- struct stat buf;
-
- if (shm_mutex_cgroup_ebpf == SEM_FAILED) {
- return;
- }
- sem_wait(shm_mutex_cgroup_ebpf);
-
- for (cg = cgroup_root, count = 0; cg; cg = cg->next, count++) {
- netdata_ebpf_cgroup_shm_body_t *ptr = &shm_cgroup_ebpf.body[count];
- char *prefix = (is_cgroup_systemd_service(cg)) ? services_chart_id_prefix : cgroup_chart_id_prefix;
- snprintfz(ptr->name, CGROUP_EBPF_NAME_SHARED_LENGTH - 1, "%s%s", prefix, cg->chart_id);
- ptr->hash = simple_hash(ptr->name);
- ptr->options = cg->options;
- ptr->enabled = cg->enabled;
- if (cgroup_use_unified_cgroups) {
- snprintfz(ptr->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_unified_base, cg->id);
- if (unlikely(stat(ptr->path, &buf) == -1)) {
- ptr->path[0] = '\0';
- ptr->enabled = 0;
- }
- } else {
- is_cgroup_procs_exist(ptr, cg->id);
- }
-
- netdata_log_debug(D_CGROUP, "cgroup shared: NAME=%s, ENABLED=%d", ptr->name, ptr->enabled);
- }
-
- shm_cgroup_ebpf.header->cgroup_root_count = count;
- sem_post(shm_mutex_cgroup_ebpf);
-}
-
-static inline void discovery_find_all_cgroups_v1() {
- if (cgroup_enable_cpuacct_stat || cgroup_enable_cpuacct_usage) {
- if (discovery_find_dir_in_subdirs(cgroup_cpuacct_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
- cgroup_enable_cpuacct_stat = cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO;
- collector_error("CGROUP: disabled cpu statistics.");
- }
- }
-
- if (cgroup_enable_blkio_io || cgroup_enable_blkio_ops || cgroup_enable_blkio_throttle_io ||
- cgroup_enable_blkio_throttle_ops || cgroup_enable_blkio_merged_ops || cgroup_enable_blkio_queued_ops) {
- if (discovery_find_dir_in_subdirs(cgroup_blkio_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
- cgroup_enable_blkio_io = cgroup_enable_blkio_ops = cgroup_enable_blkio_throttle_io =
- cgroup_enable_blkio_throttle_ops = cgroup_enable_blkio_merged_ops = cgroup_enable_blkio_queued_ops =
- CONFIG_BOOLEAN_NO;
- collector_error("CGROUP: disabled blkio statistics.");
- }
- }
-
- if (cgroup_enable_memory || cgroup_enable_detailed_memory || cgroup_enable_swap || cgroup_enable_memory_failcnt) {
- if (discovery_find_dir_in_subdirs(cgroup_memory_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
- cgroup_enable_memory = cgroup_enable_detailed_memory = cgroup_enable_swap = cgroup_enable_memory_failcnt =
- CONFIG_BOOLEAN_NO;
- collector_error("CGROUP: disabled memory statistics.");
- }
- }
-
- if (cgroup_search_in_devices) {
- if (discovery_find_dir_in_subdirs(cgroup_devices_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
- cgroup_search_in_devices = 0;
- collector_error("CGROUP: disabled devices statistics.");
- }
- }
-}
-
-static inline void discovery_find_all_cgroups_v2() {
- if (discovery_find_dir_in_subdirs(cgroup_unified_base, NULL, discovery_find_cgroup_in_dir_callback) == -1) {
- cgroup_unified_exist = CONFIG_BOOLEAN_NO;
- collector_error("CGROUP: disabled unified cgroups statistics.");
- }
-}
-
-static int is_digits_only(const char *s) {
- do {
- if (!isdigit((unsigned char)*s++)) {
- return 0;
- }
- } while (*s);
-
- return 1;
-}
-
-static int is_cgroup_k8s_container(const char *id) {
- // examples:
- // https://github.com/netdata/netdata/blob/0fc101679dcd12f1cb8acdd07bb4c85d8e553e53/collectors/cgroups.plugin/cgroup-name.sh#L121-L147
- const char *p = id;
- const char *pp = NULL;
- int i = 0;
- size_t l = 3; // pod
- while ((p = strstr(p, "pod"))) {
- i++;
- p += l;
- pp = p;
- }
- return !(i < 2 || !pp || !(pp = strchr(pp, '/')) || !pp++ || !*pp);
-}
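-// Heuristic: "pod" must occur at least twice (e.g. once inside "kubepods" and once
-// in "pod<uid>"), and the last occurrence must be followed by a '/' and a non-empty
-// container part, e.g. ".../kubepods/burstable/pod<uid>/<container-id>".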
-
-#define TASK_COMM_LEN 16
-
-static int k8s_get_container_first_proc_comm(const char *id, char *comm) {
- if (!is_cgroup_k8s_container(id)) {
- return 1;
- }
-
- static procfile *ff = NULL;
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/%s/cgroup.procs", cgroup_cpuacct_base, id);
-
- ff = procfile_reopen(ff, filename, NULL, CGROUP_PROCFILE_FLAG);
- if (unlikely(!ff)) {
- netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot open file '%s'.", filename);
- return 1;
- }
-
- ff = procfile_readall(ff);
- if (unlikely(!ff)) {
- netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot read file '%s'.", filename);
- return 1;
- }
-
- unsigned long lines = procfile_lines(ff);
- if (unlikely(lines < 2)) {
- return 1;
- }
-
- char *pid = procfile_lineword(ff, 0, 0);
- if (!pid || !*pid) {
- return 1;
- }
-
- snprintfz(filename, FILENAME_MAX, "%s/proc/%s/comm", netdata_configured_host_prefix, pid);
-
- ff = procfile_reopen(ff, filename, NULL, PROCFILE_FLAG_DEFAULT);
- if (unlikely(!ff)) {
- netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot open file '%s'.", filename);
- return 1;
- }
-
- ff = procfile_readall(ff);
- if (unlikely(!ff)) {
- netdata_log_debug(D_CGROUP, "CGROUP: k8s_is_pause_container(): cannot read file '%s'.", filename);
- return 1;
- }
-
- lines = procfile_lines(ff);
- if (unlikely(lines != 2)) {
- return 1;
- }
-
- char *proc_comm = procfile_lineword(ff, 0, 0);
- if (!proc_comm || !*proc_comm) {
- return 1;
- }
-
- strncpyz(comm, proc_comm, TASK_COMM_LEN);
- return 0;
-}
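-// Reads the first PID listed in the container's cgroup.procs and copies the command
-// name from /proc/<pid>/comm into 'comm'; returns 1 (failure) when no PID is listed
-// or the comm file cannot be read, 0 on success.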
-
-static inline void discovery_process_first_time_seen_cgroup(struct cgroup *cg) {
- if (!cg->first_time_seen) {
- return;
- }
- cg->first_time_seen = 0;
-
- char comm[TASK_COMM_LEN + 1];
-
- if (cg->container_orchestrator == CGROUPS_ORCHESTRATOR_UNSET) {
- if (strstr(cg->id, "kubepods")) {
- cg->container_orchestrator = CGROUPS_ORCHESTRATOR_K8S;
- } else {
- cg->container_orchestrator = CGROUPS_ORCHESTRATOR_UNKNOWN;
- }
- }
-
- if (is_inside_k8s && !k8s_get_container_first_proc_comm(cg->id, comm)) {
- // container initialization may take some time when CPU utilization is high
- // seen on GKE: comm is '6' before 'runc:[2:INIT]' (unclear whether it can be another number)
- if (is_digits_only(comm) || matches_entrypoint_parent_process_comm(comm)) {
- cg->first_time_seen = 1;
- return;
- }
- if (!strcmp(comm, "pause")) {
- // a container that holds the network namespace for the pod
- // we don't need to collect its metrics
- cg->processed = 1;
- return;
- }
- }
-
- if (cgroup_enable_systemd_services && matches_systemd_services_cgroups(cg->id)) {
- netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'cgroups to match as systemd services'", cg->id, cg->chart_id);
- convert_cgroup_to_systemd_service(cg);
- return;
- }
-
- if (matches_enabled_cgroup_renames(cg->id)) {
- netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') matches 'run script to rename cgroups matching', will try to rename it", cg->id, cg->chart_id);
- if (is_inside_k8s && is_cgroup_k8s_container(cg->id)) {
- // it may take up to a minute for the K8s API to return data for the container
- // (tested on an AWS K8s cluster under 100% CPU utilization)
- cg->pending_renames = 9; // ~1.5 minutes
- } else {
- cg->pending_renames = 2;
- }
- }
-}
-
-static int discovery_is_cgroup_duplicate(struct cgroup *cg) {
- // https://github.com/netdata/netdata/issues/797#issuecomment-241248884
- struct cgroup *c;
- for (c = discovered_cgroup_root; c; c = c->discovered_next) {
- if (c != cg && c->enabled && (is_cgroup_systemd_service(c) == is_cgroup_systemd_service(cg)) &&
- c->hash_chart_id == cg->hash_chart_id && !strcmp(c->chart_id, cg->chart_id)) {
- collector_error(
- "CGROUP: chart id '%s' already exists with id '%s' and is enabled and available. Disabling cgroup with id '%s'.",
- cg->chart_id,
- c->id,
- cg->id);
- return 1;
- }
- }
- return 0;
-}
-
-// ----------------------------------------------------------------------------
-// cgroup network interfaces
-
-#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
-
-static inline void read_cgroup_network_interfaces(struct cgroup *cg) {
- netdata_log_debug(D_CGROUP, "looking for the network interfaces of cgroup '%s' with chart id '%s'", cg->id, cg->chart_id);
-
- pid_t cgroup_pid;
- char cgroup_identifier[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
-
- if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- snprintfz(cgroup_identifier, CGROUP_NETWORK_INTERFACE_MAX_LINE, "%s%s", cgroup_cpuacct_base, cg->id);
- }
- else {
- snprintfz(cgroup_identifier, CGROUP_NETWORK_INTERFACE_MAX_LINE, "%s%s", cgroup_unified_base, cg->id);
- }
-
- netdata_log_debug(D_CGROUP, "executing cgroup_identifier %s --cgroup '%s' for cgroup '%s'", cgroups_network_interface_script, cgroup_identifier, cg->id);
- FILE *fp_child_input, *fp_child_output;
- (void)netdata_popen_raw_default_flags_and_environment(&cgroup_pid, &fp_child_input, &fp_child_output, cgroups_network_interface_script, "--cgroup", cgroup_identifier);
- if(!fp_child_output) {
- collector_error("CGROUP: cannot popen(%s --cgroup \"%s\", \"r\").", cgroups_network_interface_script, cgroup_identifier);
- return;
- }
-
- char *s;
- char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
- while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp_child_output))) {
- trim(s);
-
- if(*s && *s != '\n') {
- char *t = s;
- while(*t && *t != ' ') t++;
- if(*t == ' ') {
- *t = '\0';
- t++;
- }
-
- if(!*s) {
- collector_error("CGROUP: empty host interface returned by script");
- continue;
- }
-
- if(!*t) {
- collector_error("CGROUP: empty guest interface returned by script");
- continue;
- }
-
- struct cgroup_network_interface *i = callocz(1, sizeof(struct cgroup_network_interface));
- i->host_device = strdupz(s);
- i->container_device = strdupz(t);
- i->next = cg->interfaces;
- cg->interfaces = i;
-
- collector_info("CGROUP: cgroup '%s' has network interface '%s' as '%s'", cg->id, i->host_device, i->container_device);
-
- // register a device rename to proc_net_dev.c
- netdev_rename_device_add(i->host_device, i->container_device, cg->chart_id, cg->chart_labels,
- k8s_is_kubepod(cg) ? "k8s." : "", cgroup_netdev_get(cg));
- }
- }
-
- netdata_pclose(fp_child_input, fp_child_output, cgroup_pid);
- // netdata_log_debug(D_CGROUP, "closed cgroup_identifier for cgroup '%s'", cg->id);
-}
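-// The script is expected to print one interface pair per line, as
-// "<host_device> <container_device>", e.g. "veth1234abcd eth0"; empty fields are
-// rejected above, and each pair is registered with proc_net_dev.c so the host-side
-// device is renamed after the container.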
-
-static inline void discovery_process_cgroup(struct cgroup *cg) {
- if (!cg->available || cg->processed) {
- return;
- }
-
- if (cg->first_time_seen) {
- worker_is_busy(WORKER_DISCOVERY_PROCESS_FIRST_TIME);
- discovery_process_first_time_seen_cgroup(cg);
- if (unlikely(cg->first_time_seen || cg->processed)) {
- return;
- }
- }
-
- if (cg->pending_renames) {
- worker_is_busy(WORKER_DISCOVERY_PROCESS_RENAME);
- discovery_rename_cgroup(cg);
- if (unlikely(cg->pending_renames || cg->processed)) {
- return;
- }
- }
-
- cg->processed = 1;
-
- if ((strlen(cg->chart_id) + strlen(cgroup_chart_id_prefix)) >= RRD_ID_LENGTH_MAX) {
- collector_info("cgroup '%s' (chart id '%s') disabled because chart_id exceeds the limit (RRD_ID_LENGTH_MAX)", cg->id, cg->chart_id);
- return;
- }
-
- if (is_cgroup_systemd_service(cg)) {
- if (discovery_is_cgroup_duplicate(cg)) {
- cg->enabled = 0;
- cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
- return;
- }
- if (!cg->chart_labels)
- cg->chart_labels = rrdlabels_create();
- rrdlabels_add(cg->chart_labels, "service_name", cg->name, RRDLABEL_SRC_AUTO);
- cg->enabled = 1;
- return;
- }
-
- if (!(cg->enabled = matches_enabled_cgroup_names(cg->name))) {
- netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups names matching'", cg->id, cg->name);
- return;
- }
-
- if (!(cg->enabled = matches_enabled_cgroup_paths(cg->id))) {
- netdata_log_debug(D_CGROUP, "cgroup '%s' (name '%s') disabled by 'enable by default cgroups matching'", cg->id, cg->name);
- return;
- }
-
- if (discovery_is_cgroup_duplicate(cg)) {
- cg->enabled = 0;
- cg->options |= CGROUP_OPTIONS_DISABLED_DUPLICATE;
- return;
- }
-
- if (!cg->chart_labels)
- cg->chart_labels = rrdlabels_create();
-
- if (!k8s_is_kubepod(cg)) {
- rrdlabels_add(cg->chart_labels, "cgroup_name", cg->name, RRDLABEL_SRC_AUTO);
- if (!rrdlabels_exist(cg->chart_labels, "image"))
- rrdlabels_add(cg->chart_labels, "image", "", RRDLABEL_SRC_AUTO);
- }
-
- worker_is_busy(WORKER_DISCOVERY_PROCESS_NETWORK);
- read_cgroup_network_interfaces(cg);
-}
-
-static inline void discovery_find_all_cgroups() {
- netdata_log_debug(D_CGROUP, "searching for cgroups");
-
- worker_is_busy(WORKER_DISCOVERY_INIT);
- discovery_mark_as_unavailable_all_cgroups();
-
- worker_is_busy(WORKER_DISCOVERY_FIND);
- if (!cgroup_use_unified_cgroups) {
- discovery_find_all_cgroups_v1();
- } else {
- discovery_find_all_cgroups_v2();
- }
-
- for (struct cgroup *cg = discovered_cgroup_root; cg; cg = cg->discovered_next) {
- worker_is_busy(WORKER_DISCOVERY_PROCESS);
- discovery_process_cgroup(cg);
- }
-
- worker_is_busy(WORKER_DISCOVERY_UPDATE);
- discovery_update_filenames_all_cgroups();
-
- worker_is_busy(WORKER_DISCOVERY_LOCK);
- uv_mutex_lock(&cgroup_root_mutex);
-
- worker_is_busy(WORKER_DISCOVERY_CLEANUP);
- discovery_cleanup_all_cgroups();
-
- worker_is_busy(WORKER_DISCOVERY_COPY);
- discovery_copy_discovered_cgroups_to_reader();
-
- uv_mutex_unlock(&cgroup_root_mutex);
-
- worker_is_busy(WORKER_DISCOVERY_SHARE);
- discovery_share_cgroups_with_ebpf();
-
- netdata_log_debug(D_CGROUP, "done searching for cgroups");
-}
-
-void cgroup_discovery_worker(void *ptr)
-{
- UNUSED(ptr);
-
- worker_register("CGROUPSDISC");
- worker_register_job_name(WORKER_DISCOVERY_INIT, "init");
- worker_register_job_name(WORKER_DISCOVERY_FIND, "find");
- worker_register_job_name(WORKER_DISCOVERY_PROCESS, "process");
- worker_register_job_name(WORKER_DISCOVERY_PROCESS_RENAME, "rename");
- worker_register_job_name(WORKER_DISCOVERY_PROCESS_NETWORK, "network");
- worker_register_job_name(WORKER_DISCOVERY_PROCESS_FIRST_TIME, "new");
- worker_register_job_name(WORKER_DISCOVERY_UPDATE, "update");
- worker_register_job_name(WORKER_DISCOVERY_CLEANUP, "cleanup");
- worker_register_job_name(WORKER_DISCOVERY_COPY, "copy");
- worker_register_job_name(WORKER_DISCOVERY_SHARE, "share");
- worker_register_job_name(WORKER_DISCOVERY_LOCK, "lock");
-
- entrypoint_parent_process_comm = simple_pattern_create(
- " runc:[* " // http://terenceli.github.io/%E6%8A%80%E6%9C%AF/2021/12/28/runc-internals-3)
- " exe ", // https://github.com/falcosecurity/falco/blob/9d41b0a151b83693929d3a9c84f7c5c85d070d3a/rules/falco_rules.yaml#L1961
- NULL,
- SIMPLE_PATTERN_EXACT, true);
-
- service_register(SERVICE_THREAD_TYPE_LIBUV, NULL, NULL, NULL, false);
-
- while (service_running(SERVICE_COLLECTORS)) {
- worker_is_idle();
-
- uv_mutex_lock(&discovery_thread.mutex);
- uv_cond_wait(&discovery_thread.cond_var, &discovery_thread.mutex);
- uv_mutex_unlock(&discovery_thread.mutex);
-
- if (unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
-
- discovery_find_all_cgroups();
- }
- collector_info("discovery thread stopped");
- worker_unregister();
- service_exits();
- __atomic_store_n(&discovery_thread.exited,1,__ATOMIC_RELAXED);
-}
diff --git a/collectors/cgroups.plugin/cgroup-internals.h b/collectors/cgroups.plugin/cgroup-internals.h
deleted file mode 100644
index a69802240..000000000
--- a/collectors/cgroups.plugin/cgroup-internals.h
+++ /dev/null
@@ -1,514 +0,0 @@
-#include "sys_fs_cgroup.h"
-
-#ifndef NETDATA_CGROUP_INTERNALS_H
-#define NETDATA_CGROUP_INTERNALS_H 1
-
-#ifdef NETDATA_INTERNAL_CHECKS
-#define CGROUP_PROCFILE_FLAG PROCFILE_FLAG_DEFAULT
-#else
-#define CGROUP_PROCFILE_FLAG PROCFILE_FLAG_NO_ERROR_ON_FILE_IO
-#endif
-
-struct blkio {
- int updated;
- int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
- int delay_counter;
-
- char *filename;
-
- unsigned long long Read;
- unsigned long long Write;
-/*
- unsigned long long Sync;
- unsigned long long Async;
- unsigned long long Total;
-*/
-};
-
-struct pids {
- char *pids_current_filename;
- int pids_current_updated;
- unsigned long long pids_current;
-};
-
-// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt
-struct memory {
- ARL_BASE *arl_base;
- ARL_ENTRY *arl_dirty;
- ARL_ENTRY *arl_swap;
-
- int updated_detailed;
- int updated_usage_in_bytes;
- int updated_msw_usage_in_bytes;
- int updated_failcnt;
-
- int enabled_detailed; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
- int enabled_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
- int enabled_msw_usage_in_bytes; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
- int enabled_failcnt; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
-
- int delay_counter_detailed;
- int delay_counter_failcnt;
-
- char *filename_detailed;
- char *filename_usage_in_bytes;
- char *filename_msw_usage_in_bytes;
- char *filename_failcnt;
-
- int detailed_has_dirty;
- int detailed_has_swap;
-
- // detailed metrics
-/*
- unsigned long long cache;
- unsigned long long rss;
- unsigned long long rss_huge;
- unsigned long long mapped_file;
- unsigned long long writeback;
- unsigned long long dirty;
- unsigned long long swap;
- unsigned long long pgpgin;
- unsigned long long pgpgout;
- unsigned long long pgfault;
- unsigned long long pgmajfault;
- unsigned long long inactive_anon;
- unsigned long long active_anon;
- unsigned long long inactive_file;
- unsigned long long active_file;
- unsigned long long unevictable;
- unsigned long long hierarchical_memory_limit;
-*/
- //unified cgroups metrics
- unsigned long long anon;
- unsigned long long kernel_stack;
- unsigned long long slab;
- unsigned long long sock;
- // unsigned long long shmem;
- unsigned long long anon_thp;
- //unsigned long long file_writeback;
- //unsigned long long file_dirty;
- //unsigned long long file;
-
- unsigned long long total_cache;
- unsigned long long total_rss;
- unsigned long long total_rss_huge;
- unsigned long long total_mapped_file;
- unsigned long long total_writeback;
- unsigned long long total_dirty;
- unsigned long long total_swap;
- unsigned long long total_pgpgin;
- unsigned long long total_pgpgout;
- unsigned long long total_pgfault;
- unsigned long long total_pgmajfault;
-/*
- unsigned long long total_inactive_anon;
- unsigned long long total_active_anon;
-*/
-
- unsigned long long total_inactive_file;
-
-/*
- unsigned long long total_active_file;
- unsigned long long total_unevictable;
-*/
-
- // single file metrics
- unsigned long long usage_in_bytes;
- unsigned long long msw_usage_in_bytes;
- unsigned long long failcnt;
-};
-
-// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
-struct cpuacct_stat {
- int updated;
- int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
-
- char *filename;
-
- unsigned long long user; // v1, v2(user_usec)
- unsigned long long system; // v1, v2(system_usec)
-};
-
-// https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
-struct cpuacct_usage {
- int updated;
- int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
-
- char *filename;
-
- unsigned int cpus;
- unsigned long long *cpu_percpu;
-};
-
-// represents cpuacct/cpu.stat, for v2 'cpuacct_stat' is used for 'user_usec', 'system_usec'
-struct cpuacct_cpu_throttling {
- int updated;
- int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
-
- char *filename;
-
- unsigned long long nr_periods;
- unsigned long long nr_throttled;
- unsigned long long throttled_time;
-
- unsigned long long nr_throttled_perc;
-};
-
-// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-cpu#sect-cfs
-// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_monitoring_and_updating_the_kernel/using-cgroups-v2-to-control-distribution-of-cpu-time-for-applications_managing-monitoring-and-updating-the-kernel#proc_controlling-distribution-of-cpu-time-for-applications-by-adjusting-cpu-weight_using-cgroups-v2-to-control-distribution-of-cpu-time-for-applications
-struct cpuacct_cpu_shares {
- int updated;
- int enabled; // CONFIG_BOOLEAN_YES or CONFIG_BOOLEAN_AUTO
-
- char *filename;
-
- unsigned long long shares;
-};
-
-struct cgroup_network_interface {
- const char *host_device;
- const char *container_device;
- struct cgroup_network_interface *next;
-};
-
-enum cgroups_container_orchestrator {
- CGROUPS_ORCHESTRATOR_UNSET,
- CGROUPS_ORCHESTRATOR_UNKNOWN,
- CGROUPS_ORCHESTRATOR_K8S
-};
-
-
- // *** WARNING *** These fields are not thread-safe; take care to access them safely.
-struct cgroup {
- uint32_t options;
-
- int first_time_seen; // first time seen by the discoverer
- int processed; // the discoverer is done processing a cgroup (resolved name, set 'enabled' option)
-
- char available; // found in the filesystem
- char enabled; // enabled in the config
-
- bool function_ready; // true after the first iteration of chart creation/update
-
- char pending_renames;
-
- char *id;
- uint32_t hash;
-
- char *intermediate_id; // TODO: remove it when the renaming script is fixed
-
- char *chart_id;
- uint32_t hash_chart_id;
-
- // 'cgroup_name' label value.
- // by default this is the id (path); later it is changed to the resolved name (cgroup-name.sh) or the systemd service name.
- char *name;
-
- RRDLABELS *chart_labels;
-
- int container_orchestrator;
-
- struct cpuacct_stat cpuacct_stat;
- struct cpuacct_usage cpuacct_usage;
- struct cpuacct_cpu_throttling cpuacct_cpu_throttling;
- struct cpuacct_cpu_shares cpuacct_cpu_shares;
-
- struct memory memory;
-
- struct blkio io_service_bytes; // bytes
- struct blkio io_serviced; // operations
-
- struct blkio throttle_io_service_bytes; // bytes
- struct blkio throttle_io_serviced; // operations
-
- struct blkio io_merged; // operations
- struct blkio io_queued; // operations
-
- struct pids pids;
-
- struct cgroup_network_interface *interfaces;
-
- struct pressure cpu_pressure;
- struct pressure io_pressure;
- struct pressure memory_pressure;
- struct pressure irq_pressure;
-
- // Cpu
- RRDSET *st_cpu;
- RRDDIM *st_cpu_rd_user;
- RRDDIM *st_cpu_rd_system;
-
- RRDSET *st_cpu_limit;
- RRDSET *st_cpu_per_core;
- RRDSET *st_cpu_nr_throttled;
- RRDSET *st_cpu_throttled_time;
- RRDSET *st_cpu_shares;
-
- // Memory
- RRDSET *st_mem;
- RRDDIM *st_mem_rd_ram;
- RRDDIM *st_mem_rd_swap;
-
- RRDSET *st_mem_utilization;
- RRDSET *st_writeback;
- RRDSET *st_mem_activity;
- RRDSET *st_pgfaults;
- RRDSET *st_mem_usage;
- RRDSET *st_mem_usage_limit;
- RRDSET *st_mem_failcnt;
-
- // Blkio
- RRDSET *st_io;
- RRDDIM *st_io_rd_read;
- RRDDIM *st_io_rd_written;
-
- RRDSET *st_serviced_ops;
-
- RRDSET *st_throttle_io;
- RRDDIM *st_throttle_io_rd_read;
- RRDDIM *st_throttle_io_rd_written;
-
- RRDSET *st_throttle_serviced_ops;
-
- RRDSET *st_queued_ops;
- RRDSET *st_merged_ops;
-
- // Pids
- RRDSET *st_pids;
- RRDDIM *st_pids_rd_pids_current;
-
- // per cgroup chart variables
- char *filename_cpuset_cpus;
- unsigned long long cpuset_cpus;
-
- char *filename_cpu_cfs_period;
- unsigned long long cpu_cfs_period;
-
- char *filename_cpu_cfs_quota;
- unsigned long long cpu_cfs_quota;
-
- const RRDSETVAR_ACQUIRED *chart_var_cpu_limit;
- NETDATA_DOUBLE prev_cpu_usage;
-
- char *filename_memory_limit;
- unsigned long long memory_limit;
- const RRDSETVAR_ACQUIRED *chart_var_memory_limit;
-
- char *filename_memoryswap_limit;
- unsigned long long memoryswap_limit;
- const RRDSETVAR_ACQUIRED *chart_var_memoryswap_limit;
-
- const DICTIONARY_ITEM *cgroup_netdev_link;
-
- struct cgroup *next;
- struct cgroup *discovered_next;
-
-};
-
-struct discovery_thread {
- uv_thread_t thread;
- uv_mutex_t mutex;
- uv_cond_t cond_var;
- int exited;
-};
-
-extern struct discovery_thread discovery_thread;
-
-extern char *cgroups_rename_script;
-extern char cgroup_chart_id_prefix[];
-extern char services_chart_id_prefix[];
-extern uv_mutex_t cgroup_root_mutex;
-
-void cgroup_discovery_worker(void *ptr);
-
-extern int is_inside_k8s;
-extern long system_page_size;
-extern int cgroup_enable_cpuacct_stat;
-extern int cgroup_enable_cpuacct_usage;
-extern int cgroup_enable_cpuacct_cpu_throttling;
-extern int cgroup_enable_cpuacct_cpu_shares;
-extern int cgroup_enable_memory;
-extern int cgroup_enable_detailed_memory;
-extern int cgroup_enable_memory_failcnt;
-extern int cgroup_enable_swap;
-extern int cgroup_enable_blkio_io;
-extern int cgroup_enable_blkio_ops;
-extern int cgroup_enable_blkio_throttle_io;
-extern int cgroup_enable_blkio_throttle_ops;
-extern int cgroup_enable_blkio_merged_ops;
-extern int cgroup_enable_blkio_queued_ops;
-extern int cgroup_enable_pressure_cpu;
-extern int cgroup_enable_pressure_io_some;
-extern int cgroup_enable_pressure_io_full;
-extern int cgroup_enable_pressure_memory_some;
-extern int cgroup_enable_pressure_memory_full;
-extern int cgroup_enable_pressure_irq_some;
-extern int cgroup_enable_pressure_irq_full;
-extern int cgroup_enable_systemd_services;
-extern int cgroup_enable_systemd_services_detailed_memory;
-extern int cgroup_used_memory;
-extern int cgroup_use_unified_cgroups;
-extern int cgroup_unified_exist;
-extern int cgroup_search_in_devices;
-extern int cgroup_check_for_new_every;
-extern int cgroup_update_every;
-extern int cgroup_containers_chart_priority;
-extern int cgroup_recheck_zero_blkio_every_iterations;
-extern int cgroup_recheck_zero_mem_failcnt_every_iterations;
-extern int cgroup_recheck_zero_mem_detailed_every_iterations;
-extern char *cgroup_cpuacct_base;
-extern char *cgroup_cpuset_base;
-extern char *cgroup_blkio_base;
-extern char *cgroup_memory_base;
-extern char *cgroup_pids_base;
-extern char *cgroup_devices_base;
-extern char *cgroup_unified_base;
-extern int cgroup_root_count;
-extern int cgroup_root_max;
-extern int cgroup_max_depth;
-extern SIMPLE_PATTERN *enabled_cgroup_paths;
-extern SIMPLE_PATTERN *enabled_cgroup_names;
-extern SIMPLE_PATTERN *search_cgroup_paths;
-extern SIMPLE_PATTERN *enabled_cgroup_renames;
-extern SIMPLE_PATTERN *systemd_services_cgroups;
-extern SIMPLE_PATTERN *entrypoint_parent_process_comm;
-extern char *cgroups_network_interface_script;
-extern int cgroups_check;
-extern uint32_t Read_hash;
-extern uint32_t Write_hash;
-extern uint32_t user_hash;
-extern uint32_t system_hash;
-extern uint32_t user_usec_hash;
-extern uint32_t system_usec_hash;
-extern uint32_t nr_periods_hash;
-extern uint32_t nr_throttled_hash;
-extern uint32_t throttled_time_hash;
-extern uint32_t throttled_usec_hash;
-extern struct cgroup *cgroup_root;
-
-extern netdata_ebpf_cgroup_shm_t shm_cgroup_ebpf;
-extern int shm_fd_cgroup_ebpf;
-extern sem_t *shm_mutex_cgroup_ebpf;
-
-enum cgroups_type { CGROUPS_AUTODETECT_FAIL, CGROUPS_V1, CGROUPS_V2 };
-
-enum cgroups_systemd_setting {
- SYSTEMD_CGROUP_ERR,
- SYSTEMD_CGROUP_LEGACY,
- SYSTEMD_CGROUP_HYBRID,
- SYSTEMD_CGROUP_UNIFIED
-};
-
-struct cgroups_systemd_config_setting {
- char *name;
- enum cgroups_systemd_setting setting;
-};
-
-extern struct cgroups_systemd_config_setting cgroups_systemd_options[];
-
-static inline int matches_enabled_cgroup_paths(char *id) {
- return simple_pattern_matches(enabled_cgroup_paths, id);
-}
-
-static inline int matches_enabled_cgroup_names(char *name) {
- return simple_pattern_matches(enabled_cgroup_names, name);
-}
-
-static inline int matches_enabled_cgroup_renames(char *id) {
- return simple_pattern_matches(enabled_cgroup_renames, id);
-}
-
-static inline int matches_systemd_services_cgroups(char *id) {
- return simple_pattern_matches(systemd_services_cgroups, id);
-}
-
-static inline int matches_search_cgroup_paths(const char *dir) {
- return simple_pattern_matches(search_cgroup_paths, dir);
-}
-
-static inline int matches_entrypoint_parent_process_comm(const char *comm) {
- return simple_pattern_matches(entrypoint_parent_process_comm, comm);
-}
-
-static inline int is_cgroup_systemd_service(struct cgroup *cg) {
- return (int)(cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE);
-}
-
-static inline int k8s_is_kubepod(struct cgroup *cg) {
- return cg->container_orchestrator == CGROUPS_ORCHESTRATOR_K8S;
-}
-
-static inline char *cgroup_chart_type(char *buffer, struct cgroup *cg) {
- buffer[0] = '\0';
-
- if (cg->chart_id[0] == '\0' || (cg->chart_id[0] == '/' && cg->chart_id[1] == '\0'))
- strncpy(buffer, "cgroup_root", RRD_ID_LENGTH_MAX);
- else if (is_cgroup_systemd_service(cg))
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s%s", services_chart_id_prefix, cg->chart_id);
- else
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s%s", cgroup_chart_id_prefix, cg->chart_id);
-
- return buffer;
-}
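-// e.g. with cgroup_chart_id_prefix presumably "cgroup_", chart_id "docker" yields the
-// chart type "cgroup_docker"; an empty or "/" chart_id maps to "cgroup_root".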
-
-#define RRDFUNCTIONS_CGTOP_HELP "View running containers"
-
-int cgroup_function_cgroup_top(BUFFER *wb, int timeout, const char *function, void *collector_data,
- rrd_function_result_callback_t result_cb, void *result_cb_data,
- rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
- rrd_function_register_canceller_cb_t register_canceller_cb, void *register_canceller_cb_data);
-int cgroup_function_systemd_top(BUFFER *wb, int timeout, const char *function, void *collector_data,
- rrd_function_result_callback_t result_cb, void *result_cb_data,
- rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
- rrd_function_register_canceller_cb_t register_canceller_cb, void *register_canceller_cb_data);
-
-void cgroup_netdev_link_init(void);
-const DICTIONARY_ITEM *cgroup_netdev_get(struct cgroup *cg);
-void cgroup_netdev_delete(struct cgroup *cg);
-
-void update_cpu_utilization_chart(struct cgroup *cg);
-void update_cpu_utilization_limit_chart(struct cgroup *cg, NETDATA_DOUBLE cpu_limit);
-void update_cpu_throttled_chart(struct cgroup *cg);
-void update_cpu_throttled_duration_chart(struct cgroup *cg);
-void update_cpu_shares_chart(struct cgroup *cg);
-void update_cpu_per_core_usage_chart(struct cgroup *cg);
-
-void update_mem_usage_limit_chart(struct cgroup *cg, unsigned long long memory_limit);
-void update_mem_utilization_chart(struct cgroup *cg, unsigned long long memory_limit);
-void update_mem_usage_detailed_chart(struct cgroup *cg);
-void update_mem_writeback_chart(struct cgroup *cg);
-void update_mem_activity_chart(struct cgroup *cg);
-void update_mem_pgfaults_chart(struct cgroup *cg);
-void update_mem_failcnt_chart(struct cgroup *cg);
-void update_mem_usage_chart(struct cgroup *cg);
-
-void update_io_serviced_bytes_chart(struct cgroup *cg);
-void update_io_serviced_ops_chart(struct cgroup *cg);
-void update_throttle_io_serviced_bytes_chart(struct cgroup *cg);
-void update_throttle_io_serviced_ops_chart(struct cgroup *cg);
-void update_io_queued_ops_chart(struct cgroup *cg);
-void update_io_merged_ops_chart(struct cgroup *cg);
-
-void update_pids_current_chart(struct cgroup *cg);
-
-void update_cpu_some_pressure_chart(struct cgroup *cg);
-void update_cpu_some_pressure_stall_time_chart(struct cgroup *cg);
-void update_cpu_full_pressure_chart(struct cgroup *cg);
-void update_cpu_full_pressure_stall_time_chart(struct cgroup *cg);
-
-void update_mem_some_pressure_chart(struct cgroup *cg);
-void update_mem_some_pressure_stall_time_chart(struct cgroup *cg);
-void update_mem_full_pressure_chart(struct cgroup *cg);
-void update_mem_full_pressure_stall_time_chart(struct cgroup *cg);
-
-void update_irq_some_pressure_chart(struct cgroup *cg);
-void update_irq_some_pressure_stall_time_chart(struct cgroup *cg);
-void update_irq_full_pressure_chart(struct cgroup *cg);
-void update_irq_full_pressure_stall_time_chart(struct cgroup *cg);
-
-void update_io_some_pressure_chart(struct cgroup *cg);
-void update_io_some_pressure_stall_time_chart(struct cgroup *cg);
-void update_io_full_pressure_chart(struct cgroup *cg);
-void update_io_full_pressure_stall_time_chart(struct cgroup *cg);
-
-#endif // NETDATA_CGROUP_INTERNALS_H
\ No newline at end of file
diff --git a/collectors/cgroups.plugin/cgroup-name.sh.in b/collectors/cgroups.plugin/cgroup-name.sh.in
deleted file mode 100755
index 0f8b63256..000000000
--- a/collectors/cgroups.plugin/cgroup-name.sh.in
+++ /dev/null
@@ -1,706 +0,0 @@
-#!/usr/bin/env bash
-#shellcheck disable=SC2001
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2023 Netdata Inc.
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Script to find a better name for cgroups
-#
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin:@sbindir_POST@"
-export LC_ALL=C
-
-cmd_line="'${0}' $(printf "'%s' " "${@}")"
-
-# -----------------------------------------------------------------------------
-# logging
-
-PROGRAM_NAME="$(basename "${0}")"
-
-# these should be the same with syslog() priorities
-NDLP_EMERG=0 # system is unusable
-NDLP_ALERT=1 # action must be taken immediately
-NDLP_CRIT=2 # critical conditions
-NDLP_ERR=3 # error conditions
-NDLP_WARN=4 # warning conditions
-NDLP_NOTICE=5 # normal but significant condition
-NDLP_INFO=6 # informational
-NDLP_DEBUG=7 # debug-level messages
-
-# the max (numerically) log level we will log
-LOG_LEVEL=$NDLP_INFO
-
-set_log_min_priority() {
- case "${NETDATA_LOG_LEVEL,,}" in
- "emerg" | "emergency")
- LOG_LEVEL=$NDLP_EMERG
- ;;
-
- "alert")
- LOG_LEVEL=$NDLP_ALERT
- ;;
-
- "crit" | "critical")
- LOG_LEVEL=$NDLP_CRIT
- ;;
-
- "err" | "error")
- LOG_LEVEL=$NDLP_ERR
- ;;
-
- "warn" | "warning")
- LOG_LEVEL=$NDLP_WARN
- ;;
-
- "notice")
- LOG_LEVEL=$NDLP_NOTICE
- ;;
-
- "info")
- LOG_LEVEL=$NDLP_INFO
- ;;
-
- "debug")
- LOG_LEVEL=$NDLP_DEBUG
- ;;
- esac
-}
-
-set_log_min_priority
-
-log() {
- local level="${1}"
- shift 1
-
- [[ -n "$level" && -n "$LOG_LEVEL" && "$level" -gt "$LOG_LEVEL" ]] && return
-
- systemd-cat-native --log-as-netdata --newline="--NEWLINE--" <<EOFLOG
-INVOCATION_ID=${NETDATA_INVOCATION_ID}
-SYSLOG_IDENTIFIER=${PROGRAM_NAME}
-PRIORITY=${level}
-THREAD_TAG=cgroup-name
-ND_LOG_SOURCE=collector
-ND_REQUEST=${cmd_line}
-MESSAGE=${*//\\n/--NEWLINE--}
-
-EOFLOG
- # AN EMPTY LINE IS NEEDED ABOVE
-}
-
-info() {
- log "$NDLP_INFO" "${@}"
-}
-
-warning() {
- log "$NDLP_WARN" "${@}"
-}
-
-error() {
- log "$NDLP_ERR" "${@}"
-}
-
-fatal() {
- log "$NDLP_ALERT" "${@}"
- exit 1
-}
-
-debug() {
- log "$NDLP_DEBUG" "${@}"
-}
-
-# -----------------------------------------------------------------------------
-
-function parse_docker_like_inspect_output() {
- local output="${1}"
- eval "$(grep -E "^(NOMAD_NAMESPACE|NOMAD_JOB_NAME|NOMAD_TASK_NAME|NOMAD_SHORT_ALLOC_ID|CONT_NAME|IMAGE_NAME)=" <<<"$output")"
- if [ -n "$NOMAD_NAMESPACE" ] && [ -n "$NOMAD_JOB_NAME" ] && [ -n "$NOMAD_TASK_NAME" ] && [ -n "$NOMAD_SHORT_ALLOC_ID" ]; then
- NAME="${NOMAD_NAMESPACE}-${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}-${NOMAD_SHORT_ALLOC_ID}"
- else
- NAME=$(echo "${CONT_NAME}" | sed 's|^/||')
- fi
- if [ -n "${IMAGE_NAME}" ]; then
- LABELS="image=\"${IMAGE_NAME}\""
- fi
-}
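-# Example input (one VAR=value per line, as emitted by the inspect commands below):
-#   CONT_NAME=/my-container
-#   IMAGE_NAME=alpine:3.19
-# -> NAME="my-container" (leading '/' stripped), LABELS='image="alpine:3.19"'
-# When all four NOMAD_* variables are present, NAME becomes
-# "<namespace>-<job>-<task>-<short-alloc-id>" instead.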
-
-function docker_like_get_name_command() {
- local command="${1}"
- local id="${2}"
- info "Running command: ${command} inspect --format='{{range .Config.Env}}{{println .}}{{end}}CONT_NAME={{ .Name}}' \"${id}\""
- if OUTPUT="$(${command} inspect --format='{{range .Config.Env}}{{println .}}{{end}}CONT_NAME={{ .Name}}{{println}}IMAGE_NAME={{ .Config.Image}}' "${id}")" &&
- [ -n "$OUTPUT" ]; then
- parse_docker_like_inspect_output "$OUTPUT"
- fi
- return 0
-}
-
-function docker_like_get_name_api() {
- local host_var="${1}"
- local host="${!host_var}"
- local path="/containers/${2}/json"
- if [ -z "${host}" ]; then
- warning "No ${host_var} is set"
- return 1
- fi
- if ! command -v jq >/dev/null 2>&1; then
- warning "Can't find jq command line tool. jq is required for netdata to retrieve container name using ${host} API, falling back to docker ps"
- return 1
- fi
- if [ -S "${host}" ]; then
- info "Running API command: curl --unix-socket \"${host}\" http://localhost${path}"
- JSON=$(curl -sS --unix-socket "${host}" "http://localhost${path}")
- else
- info "Running API command: curl \"${host}${path}\""
- JSON=$(curl -sS "${host}${path}")
- fi
- if OUTPUT=$(echo "${JSON}" | jq -r '.Config.Env[],"CONT_NAME=\(.Name)","IMAGE_NAME=\(.Config.Image)"') && [ -n "$OUTPUT" ]; then
- parse_docker_like_inspect_output "$OUTPUT"
- fi
- return 0
-}
-
-# get_lbl_val returns the value for the label with the given name.
-# Returns "null" string if the label doesn't exist.
-# Expected labels format: 'name="value",...'.
-function get_lbl_val() {
- local labels want_name
- labels="${1}"
- want_name="${2}"
-
- IFS=, read -ra labels <<< "$labels"
-
- local lname lval
- for l in "${labels[@]}"; do
- IFS="=" read -r lname lval <<< "$l"
- if [ "$want_name" = "$lname" ] && [ -n "$lval" ]; then
-      echo "${lval:1:-1}" # trim the surrounding double quotes
- return 0
- fi
- done
-
- echo "null"
- return 1
-}
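-
-# e.g. (illustrative values):
-#   get_lbl_val 'namespace="default",pod_name="web-1"' pod_name     # prints 'web-1'
-#   get_lbl_val 'namespace="default"' container_name                # prints 'null', returns 1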
-
-function add_lbl_prefix() {
- local orig_labels prefix
- orig_labels="${1}"
- prefix="${2}"
-
- IFS=, read -ra labels <<< "$orig_labels"
-
- local new_labels
- for l in "${labels[@]}"; do
- new_labels+="${prefix}${l},"
- done
-
- echo "${new_labels:0:-1}" # trim last ','
-}
-
-function remove_lbl() {
- local orig_labels lbl_name
- orig_labels="${1}"
- lbl_name="${2}"
-
- IFS=, read -ra labels <<< "$orig_labels"
-
- local new_labels
- for l in "${labels[@]}"; do
- IFS="=" read -r lname lval <<< "$l"
- [ "$lbl_name" != "$lname" ] && new_labels+="${l},"
- done
-
- echo "${new_labels:0:-1}" # trim last ','
-}
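-
-# e.g. (illustrative values):
-#   add_lbl_prefix 'namespace="default",pod_name="web-1"' "k8s_"    # => k8s_namespace="default",k8s_pod_name="web-1"
-#   remove_lbl 'namespace="default",pod_uid="abc123"' "pod_uid"     # => namespace="default"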
-
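-# a k8s "pause" container only holds the pod sandbox (e.g. the shared network
-# namespace); it gets no chart of its own, so its cgroup is skipped by the caller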
-function k8s_is_pause_container() {
- local cgroup_path="${1}"
-
- local file
- if [ -d "${NETDATA_HOST_PREFIX}/sys/fs/cgroup/cpuacct" ]; then
- file="${NETDATA_HOST_PREFIX}/sys/fs/cgroup/cpuacct/$cgroup_path/cgroup.procs"
- else
- file="${NETDATA_HOST_PREFIX}/sys/fs/cgroup/$cgroup_path/cgroup.procs"
- fi
-
- [ ! -f "$file" ] && return 1
-
- local procs
- IFS= read -rd' ' procs 2>/dev/null <"$file"
- #shellcheck disable=SC2206
- procs=($procs)
-
- [ "${#procs[@]}" -ne 1 ] && return 1
-
- IFS= read -r comm 2>/dev/null <"/proc/${procs[0]}/comm"
-
- [ "$comm" == "pause" ]
- return
-}
-
-function k8s_gcp_get_cluster_name() {
- local header url id loc name
- header="Metadata-Flavor: Google"
- url="http://metadata/computeMetadata/v1"
- if id=$(curl --fail -s -m 3 --noproxy "*" -H "$header" "$url/project/project-id") &&
- loc=$(curl --fail -s -m 3 --noproxy "*" -H "$header" "$url/instance/attributes/cluster-location") &&
- name=$(curl --fail -s -m 3 --noproxy "*" -H "$header" "$url/instance/attributes/cluster-name") &&
- [ -n "$id" ] && [ -n "$loc" ] && [ -n "$name" ]; then
- echo "gke_${id}_${loc}_${name}"
- return 0
- fi
- return 1
-}
-
-# k8s_get_kubepod_name resolves */kubepods/* cgroup name.
-# pod level cgroup name format: 'pod_<namespace>_<pod_name>'
-# container level cgroup name format: 'cntr_<namespace>_<pod_name>_<container_name>'
-function k8s_get_kubepod_name() {
- # GKE /sys/fs/cgroup/*/ (cri=docker, cgroups=v1):
- # |-- kubepods
- # | |-- burstable
- # | | |-- pod98cee708-023b-11eb-933d-42010a800193
- # | | | |-- 922161c98e6ea450bf665226cdc64ca2aa3e889934c2cff0aec4325f8f78ac03
- # | `-- pode314bbac-d577-11ea-a171-42010a80013b
- # | |-- 7d505356b04507de7b710016d540b2759483ed5f9136bb01a80872b08f771930
- #
- # GKE /sys/fs/cgroup/*/ (cri=containerd, cgroups=v1):
- # |-- kubepods.slice
- # | |-- kubepods-besteffort.slice
- # | | |-- kubepods-besteffort-pode1465238_4518_4c21_832f_fd9f87033dad.slice
- # | | | |-- cri-containerd-66be9b2efdf4d85288c319b8c1a2f50d2439b5617e36f45d9d0d0be1381113be.scope
- # | `-- kubepods-pod91f5b561_369f_4103_8015_66391059996a.slice
- # | |-- cri-containerd-24c53b774a586f06abc058619b47f71d9d869ac50c92898adbd199106fd0aaeb.scope
- #
- # GKE /sys/fs/cgroup/*/ (cri=crio, cgroups=v1):
- # |-- kubepods.slice
- # | |-- kubepods-besteffort.slice
- # | | |-- kubepods-besteffort-podad412dfe_3589_4056_965a_592356172968.slice
- # | | | |-- crio-77b019312fd9825828b70214b2c94da69c30621af2a7ee06f8beace4bc9439e5.scope
- #
- # Minikube (v1.8.2) /sys/fs/cgroup/*/ (cri=docker, cgroups=v1):
- # |-- kubepods.slice
- # | |-- kubepods-besteffort.slice
- # | | |-- kubepods-besteffort-pod10fb5647_c724_400c_b9cc_0e6eae3110e7.slice
- # | | | |-- docker-36e5eb5056dfdf6dbb75c0c44a1ecf23217fe2c50d606209d8130fcbb19fb5a7.scope
- #
- # kind v0.14.0
- # |-- kubelet.slice
- # | |-- kubelet-kubepods.slice
- # | | |-- kubelet-kubepods-besteffort.slice
- # | | | |-- kubelet-kubepods-besteffort-pod7881ed9e_c63e_4425_b5e0_ac55a08ae939.slice
- # | | | | |-- cri-containerd-00c7939458bffc416bb03451526e9fde13301d6654cfeadf5b4964a7fb5be1a9.scope
- #
- # NOTE: cgroups plugin
- # - uses '_' to join dir names (so it is <parent>_<child>_<child>_...)
- # - replaces '.' with '-'
-
- local fn="${FUNCNAME[0]}"
- local cgroup_path="${1}"
- local id="${2}"
-
- if [[ ! $id =~ ^.*kubepods.* ]]; then
-    warning "${fn}: '${id}' is not a kubepod cgroup."
- return 1
- fi
-
- local clean_id="$id"
- clean_id=${clean_id//.slice/}
- clean_id=${clean_id//.scope/}
-
- local name pod_uid cntr_id
- if [[ $clean_id == "kubepods" ]]; then
- name="$clean_id"
- elif [[ $clean_id =~ .+(besteffort|burstable|guaranteed)$ ]]; then
- # kubepods_<QOS_CLASS>
- # kubepods_kubepods-<QOS_CLASS>
- name=${clean_id//-/_}
- name=${name/#kubepods_kubepods/kubepods}
- elif [[ $clean_id =~ .+pod[a-f0-9_-]+_(docker|crio|cri-containerd)-([a-f0-9]+)$ ]]; then
- # ...pod<POD_UID>_(docker|crio|cri-containerd)-<CONTAINER_ID> (POD_UID w/ "_")
- cntr_id=${BASH_REMATCH[2]}
- elif [[ $clean_id =~ .+pod[a-f0-9-]+_([a-f0-9]+)$ ]]; then
- # ...pod<POD_UID>_<CONTAINER_ID>
- cntr_id=${BASH_REMATCH[1]}
- elif [[ $clean_id =~ .+pod([a-f0-9_-]+)$ ]]; then
- # ...pod<POD_UID> (POD_UID w/ and w/o "_")
- pod_uid=${BASH_REMATCH[1]}
- pod_uid=${pod_uid//_/-}
- fi
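-  # illustrative resolutions (ids taken from the layouts above):
-  #   kubepods-besteffort-pode1465238_4518_4c21_832f_fd9f87033dad
-  #     => pod_uid 'e1465238-4518-4c21-832f-fd9f87033dad'
-  #   kubepods-besteffort-pod<POD_UID>_cri-containerd-66be9b2efdf4d85288c319b8c1a2f50d2439b5617e36f45d9d0d0be1381113be
-  #     => cntr_id '66be9b2efdf4d85288c319b8c1a2f50d2439b5617e36f45d9d0d0be1381113be'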
-
- if [ -n "$name" ]; then
- echo "$name"
- return 0
- fi
-
- if [ -z "$pod_uid" ] && [ -z "$cntr_id" ]; then
- warning "${fn}: can't extract pod_uid or container_id from the cgroup '$id'."
- return 3
- fi
-
- [ -n "$pod_uid" ] && info "${fn}: cgroup '$id' is a pod(uid:$pod_uid)"
- [ -n "$cntr_id" ] && info "${fn}: cgroup '$id' is a container(id:$cntr_id)"
-
- if [ -n "$cntr_id" ] && k8s_is_pause_container "$cgroup_path"; then
- return 3
- fi
-
- if ! command -v jq > /dev/null 2>&1; then
- warning "${fn}: 'jq' command not available."
- return 1
- fi
-
- local tmp_kube_cluster_name="${TMPDIR:-"/tmp"}/netdata-cgroups-k8s-cluster-name"
- local tmp_kube_system_ns_uid_file="${TMPDIR:-"/tmp"}/netdata-cgroups-kubesystem-uid"
- local tmp_kube_containers_file="${TMPDIR:-"/tmp"}/netdata-cgroups-containers"
-
- local kube_cluster_name
- local kube_system_uid
- local labels
-
- if [ -n "$cntr_id" ] &&
- [ -f "$tmp_kube_cluster_name" ] &&
- [ -f "$tmp_kube_system_ns_uid_file" ] &&
- [ -f "$tmp_kube_containers_file" ] &&
- labels=$(grep "$cntr_id" "$tmp_kube_containers_file" 2>/dev/null); then
- IFS= read -r kube_system_uid 2>/dev/null <"$tmp_kube_system_ns_uid_file"
- IFS= read -r kube_cluster_name 2>/dev/null <"$tmp_kube_cluster_name"
- else
- IFS= read -r kube_system_uid 2>/dev/null <"$tmp_kube_system_ns_uid_file"
- IFS= read -r kube_cluster_name 2>/dev/null <"$tmp_kube_cluster_name"
- [ -z "$kube_cluster_name" ] && ! kube_cluster_name=$(k8s_gcp_get_cluster_name) && kube_cluster_name="unknown"
-
- local kube_system_ns
- local pods
-
- if [ -n "${KUBERNETES_SERVICE_HOST}" ] && [ -n "${KUBERNETES_PORT_443_TCP_PORT}" ]; then
- local token header host url
- token="$(</var/run/secrets/kubernetes.io/serviceaccount/token)"
- header="Authorization: Bearer $token"
- host="$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT_443_TCP_PORT"
-
- if [ -z "$kube_system_uid" ]; then
- url="https://$host/api/v1/namespaces/kube-system"
- # FIX: check HTTP response code
- if ! kube_system_ns=$(curl --fail -sSk -H "$header" "$url" 2>&1); then
- warning "${fn}: error on curl '${url}': ${kube_system_ns}."
- fi
- fi
-
- local url
- if [ -n "${USE_KUBELET_FOR_PODS_METADATA}" ]; then
- url="${KUBELET_URL:-https://localhost:10250}/pods"
- else
- url="https://$host/api/v1/pods"
- [ -n "$MY_NODE_NAME" ] && url+="?fieldSelector=spec.nodeName==$MY_NODE_NAME"
- fi
-
- # FIX: check HTTP response code
- if ! pods=$(curl --fail -sSk -H "$header" "$url" 2>&1); then
- warning "${fn}: error on curl '${url}': ${pods}."
- return 1
- fi
- elif ps -C kubelet >/dev/null 2>&1 && command -v kubectl >/dev/null 2>&1; then
- if [ -z "$kube_system_uid" ]; then
- if ! kube_system_ns=$(kubectl --kubeconfig="$KUBE_CONFIG" get namespaces kube-system -o json 2>&1); then
- warning "${fn}: error on 'kubectl': ${kube_system_ns}."
- fi
- fi
-
- [[ -z ${KUBE_CONFIG+x} ]] && KUBE_CONFIG="/etc/kubernetes/admin.conf"
- if ! pods=$(kubectl --kubeconfig="$KUBE_CONFIG" get pods --all-namespaces -o json 2>&1); then
- warning "${fn}: error on 'kubectl': ${pods}."
- return 1
- fi
- else
- warning "${fn}: not inside the k8s cluster and 'kubectl' command not available."
- return 1
- fi
-
- if [ -n "$kube_system_ns" ] && ! kube_system_uid=$(jq -r '.metadata.uid' <<<"$kube_system_ns" 2>&1); then
-      warning "${fn}: error parsing kube_system_ns with 'jq': ${kube_system_uid}."
- fi
-
- local jq_filter
- jq_filter+='.items[] | "'
- jq_filter+='namespace=\"\(.metadata.namespace)\",'
- jq_filter+='pod_name=\"\(.metadata.name)\",'
- jq_filter+='pod_uid=\"\(.metadata.uid)\",'
- #jq_filter+='\(.metadata.labels | to_entries | map("pod_label_"+.key+"=\""+.value+"\"") | join(",") | if length > 0 then .+"," else . end)'
- jq_filter+='\((.metadata.ownerReferences[]? | select(.controller==true) | "controller_kind=\""+.kind+"\",controller_name=\""+.name+"\",") // "")'
- jq_filter+='node_name=\"\(.spec.nodeName)\",'
- jq_filter+='" + '
- jq_filter+='(.status.containerStatuses[]? | "'
- jq_filter+='container_name=\"\(.name)\",'
- jq_filter+='container_id=\"\(.containerID)\"'
- jq_filter+='") | '
- jq_filter+='sub("(docker|cri-o|containerd)://";"")' # containerID: docker://a346da9bc0e3eaba6b295f64ac16e02f2190db2cef570835706a9e7a36e2c722
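-    # a produced line looks like (illustrative):
-    #   namespace="default",pod_name="web-1",pod_uid="e1465238-4518-4c21-832f-fd9f87033dad",node_name="node-a",container_name="nginx",container_id="a346da9bc0e3eaba6b295f64ac16e02f2190db2cef570835706a9e7a36e2c722"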
-
- local containers
- if ! containers=$(jq -r "${jq_filter}" <<<"$pods" 2>&1); then
-      warning "${fn}: error parsing pods with 'jq': ${containers}."
- return 1
- fi
-
- [ -n "$kube_cluster_name" ] && echo "$kube_cluster_name" >"$tmp_kube_cluster_name" 2>/dev/null
- [ -n "$kube_system_ns" ] && [ -n "$kube_system_uid" ] && echo "$kube_system_uid" >"$tmp_kube_system_ns_uid_file" 2>/dev/null
- echo "$containers" >"$tmp_kube_containers_file" 2>/dev/null
- fi
-
- local qos_class
- if [[ $clean_id =~ .+(besteffort|burstable) ]]; then
- qos_class="${BASH_REMATCH[1]}"
- else
- qos_class="guaranteed"
- fi
-
- # available labels:
- # namespace, pod_name, pod_uid, container_name, container_id, node_name
- if [ -n "$cntr_id" ]; then
- if [ -n "$labels" ] || labels=$(grep "$cntr_id" <<< "$containers" 2> /dev/null); then
- labels+=',kind="container"'
- labels+=",qos_class=\"$qos_class\""
- [ -n "$kube_system_uid" ] && [ "$kube_system_uid" != "null" ] && labels+=",cluster_id=\"$kube_system_uid\""
- [ -n "$kube_cluster_name" ] && [ "$kube_cluster_name" != "unknown" ] && labels+=",cluster_name=\"$kube_cluster_name\""
- name="cntr"
- name+="_$(get_lbl_val "$labels" namespace)"
- name+="_$(get_lbl_val "$labels" pod_name)"
- name+="_$(get_lbl_val "$labels" container_name)"
- labels=$(remove_lbl "$labels" "container_id")
- labels=$(remove_lbl "$labels" "pod_uid")
- labels=$(add_lbl_prefix "$labels" "k8s_")
- name+=" $labels"
- else
- return 2
- fi
- elif [ -n "$pod_uid" ]; then
- if labels=$(grep "$pod_uid" -m 1 <<< "$containers" 2> /dev/null); then
- labels="${labels%%,container_*}"
- labels+=',kind="pod"'
- labels+=",qos_class=\"$qos_class\""
- [ -n "$kube_system_uid" ] && [ "$kube_system_uid" != "null" ] && labels+=",cluster_id=\"$kube_system_uid\""
- [ -n "$kube_cluster_name" ] && [ "$kube_cluster_name" != "unknown" ] && labels+=",cluster_name=\"$kube_cluster_name\""
- name="pod"
- name+="_$(get_lbl_val "$labels" namespace)"
- name+="_$(get_lbl_val "$labels" pod_name)"
- labels=$(remove_lbl "$labels" "pod_uid")
- labels=$(add_lbl_prefix "$labels" "k8s_")
- name+=" $labels"
- else
- return 2
- fi
- fi
-
-  # jq prints 'null' for nonexistent fields and nonexistent label values
- if [[ $name =~ _null(_|$) ]]; then
- warning "${fn}: invalid name: $name (cgroup '$id')"
- if [ -n "${USE_KUBELET_FOR_PODS_METADATA}" ]; then
- # local data is cached and may not contain the correct id
- return 2
- fi
- return 1
- fi
-
- echo "$name"
- [ -n "$name" ]
- return
-}
-
-function k8s_get_name() {
- local fn="${FUNCNAME[0]}"
- local cgroup_path="${1}"
- local id="${2}"
- local kubepod_name=""
-
- kubepod_name=$(k8s_get_kubepod_name "$cgroup_path" "$id")
-
- case "$?" in
- 0)
- kubepod_name="k8s_${kubepod_name}"
-
- local name labels
- name=${kubepod_name%% *}
- labels=${kubepod_name#* }
-
- if [ "$name" != "$labels" ]; then
-        info "${fn}: cgroup '${id}' has chart name '${name}', labels '${labels}'"
- NAME="$name"
- LABELS="$labels"
- else
-        info "${fn}: cgroup '${id}' has chart name '${name}'"
- NAME="$name"
- fi
- EXIT_CODE=$EXIT_SUCCESS
- ;;
- 1)
- NAME="k8s_${id}"
- warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${NAME} and enabling it."
- EXIT_CODE=$EXIT_SUCCESS
- ;;
- 2)
- NAME="k8s_${id}"
- warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${NAME} and asking for retry."
- EXIT_CODE=$EXIT_RETRY
- ;;
- *)
- NAME="k8s_${id}"
- warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${NAME} and disabling it."
- EXIT_CODE=$EXIT_DISABLE
- ;;
- esac
-}
-
-function docker_get_name() {
- local id="${1}"
- # See https://github.com/netdata/netdata/pull/13523 for details
- if command -v snap >/dev/null 2>&1 && snap list docker >/dev/null 2>&1; then
- docker_like_get_name_api DOCKER_HOST "${id}"
- elif hash docker 2> /dev/null; then
- docker_like_get_name_command docker "${id}"
- else
- docker_like_get_name_api DOCKER_HOST "${id}" || docker_like_get_name_command podman "${id}"
- fi
- if [ -z "${NAME}" ]; then
- warning "cannot find the name of docker container '${id}'"
- EXIT_CODE=$EXIT_RETRY
- NAME="${id:0:12}"
- else
- info "docker container '${id}' is named '${NAME}'"
- fi
-}
-
-function docker_validate_id() {
- local id="${1}"
- if [ -n "${id}" ] && { [ ${#id} -eq 64 ] || [ ${#id} -eq 12 ]; }; then
- docker_get_name "${id}"
- else
- error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
- fi
-}
-
-function podman_get_name() {
- local id="${1}"
-
- # for Podman, prefer using the API if we can, as netdata will not normally have access
- # to other users' containers, so they will not be visible when running `podman ps`
- docker_like_get_name_api PODMAN_HOST "${id}" || docker_like_get_name_command podman "${id}"
-
- if [ -z "${NAME}" ]; then
- warning "cannot find the name of podman container '${id}'"
- EXIT_CODE=$EXIT_RETRY
- NAME="${id:0:12}"
- else
- info "podman container '${id}' is named '${NAME}'"
- fi
-}
-
-function podman_validate_id() {
- local id="${1}"
- if [ -n "${id}" ] && [ ${#id} -eq 64 ]; then
- podman_get_name "${id}"
- else
-    error "a podman id cannot be extracted from podman cgroup '${CGROUP}'."
- fi
-}
-
-# -----------------------------------------------------------------------------
-
-DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
-PODMAN_HOST="${PODMAN_HOST:=/run/podman/podman.sock}"
-CGROUP_PATH="${1}" # the path as it is (e.g. '/docker/efcf4c409')
-CGROUP="${2}" # the modified path (e.g. 'docker_efcf4c409')
-EXIT_SUCCESS=0
-EXIT_RETRY=2
-EXIT_DISABLE=3
-EXIT_CODE=$EXIT_SUCCESS
-NAME=
-LABELS=
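-
-# illustrative invocation (cgroups.plugin calls this script with both forms of the path):
-#   cgroup-name.sh '/docker/efcf4c409' 'docker_efcf4c409'
-# on success it prints the resolved name and optional labels on stdout, e.g.:
-#   my-nginx image="nginx:latest"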
-
-# -----------------------------------------------------------------------------
-
-if [ -z "${CGROUP}" ]; then
- fatal "called without a cgroup name. Nothing to do."
-fi
-
-if [ -z "${NAME}" ]; then
- if [[ ${CGROUP} =~ ^.*kubepods.* ]]; then
- k8s_get_name "${CGROUP_PATH}" "${CGROUP}"
- fi
-fi
-
-if [ -z "${NAME}" ]; then
- if [[ ${CGROUP} =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
- # docker containers
- #shellcheck disable=SC1117
- DOCKERID="$(echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
- docker_validate_id "${DOCKERID}"
- elif [[ ${CGROUP} =~ ^.*ecs[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
- # ECS
- #shellcheck disable=SC1117
- DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ecs[-_/].*[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
- docker_validate_id "${DOCKERID}"
- elif [[ ${CGROUP} =~ system.slice_containerd.service_cpuset_[a-fA-F0-9]+[-_\.]?.*$ ]]; then
- # docker containers under containerd
- #shellcheck disable=SC1117
- DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ystem.slice_containerd.service_cpuset_\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
- docker_validate_id "${DOCKERID}"
- elif [[ ${CGROUP} =~ ^.*libpod-[a-fA-F0-9]+.*$ ]]; then
- # Podman
- PODMANID="$(echo "${CGROUP}" | sed "s|^.*libpod-\([a-fA-F0-9]\+\).*$|\1|")"
- podman_validate_id "${PODMANID}"
-
- elif [[ ${CGROUP} =~ machine.slice[_/].*\.service ]]; then
- # systemd-nspawn
- NAME="$(echo "${CGROUP}" | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
-
- elif [[ ${CGROUP} =~ machine.slice_machine.*-lxc ]]; then
- # libvirtd / lxc containers
- # machine.slice machine-lxc/x2d969/x2dhubud0xians01.scope => lxc/hubud0xians01
- # machine.slice_machine-lxc/x2d969/x2dhubud0xians01.scope/libvirt_init.scope => lxc/hubud0xians01/libvirt_init
- NAME="lxc/$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-lxc//; s/[\/_]x2d[[:digit:]]*//; s/[\/_]x2d//g; s/\.scope//g')"
- elif [[ ${CGROUP} =~ machine.slice_machine.*-qemu ]]; then
- # libvirtd / qemu virtual machines
- # machine.slice_machine-qemu_x2d1_x2dopnsense.scope => qemu_opnsense
- NAME="qemu_$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-qemu//; s/[\/_]x2d[[:digit:]]*//; s/[\/_]x2d//g; s/\.scope//g')"
-
- elif [[ ${CGROUP} =~ machine_.*\.libvirt-qemu ]]; then
- # libvirtd / qemu virtual machines
- NAME="qemu_$(echo "${CGROUP}" | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
-
- elif [[ ${CGROUP} =~ qemu.slice_([0-9]+).scope && -d "${NETDATA_HOST_PREFIX}/etc/pve" ]]; then
- # Proxmox VMs
- FILENAME="${NETDATA_HOST_PREFIX}/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
- if [[ -f $FILENAME && -r $FILENAME ]]; then
- NAME="qemu_$(grep -e '^name: ' "${FILENAME}" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
- else
-      error "Proxmox config file '${FILENAME}' is missing or netdata does not have read access. Please ensure netdata is a member of the www-data group."
- fi
- elif [[ ${CGROUP} =~ lxc_([0-9]+) && -d "${NETDATA_HOST_PREFIX}/etc/pve" ]]; then
- # Proxmox Containers (LXC)
- FILENAME="${NETDATA_HOST_PREFIX}/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
- if [[ -f ${FILENAME} && -r ${FILENAME} ]]; then
- NAME=$(grep -e '^hostname: ' "${FILENAME}" | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
- else
-      error "Proxmox config file '${FILENAME}' is missing or netdata does not have read access. Please ensure netdata is a member of the www-data group."
- fi
- elif [[ ${CGROUP} =~ lxc.payload.* ]]; then
- # LXC 4.0
- NAME="$(echo "${CGROUP}" | sed 's/lxc\.payload\.\(.*\)/\1/g')"
- fi
-
- [ -z "${NAME}" ] && NAME="${CGROUP}"
- [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
-fi
-
-NAME="${NAME// /_}"
-
-info "cgroup '${CGROUP}' is called '${NAME}', labels '${LABELS}'"
-if [ -n "$LABELS" ]; then
- echo "${NAME} ${LABELS}"
-else
- echo "${NAME}"
-fi
-
-exit ${EXIT_CODE}
diff --git a/collectors/cgroups.plugin/cgroup-network-helper.sh.in b/collectors/cgroups.plugin/cgroup-network-helper.sh.in
deleted file mode 100755
index da9b9162a..000000000
--- a/collectors/cgroups.plugin/cgroup-network-helper.sh.in
+++ /dev/null
@@ -1,376 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC1117
-
-# cgroup-network-helper.sh
-# detect container and virtual machine interfaces
-#
-# (C) 2023 Netdata Inc.
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# This script is called as root (by cgroup-network), with either a pid, or a cgroup path.
-# It tries to find all the network interfaces that belong to the same cgroup.
-#
-# It supports several methods for this detection:
-#
-# 1. cgroup-network (the compiled binary that calls this script) detects veth
-#    network interfaces by examining iflink and ifindex IDs and switching
-#    namespaces (it also detects the interface name as used by the container).
-#
-# 2. this script uses /proc/PID/fdinfo to find tun/tap network interfaces.
-#
-# 3. this script calls virsh to find libvirt network interfaces.
-#
-# 4. this script uses lsns and 'ip link' to match interfaces to the cgroup's
-#    network namespace id (netnsid).
-#
-
-# -----------------------------------------------------------------------------
-
-# the system path is cleared by cgroup-network
-# shellcheck source=/dev/null
-[ -f /etc/profile ] && source /etc/profile
-export PATH="${PATH}:@sbindir_POST@"
-
-export LC_ALL=C
-
-cmd_line="'${0}' $(printf "'%s' " "${@}")"
-
-# -----------------------------------------------------------------------------
-# logging
-
-PROGRAM_NAME="$(basename "${0}")"
-
-# these should be the same as syslog() priorities
-NDLP_EMERG=0 # system is unusable
-NDLP_ALERT=1 # action must be taken immediately
-NDLP_CRIT=2 # critical conditions
-NDLP_ERR=3 # error conditions
-NDLP_WARN=4 # warning conditions
-NDLP_NOTICE=5 # normal but significant condition
-NDLP_INFO=6 # informational
-NDLP_DEBUG=7 # debug-level messages
-
-# the max (numerically) log level we will log
-LOG_LEVEL=$NDLP_INFO
-
-set_log_min_priority() {
- case "${NETDATA_LOG_LEVEL,,}" in
- "emerg" | "emergency")
- LOG_LEVEL=$NDLP_EMERG
- ;;
-
- "alert")
- LOG_LEVEL=$NDLP_ALERT
- ;;
-
- "crit" | "critical")
- LOG_LEVEL=$NDLP_CRIT
- ;;
-
- "err" | "error")
- LOG_LEVEL=$NDLP_ERR
- ;;
-
- "warn" | "warning")
- LOG_LEVEL=$NDLP_WARN
- ;;
-
- "notice")
- LOG_LEVEL=$NDLP_NOTICE
- ;;
-
- "info")
- LOG_LEVEL=$NDLP_INFO
- ;;
-
- "debug")
- LOG_LEVEL=$NDLP_DEBUG
- ;;
- esac
-}
-
-set_log_min_priority
-
-log() {
- local level="${1}"
- shift 1
-
- [[ -n "$level" && -n "$LOG_LEVEL" && "$level" -gt "$LOG_LEVEL" ]] && return
-
- systemd-cat-native --log-as-netdata --newline="--NEWLINE--" <<EOFLOG
-INVOCATION_ID=${NETDATA_INVOCATION_ID}
-SYSLOG_IDENTIFIER=${PROGRAM_NAME}
-PRIORITY=${level}
-THREAD_TAG=cgroup-network-helper
-ND_LOG_SOURCE=collector
-ND_REQUEST=${cmd_line}
-MESSAGE=${*//\\n/--NEWLINE--}
-
-EOFLOG
- # AN EMPTY LINE IS NEEDED ABOVE
-}
-
-info() {
- log "$NDLP_INFO" "${@}"
-}
-
-warning() {
- log "$NDLP_WARN" "${@}"
-}
-
-error() {
- log "$NDLP_ERR" "${@}"
-}
-
-fatal() {
- log "$NDLP_ALERT" "${@}"
- exit 1
-}
-
-debug() {
- log "$NDLP_DEBUG" "${@}"
-}
-
-debug=0
-if [ "${NETDATA_CGROUP_NETWORK_HELPER_DEBUG-0}" = "1" ]; then
- debug=1
- LOG_LEVEL=$NDLP_DEBUG
-fi
-
-# -----------------------------------------------------------------------------
-# check for BASH v4+ (required for associative arrays)
-
-if [ ${BASH_VERSINFO[0]} -lt 4 ]; then
- echo >&2 "BASH version 4 or later is required (this is ${BASH_VERSION})."
- exit 1
-fi
-
-# -----------------------------------------------------------------------------
-# parse the arguments
-
-pid=
-cgroup=
-while [ -n "${1}" ]
-do
- case "${1}" in
- --cgroup) cgroup="${2}"; shift 1;;
- --pid|-p) pid="${2}"; shift 1;;
- --debug|debug)
- debug=1
- LOG_LEVEL=$NDLP_DEBUG
- ;;
- *) fatal "Cannot understand argument '${1}'";;
- esac
-
- shift
-done
-
-if [ -z "${pid}" ] && [ -z "${cgroup}" ]
-then
- fatal "Either --pid or --cgroup is required"
-fi
-
-# -----------------------------------------------------------------------------
-
-set_source() {
- [ ${debug} -eq 1 ] && echo "SRC ${*}"
-}
-
-
-# -----------------------------------------------------------------------------
-# veth interfaces via cgroup
-
-# cgroup-network can detect veth interfaces by itself (written in C).
-# If you are looking for a shell version of what it does, see:
-# https://github.com/netdata/netdata/issues/474#issuecomment-317866709
-
-
-# -----------------------------------------------------------------------------
-# tun/tap interfaces via /proc/PID/fdinfo
-
-# find any tun/tap devices linked to a pid
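-# (illustrative: a process holding a tun/tap device has an fdinfo entry such as
-#  'iff:<TAB>vnet0'; the grep|cut pipeline below prints the device name 'vnet0')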
-proc_pid_fdinfo_iff() {
- local p="${1}" # the pid
-
- debug "Searching for tun/tap interfaces for pid ${p}..."
- set_source "fdinfo"
- grep "^iff:.*" "${NETDATA_HOST_PREFIX}/proc/${p}/fdinfo"/* 2>/dev/null | cut -f 2
-}
-
-find_tun_tap_interfaces_for_cgroup() {
- local c="${1}" # the cgroup path
- [ -d "${c}/emulator" ] && c="${c}/emulator" # check for 'emulator' subdirectory
- c="${c}/cgroup.procs" # make full path
-
- # for each pid of the cgroup
- # find any tun/tap devices linked to the pid
- if [ -f "${c}" ]
- then
- local p
- for p in $(< "${c}" )
- do
- proc_pid_fdinfo_iff "${p}"
- done
- else
- debug "Cannot find file '${c}', not searching for tun/tap interfaces."
- fi
-}
-
-
-# -----------------------------------------------------------------------------
-# virsh domain network interfaces
-
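-# e.g. (illustrative): a cgroup path ending in 'machine-qemu\x2d1\x2dopnsense.scope'
-# yields the domain 'opnsense'; escaped names like 'vm01\x2dweb' are decoded to
-# 'vm01-web' by the printf '%b' in the caller below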
-virsh_cgroup_to_domain_name() {
- local c="${1}" # the cgroup path
-
- debug "extracting a possible virsh domain from cgroup ${c}..."
-
- # extract for the cgroup path
- sed -n -e "s|.*/machine-qemu\\\\x2d[0-9]\+\\\\x2d\(.*\)\.scope$|\1|p" \
- -e "s|.*/machine/qemu-[0-9]\+-\(.*\)\.libvirt-qemu$|\1|p" \
- -e "s|.*/machine/\(.*\)\.libvirt-qemu$|\1|p" \
- <<EOF
-${c}
-EOF
-}
-
-virsh_find_all_interfaces_for_cgroup() {
- local c="${1}" # the cgroup path
-
- # the virsh command
- local virsh
- # shellcheck disable=SC2230
- virsh="$(which virsh 2>/dev/null || command -v virsh 2>/dev/null)"
-
- if [ -n "${virsh}" ]
- then
- local d
- d="$(virsh_cgroup_to_domain_name "${c}")"
- # convert hex to character
- # e.g.: vm01\x2dweb => vm01-web (https://github.com/netdata/netdata/issues/11088#issuecomment-832618149)
- d="$(printf '%b' "${d}")"
-
- if [ -n "${d}" ]
- then
- debug "running: virsh domiflist ${d}; to find the network interfaces"
-
- # 'virsh -r domiflist <domain>' example output
- # Interface Type Source Model MAC
- #--------------------------------------------------------------
- # vnet3 bridge br0 virtio 52:54:00:xx:xx:xx
- # vnet4 network default virtio 52:54:00:yy:yy:yy
-
-            # match 'network' and 'bridge' interfaces in the virsh output
- set_source "virsh"
- "${virsh}" -r domiflist "${d}" |\
- sed -n \
- -e "s|^[[:space:]]\?\([^[:space:]]\+\)[[:space:]]\+network[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p" \
- -e "s|^[[:space:]]\?\([^[:space:]]\+\)[[:space:]]\+bridge[[:space:]]\+\([^[:space:]]\+\)[[:space:]]\+[^[:space:]]\+[[:space:]]\+[^[:space:]]\+$|\1 \1_\2|p"
- else
- debug "no virsh domain extracted from cgroup ${c}"
- fi
- else
- debug "virsh command is not available"
- fi
-}
-
-# -----------------------------------------------------------------------------
-# netnsid detected interfaces
-
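-# (illustrative: 'lsns -t net -p <pid> -o NETNSID -nr' prints the pid's netns id;
-#  in 'ip link show' the host side of a veth pair then shows up as e.g.
-#    44: veth1234@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 ...
-#        link/ether aa:bb:cc:dd:ee:ff link-netnsid 0
-#  'grep -B 1' keeps the numbered line and the sed extracts 'veth1234')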
-netnsid_find_all_interfaces_for_pid() {
- local pid="${1}"
- [ -z "${pid}" ] && return 1
-
- local nsid
- nsid=$(lsns -t net -p "${pid}" -o NETNSID -nr 2>/dev/null)
- if [ -z "${nsid}" ] || [ "${nsid}" = "unassigned" ]; then
- return 1
- fi
-
- set_source "netnsid"
- ip link show |\
- grep -B 1 -E " link-netnsid ${nsid}($| )" |\
- sed -n -e "s|^[[:space:]]*[0-9]\+:[[:space:]]\+\([A-Za-z0-9_]\+\)\(@[A-Za-z0-9_]\+\)*:[[:space:]].*$|\1|p"
-}
-
-netnsid_find_all_interfaces_for_cgroup() {
- local c="${1}" # the cgroup path
-
- if [ -f "${c}/cgroup.procs" ]; then
- netnsid_find_all_interfaces_for_pid "$(head -n 1 "${c}/cgroup.procs" 2>/dev/null)"
- else
- debug "Cannot find file '${c}/cgroup.procs', not searching for netnsid interfaces."
- fi
-}
-
-# -----------------------------------------------------------------------------
-
-find_all_interfaces_of_pid_or_cgroup() {
- local p="${1}" c="${2}" # the pid and the cgroup path
-
-    if [ -n "${p}" ]
- then
- # we have been called with a pid
-
- proc_pid_fdinfo_iff "${p}"
- netnsid_find_all_interfaces_for_pid "${p}"
-
- elif [ -n "${c}" ]
- then
- # we have been called with a cgroup
-
- info "searching for network interfaces of cgroup '${c}'"
-
- find_tun_tap_interfaces_for_cgroup "${c}"
- virsh_find_all_interfaces_for_cgroup "${c}"
- netnsid_find_all_interfaces_for_cgroup "${c}"
-
- else
-
- error "Either a pid or a cgroup path is needed"
- return 1
-
- fi
-
- return 0
-}
-
-# -----------------------------------------------------------------------------
-
-# an associative array to store the interfaces
-# the index is the interface name as seen by the host
-# the value is the interface name as seen by the guest / container
-declare -A devs=()
-
-# store all interfaces found in the associative array
-# this will also give the unique devices, as seen by the host
-last_src=
-# shellcheck disable=SC2162
-while read host_device guest_device
-do
- [ -z "${host_device}" ] && continue
-
- [ "${host_device}" = "SRC" ] && last_src="${guest_device}" && continue
-
- # the default guest_device is the host_device
- [ -z "${guest_device}" ] && guest_device="${host_device}"
-
- # when we run in debug, show the source
- debug "Found host device '${host_device}', guest device '${guest_device}', detected via '${last_src}'"
-
- if [ -z "${devs[${host_device}]}" ] || [ "${devs[${host_device}]}" = "${host_device}" ]; then
- devs[${host_device}]="${guest_device}"
- fi
-
-done < <( find_all_interfaces_of_pid_or_cgroup "${pid}" "${cgroup}" )
-
-# print the interfaces found, in the format netdata expects them
-found=0
-for x in "${!devs[@]}"
-do
- found=$((found + 1))
- echo "${x} ${devs[${x}]}"
-done
-
-debug "found ${found} network interfaces for pid '${pid}', cgroup '${cgroup}', run as ${USER}, ${UID}"
-
-# let netdata know if we found any
-[ ${found} -eq 0 ] && exit 1
-exit 0
diff --git a/collectors/cgroups.plugin/cgroup-network.c b/collectors/cgroups.plugin/cgroup-network.c
deleted file mode 100644
index 508ea07c6..000000000
--- a/collectors/cgroups.plugin/cgroup-network.c
+++ /dev/null
@@ -1,743 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata/libnetdata.h"
-#include "libnetdata/required_dummies.h"
-
-#ifdef HAVE_SETNS
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE /* See feature_test_macros(7) */
-#endif
-#include <sched.h>
-#endif
-
-char env_netdata_host_prefix[FILENAME_MAX + 50] = "";
-char env_netdata_log_method[FILENAME_MAX + 50] = "";
-char env_netdata_log_format[FILENAME_MAX + 50] = "";
-char env_netdata_log_level[FILENAME_MAX + 50] = "";
-char *environment[] = {
- "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
- env_netdata_host_prefix,
- env_netdata_log_method,
- env_netdata_log_format,
- env_netdata_log_level,
- NULL
-};
-
-struct iface {
- const char *device;
- uint32_t hash;
-
- unsigned int ifindex;
- unsigned int iflink;
-
- struct iface *next;
-};
-
-unsigned int calc_num_ifaces(struct iface *root) {
- unsigned int num = 0;
- for (struct iface *h = root; h; h = h->next) {
- num++;
- }
- return num;
-}
-
-unsigned int read_iface_iflink(const char *prefix, const char *iface) {
- if(!prefix) prefix = "";
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/iflink", prefix, iface);
-
- unsigned long long iflink = 0;
- int ret = read_single_number_file(filename, &iflink);
- if(ret) collector_error("Cannot read '%s'.", filename);
-
- return (unsigned int)iflink;
-}
-
-unsigned int read_iface_ifindex(const char *prefix, const char *iface) {
- if(!prefix) prefix = "";
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/sys/class/net/%s/ifindex", prefix, iface);
-
- unsigned long long ifindex = 0;
- int ret = read_single_number_file(filename, &ifindex);
- if(ret) collector_error("Cannot read '%s'.", filename);
-
- return (unsigned int)ifindex;
-}
-
-struct iface *read_proc_net_dev(const char *scope __maybe_unused, const char *prefix) {
- if(!prefix) prefix = "";
-
- procfile *ff = NULL;
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s%s", prefix, (*prefix)?"/proc/1/net/dev":"/proc/net/dev");
-
-#ifdef NETDATA_INTERNAL_CHECKS
- collector_info("parsing '%s'", filename);
-#endif
-
- ff = procfile_open(filename, " \t,:|", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) {
- collector_error("Cannot open file '%s'", filename);
- return NULL;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) {
- collector_error("Cannot read file '%s'", filename);
- return NULL;
- }
-
- size_t lines = procfile_lines(ff), l;
- struct iface *root = NULL;
- for(l = 2; l < lines ;l++) {
- if (unlikely(procfile_linewords(ff, l) < 1)) continue;
-
- struct iface *t = callocz(1, sizeof(struct iface));
- t->device = strdupz(procfile_lineword(ff, l, 0));
- t->hash = simple_hash(t->device);
- t->ifindex = read_iface_ifindex(prefix, t->device);
- t->iflink = read_iface_iflink(prefix, t->device);
- t->next = root;
- root = t;
-
-#ifdef NETDATA_INTERNAL_CHECKS
- collector_info("added %s interface '%s', ifindex %u, iflink %u", scope, t->device, t->ifindex, t->iflink);
-#endif
- }
-
- procfile_close(ff);
-
- return root;
-}
-
-void free_iface(struct iface *iface) {
- freez((void *)iface->device);
- freez(iface);
-}
-
-void free_host_ifaces(struct iface *iface) {
- while(iface) {
- struct iface *t = iface->next;
- free_iface(iface);
- iface = t;
- }
-}
-
-int iface_is_eligible(struct iface *iface) {
- if(iface->iflink != iface->ifindex)
- return 1;
-
- return 0;
-}
-
-int eligible_ifaces(struct iface *root) {
- int eligible = 0;
-
- struct iface *t;
- for(t = root; t ; t = t->next)
- if(iface_is_eligible(t))
- eligible++;
-
- return eligible;
-}
-
-static void continue_as_child(void) {
- pid_t child = fork();
- int status;
- pid_t ret;
-
- if (child < 0)
- collector_error("fork() failed");
-
- /* Only the child returns */
- if (child == 0)
- return;
-
- for (;;) {
- ret = waitpid(child, &status, WUNTRACED);
- if ((ret == child) && (WIFSTOPPED(status))) {
- /* The child suspended so suspend us as well */
- kill(getpid(), SIGSTOP);
- kill(child, SIGCONT);
- } else {
- break;
- }
- }
-
- /* Return the child's exit code if possible */
- if (WIFEXITED(status)) {
- exit(WEXITSTATUS(status));
- } else if (WIFSIGNALED(status)) {
- kill(getpid(), WTERMSIG(status));
- }
-
- exit(EXIT_FAILURE);
-}
-
-int proc_pid_fd(const char *prefix, const char *ns, pid_t pid) {
- if(!prefix) prefix = "";
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/%s", prefix, (int)pid, ns);
- int fd = open(filename, O_RDONLY);
-
- if(fd == -1)
- collector_error("Cannot open proc_pid_fd() file '%s'", filename);
-
- return fd;
-}
-
-static struct ns {
- int nstype;
- int fd;
- int status;
- const char *name;
- const char *path;
-} all_ns[] = {
- // { .nstype = CLONE_NEWUSER, .fd = -1, .status = -1, .name = "user", .path = "ns/user" },
- // { .nstype = CLONE_NEWCGROUP, .fd = -1, .status = -1, .name = "cgroup", .path = "ns/cgroup" },
- // { .nstype = CLONE_NEWIPC, .fd = -1, .status = -1, .name = "ipc", .path = "ns/ipc" },
- // { .nstype = CLONE_NEWUTS, .fd = -1, .status = -1, .name = "uts", .path = "ns/uts" },
- { .nstype = CLONE_NEWNET, .fd = -1, .status = -1, .name = "network", .path = "ns/net" },
- { .nstype = CLONE_NEWPID, .fd = -1, .status = -1, .name = "pid", .path = "ns/pid" },
- { .nstype = CLONE_NEWNS, .fd = -1, .status = -1, .name = "mount", .path = "ns/mnt" },
-
- // terminator
- { .nstype = 0, .fd = -1, .status = -1, .name = NULL, .path = NULL }
-};
-
-int switch_namespace(const char *prefix, pid_t pid) {
-
-#ifdef HAVE_SETNS
-
- int i;
- for(i = 0; all_ns[i].name ; i++)
- all_ns[i].fd = proc_pid_fd(prefix, all_ns[i].path, pid);
-
- int root_fd = proc_pid_fd(prefix, "root", pid);
- int cwd_fd = proc_pid_fd(prefix, "cwd", pid);
-
- setgroups(0, NULL);
-
-    // two passes - found in the nsenter source code
-    // (this relates to CLONE_NEWUSER functionality)
-
-    // This code cannot switch the user namespace (it can switch all the other namespaces).
-    // Fortunately, we don't need to switch user namespaces.
-
- int pass;
- for(pass = 0; pass < 2 ;pass++) {
- for(i = 0; all_ns[i].name ; i++) {
- if (all_ns[i].fd != -1 && all_ns[i].status == -1) {
- if(setns(all_ns[i].fd, all_ns[i].nstype) == -1) {
- if(pass == 1) {
- all_ns[i].status = 0;
- collector_error("Cannot switch to %s namespace of pid %d", all_ns[i].name, (int) pid);
- }
- }
- else
- all_ns[i].status = 1;
- }
- }
- }
-
- setgroups(0, NULL);
-
- if(root_fd != -1) {
- if(fchdir(root_fd) < 0)
- collector_error("Cannot fchdir() to pid %d root directory", (int)pid);
-
- if(chroot(".") < 0)
- collector_error("Cannot chroot() to pid %d root directory", (int)pid);
-
- close(root_fd);
- }
-
- if(cwd_fd != -1) {
- if(fchdir(cwd_fd) < 0)
- collector_error("Cannot fchdir() to pid %d current working directory", (int)pid);
-
- close(cwd_fd);
- }
-
- int do_fork = 0;
- for(i = 0; all_ns[i].name ; i++)
- if(all_ns[i].fd != -1) {
-
- // CLONE_NEWPID requires a fork() to become effective
- if(all_ns[i].nstype == CLONE_NEWPID && all_ns[i].status)
- do_fork = 1;
-
- close(all_ns[i].fd);
- }
-
- if(do_fork)
- continue_as_child();
-
- return 0;
-
-#else
-
- errno = ENOSYS;
- collector_error("setns() is missing on this system.");
- return 1;
-
-#endif
-}
-
-pid_t read_pid_from_cgroup_file(const char *filename) {
- int fd = open(filename, procfile_open_flags);
- if(fd == -1) {
- if (errno != ENOENT)
- collector_error("Cannot open pid_from_cgroup() file '%s'.", filename);
- return 0;
- }
-
- FILE *fp = fdopen(fd, "r");
- if(!fp) {
- collector_error("Cannot upgrade fd to fp for file '%s'.", filename);
- return 0;
- }
-
- char buffer[100 + 1];
- pid_t pid = 0;
- char *s;
- while((s = fgets(buffer, 100, fp))) {
- buffer[100] = '\0';
- pid = atoi(s);
- if(pid > 0) break;
- }
-
- fclose(fp);
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(pid > 0) collector_info("found pid %d on file '%s'", pid, filename);
-#endif
-
- return pid;
-}
-
-pid_t read_pid_from_cgroup_files(const char *path) {
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s/cgroup.procs", path);
- pid_t pid = read_pid_from_cgroup_file(filename);
- if(pid > 0) return pid;
-
- snprintfz(filename, FILENAME_MAX, "%s/tasks", path);
- return read_pid_from_cgroup_file(filename);
-}
-
-pid_t read_pid_from_cgroup(const char *path) {
- pid_t pid = read_pid_from_cgroup_files(path);
- if (pid > 0) return pid;
-
- DIR *dir = opendir(path);
- if (!dir) {
- collector_error("cannot read directory '%s'", path);
- return 0;
- }
-
- struct dirent *de = NULL;
- while ((de = readdir(dir))) {
- if (de->d_type == DT_DIR
- && (
- (de->d_name[0] == '.' && de->d_name[1] == '\0')
- || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
- ))
- continue;
-
- if (de->d_type == DT_DIR) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name);
- pid = read_pid_from_cgroup(filename);
- if(pid > 0) break;
- }
- }
- closedir(dir);
- return pid;
-}
-
-// ----------------------------------------------------------------------------
-// send the result to netdata
-
-struct found_device {
- const char *host_device;
- const char *guest_device;
-
- uint32_t host_device_hash;
-
- struct found_device *next;
-} *detected_devices = NULL;
-
-void add_device(const char *host, const char *guest) {
-#ifdef NETDATA_INTERNAL_CHECKS
- collector_info("adding device with host '%s', guest '%s'", host, guest);
-#endif
-
- uint32_t hash = simple_hash(host);
-
- if(guest && (!*guest || strcmp(host, guest) == 0))
- guest = NULL;
-
- struct found_device *f;
- for(f = detected_devices; f ; f = f->next) {
- if(f->host_device_hash == hash && !strcmp(host, f->host_device)) {
-
- if(guest && (!f->guest_device || !strcmp(f->host_device, f->guest_device))) {
- if(f->guest_device) freez((void *)f->guest_device);
- f->guest_device = strdupz(guest);
- }
-
- return;
- }
- }
-
- f = mallocz(sizeof(struct found_device));
- f->host_device = strdupz(host);
- f->host_device_hash = hash;
- f->guest_device = (guest)?strdupz(guest):NULL;
- f->next = detected_devices;
- detected_devices = f;
-}
-
-int send_devices(void) {
- int found = 0;
-
- struct found_device *f;
- for(f = detected_devices; f ; f = f->next) {
- found++;
- printf("%s %s\n", f->host_device, (f->guest_device)?f->guest_device:f->host_device);
- }
-
- return found;
-}
-
-// ----------------------------------------------------------------------------
-// this function should be called only **ONCE**
-// also it has to be the **LAST** to be called
-// since it switches namespaces, so after this call, everything is different!
-
-void detect_veth_interfaces(pid_t pid) {
- struct iface *cgroup = NULL;
- struct iface *host, *h, *c;
-
- host = read_proc_net_dev("host", netdata_configured_host_prefix);
- if(!host) {
- errno = 0;
- collector_error("cannot read host interface list.");
- goto cleanup;
- }
-
- if(!eligible_ifaces(host)) {
- errno = 0;
- collector_info("there are no double-linked host interfaces available.");
- goto cleanup;
- }
-
- if(switch_namespace(netdata_configured_host_prefix, pid)) {
- errno = 0;
- collector_error("cannot switch to the namespace of pid %u", (unsigned int) pid);
- goto cleanup;
- }
-
-#ifdef NETDATA_INTERNAL_CHECKS
- collector_info("switched to namespaces of pid %d", pid);
-#endif
-
- cgroup = read_proc_net_dev("cgroup", NULL);
- if(!cgroup) {
- errno = 0;
- collector_error("cannot read cgroup interface list.");
- goto cleanup;
- }
-
- if(!eligible_ifaces(cgroup)) {
- errno = 0;
-        collector_error("there are no double-linked cgroup interfaces available.");
- goto cleanup;
- }
-
- unsigned int host_dev_num = calc_num_ifaces(host);
- unsigned int cgroup_dev_num = calc_num_ifaces(cgroup);
- // host ifaces == guest ifaces => we are still in the host namespace
- // and we can't really identify which ifaces belong to the cgroup (e.g. Proxmox VM).
- if (host_dev_num == cgroup_dev_num) {
- unsigned int m = 0;
- for (h = host; h; h = h->next) {
- for (c = cgroup; c; c = c->next) {
- if (h->ifindex == c->ifindex && h->iflink == c->iflink) {
- m++;
- break;
- }
- }
- }
- if (host_dev_num == m) {
- goto cleanup;
- }
- }
-
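-    // a veth pair cross-references its peer: the ifindex of one end equals
-    // the iflink of the other, which is what the nested loop below matches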
- for(h = host; h ; h = h->next) {
- if(iface_is_eligible(h)) {
- for (c = cgroup; c; c = c->next) {
- if(iface_is_eligible(c) && h->ifindex == c->iflink && h->iflink == c->ifindex) {
- add_device(h->device, c->device);
- }
- }
- }
- }
-
-cleanup:
- free_host_ifaces(cgroup);
- free_host_ifaces(host);
-}
-
-// ----------------------------------------------------------------------------
-// call the external helper
-
-#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
-void call_the_helper(pid_t pid, const char *cgroup) {
- if(setresuid(0, 0, 0) == -1)
- collector_error("setresuid(0, 0, 0) failed.");
-
- char command[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
- if(cgroup)
- snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --cgroup '%s'", cgroup);
- else
- snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --pid %d", pid);
-
- collector_info("running: %s", command);
-
- pid_t cgroup_pid;
- FILE *fp_child_input, *fp_child_output;
-
- if(cgroup) {
- (void)netdata_popen_raw_default_flags(&cgroup_pid, environment, &fp_child_input, &fp_child_output, PLUGINS_DIR "/cgroup-network-helper.sh", "--cgroup", cgroup);
- }
- else {
- char buffer[100];
- snprintfz(buffer, sizeof(buffer) - 1, "%d", pid);
- (void)netdata_popen_raw_default_flags(&cgroup_pid, environment, &fp_child_input, &fp_child_output, PLUGINS_DIR "/cgroup-network-helper.sh", "--pid", buffer);
- }
-
- if(fp_child_output) {
- char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
- char *s;
- while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp_child_output))) {
- trim(s);
-
- if(*s && *s != '\n') {
- char *t = s;
- while(*t && *t != ' ') t++;
- if(*t == ' ') {
- *t = '\0';
- t++;
- }
-
- if(!*s || !*t) continue;
- add_device(s, t);
- }
- }
-
- netdata_pclose(fp_child_input, fp_child_output, cgroup_pid);
- }
- else
- collector_error("cannot execute cgroup-network helper script: %s", command);
-}
-
-int is_valid_path_symbol(char c) {
- switch(c) {
- case '/': // path separators
- case '\\': // needed for virsh domains \x2d1\x2dname
- case ' ': // space
- case '-': // hyphen
- case '_': // underscore
- case '.': // dot
- case ',': // comma
- return 1;
-
- default:
- return 0;
- }
-}
-
-// we will pass this path to a shell script running as root,
-// so we need to make sure the path is valid and does not
-// include anything that could allow the caller to use shell
-// expansion to gain escalated privileges.
-int verify_path(const char *path) {
- struct stat sb;
-
- char c;
- const char *s = path;
- while((c = *s++)) {
- if(!( isalnum(c) || is_valid_path_symbol(c) )) {
- collector_error("invalid character in path '%s'", path);
- return -1;
- }
- }
-
-    if(strstr(path, "\\") && !strstr(path, "\\x")) {
-        collector_error("invalid escape sequence in path '%s'", path);
-        return -1; // callers treat only -1 as invalid, so fail consistently
-    }
-
-    if(strstr(path, "/../")) {
-        collector_error("invalid parent path sequence detected in '%s'", path);
-        return -1; // callers treat only -1 as invalid, so fail consistently
-    }
-
- if(path[0] != '/') {
- collector_error("only absolute path names are supported - invalid path '%s'", path);
- return -1;
- }
-
- if (stat(path, &sb) == -1) {
- collector_error("cannot stat() path '%s'", path);
- return -1;
- }
-
- if((sb.st_mode & S_IFMT) != S_IFDIR) {
- collector_error("path '%s' is not a directory", path);
- return -1;
- }
-
- return 0;
-}
-
-/*
-char *fix_path_variable(void) {
- const char *path = getenv("PATH");
- if(!path || !*path) return 0;
-
- char *p = strdupz(path);
- char *safe_path = callocz(1, strlen(p) + strlen("PATH=") + 1);
- strcpy(safe_path, "PATH=");
-
- int added = 0;
- char *ptr = p;
- while(ptr && *ptr) {
- char *s = strsep(&ptr, ":");
- if(s && *s) {
- if(verify_path(s) == -1) {
- collector_error("the PATH variable includes an invalid path '%s' - removed it.", s);
- }
- else {
- collector_info("the PATH variable includes a valid path '%s'.", s);
- if(added) strcat(safe_path, ":");
- strcat(safe_path, s);
- added++;
- }
- }
- }
-
- collector_info("unsafe PATH: '%s'.", path);
- collector_info(" safe PATH: '%s'.", safe_path);
-
- freez(p);
- return safe_path;
-}
-*/
-
-// ----------------------------------------------------------------------------
-// main
-
-void usage(void) {
- fprintf(stderr, "%s [ -p PID | --pid PID | --cgroup /path/to/cgroup ]\n", program_name);
- exit(1);
-}
-
-int main(int argc, char **argv) {
- pid_t pid = 0;
-
- program_version = VERSION;
- clocks_init();
- nd_log_initialize_for_external_plugins("cgroup-network");
-
- // since cgroup-network runs as root, prevent it from opening symbolic links
- procfile_open_flags = O_RDONLY|O_NOFOLLOW;
-
- // ------------------------------------------------------------------------
- // make sure NETDATA_HOST_PREFIX is safe
-
- netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
- if(verify_netdata_host_prefix(false) == -1) exit(1);
-
- if(netdata_configured_host_prefix[0] != '\0' && verify_path(netdata_configured_host_prefix) == -1)
- fatal("invalid NETDATA_HOST_PREFIX '%s'", netdata_configured_host_prefix);
-
- // ------------------------------------------------------------------------
- // build a safe environment for our script
-
- // the first environment variable is a fixed PATH=
- snprintfz(env_netdata_host_prefix, sizeof(env_netdata_host_prefix) - 1, "NETDATA_HOST_PREFIX=%s", netdata_configured_host_prefix);
-
- char *s;
-
- s = getenv("NETDATA_LOG_METHOD");
- snprintfz(env_netdata_log_method, sizeof(env_netdata_log_method) - 1, "NETDATA_LOG_METHOD=%s", nd_log_method_for_external_plugins(s));
-
- s = getenv("NETDATA_LOG_FORMAT");
- if (s)
- snprintfz(env_netdata_log_format, sizeof(env_netdata_log_format) - 1, "NETDATA_LOG_FORMAT=%s", s);
-
- s = getenv("NETDATA_LOG_LEVEL");
- if (s)
- snprintfz(env_netdata_log_level, sizeof(env_netdata_log_level) - 1, "NETDATA_LOG_LEVEL=%s", s);
-
- // ------------------------------------------------------------------------
-
- if(argc == 2 && (!strcmp(argv[1], "version") || !strcmp(argv[1], "-version") || !strcmp(argv[1], "--version") || !strcmp(argv[1], "-v") || !strcmp(argv[1], "-V"))) {
- fprintf(stderr, "cgroup-network %s\n", VERSION);
- exit(0);
- }
-
- if(argc != 3)
- usage();
-
- int arg = 1;
- int helper = 1;
- if (getenv("KUBERNETES_SERVICE_HOST") != NULL && getenv("KUBERNETES_SERVICE_PORT") != NULL)
- helper = 0;
-
- if(!strcmp(argv[arg], "-p") || !strcmp(argv[arg], "--pid")) {
- pid = atoi(argv[arg+1]);
-
- if(pid <= 0) {
- errno = 0;
- collector_error("Invalid pid %d given", (int) pid);
- return 2;
- }
-
- if(helper) call_the_helper(pid, NULL);
- }
- else if(!strcmp(argv[arg], "--cgroup")) {
- char *cgroup = argv[arg+1];
- if(verify_path(cgroup) == -1) {
- collector_error("cgroup '%s' does not exist or is not valid.", cgroup);
- return 1;
- }
-
- pid = read_pid_from_cgroup(cgroup);
- if(helper) call_the_helper(pid, cgroup);
-
- if(pid <= 0 && !detected_devices) {
- errno = 0;
- collector_error("Cannot find a cgroup PID from cgroup '%s'", cgroup);
- }
- }
- else
- usage();
-
- if(pid > 0)
- detect_veth_interfaces(pid);
-
- int found = send_devices();
- if(found <= 0) return 1;
- return 0;
-}
diff --git a/collectors/cgroups.plugin/cgroup-top.c b/collectors/cgroups.plugin/cgroup-top.c
deleted file mode 100644
index 8d44d3b56..000000000
--- a/collectors/cgroups.plugin/cgroup-top.c
+++ /dev/null
@@ -1,520 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "cgroup-internals.h"
-
-struct cgroup_netdev_link {
- size_t read_slot;
- NETDATA_DOUBLE received[2];
- NETDATA_DOUBLE sent[2];
-};
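-
-// received/sent above are double-buffered: cgroup_netdev_add_bandwidth()
-// accumulates into the slot that is not being read, cgroup_netdev_get_bandwidth()
-// reads the other slot, and cgroup_netdev_reset_all() flips read_slot and resets
-// the new write slot to NAN.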
-
-static DICTIONARY *cgroup_netdev_link_dict = NULL;
-
-void cgroup_netdev_link_init(void) {
- cgroup_netdev_link_dict = dictionary_create_advanced(DICT_OPTION_FIXED_SIZE|DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, sizeof(struct cgroup_netdev_link));
-}
-
-const DICTIONARY_ITEM *cgroup_netdev_get(struct cgroup *cg) {
- if(!cg->cgroup_netdev_link) {
- struct cgroup_netdev_link t = {
- .read_slot = 0,
- .received = {NAN, NAN},
- .sent = {NAN, NAN},
- };
-
- cg->cgroup_netdev_link =
- dictionary_set_and_acquire_item(cgroup_netdev_link_dict, cg->id, &t, sizeof(struct cgroup_netdev_link));
- }
-
- return dictionary_acquired_item_dup(cgroup_netdev_link_dict, cg->cgroup_netdev_link);
-}
-
-void cgroup_netdev_delete(struct cgroup *cg) {
- if(cg->cgroup_netdev_link) {
- dictionary_acquired_item_release(cgroup_netdev_link_dict, cg->cgroup_netdev_link);
- dictionary_del(cgroup_netdev_link_dict, cg->id);
- dictionary_garbage_collect(cgroup_netdev_link_dict);
- cg->cgroup_netdev_link = NULL;
- }
-}
-
-void cgroup_netdev_release(const DICTIONARY_ITEM *link) {
- if(link)
- dictionary_acquired_item_release(cgroup_netdev_link_dict, link);
-}
-
-const void *cgroup_netdev_dup(const DICTIONARY_ITEM *link) {
- return dictionary_acquired_item_dup(cgroup_netdev_link_dict, link);
-}
-
-void cgroup_netdev_reset_all(void) {
- struct cgroup_netdev_link *t;
- dfe_start_read(cgroup_netdev_link_dict, t) {
- if(t->read_slot >= 1) {
- t->read_slot = 0;
- t->received[1] = NAN;
- t->sent[1] = NAN;
- }
- else {
- t->read_slot = 1;
- t->received[0] = NAN;
- t->sent[0] = NAN;
- }
- }
- dfe_done(t);
-}
-
-void cgroup_netdev_add_bandwidth(const DICTIONARY_ITEM *link, NETDATA_DOUBLE received, NETDATA_DOUBLE sent) {
- if(!link)
- return;
-
- struct cgroup_netdev_link *t = dictionary_acquired_item_value(link);
-
- size_t slot = (t->read_slot) ? 0 : 1;
-
- if(isnan(t->received[slot]))
- t->received[slot] = received;
- else
- t->received[slot] += received;
-
- if(isnan(t->sent[slot]))
- t->sent[slot] = sent;
- else
- t->sent[slot] += sent;
-}
-
-void cgroup_netdev_get_bandwidth(struct cgroup *cg, NETDATA_DOUBLE *received, NETDATA_DOUBLE *sent) {
- if(!cg->cgroup_netdev_link) {
- *received = NAN;
- *sent = NAN;
- return;
- }
-
- struct cgroup_netdev_link *t = dictionary_acquired_item_value(cg->cgroup_netdev_link);
-
- size_t slot = (t->read_slot) ? 1 : 0;
-
- *received = t->received[slot];
- *sent = t->sent[slot];
-}
-
-int cgroup_function_cgroup_top(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused,
- void *collector_data __maybe_unused,
- rrd_function_result_callback_t result_cb, void *result_cb_data,
- rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
- rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
- void *register_canceller_cb_data __maybe_unused) {
-
- buffer_flush(wb);
- wb->content_type = CT_APPLICATION_JSON;
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
-
- buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost));
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_time_t(wb, "update_every", 1);
- buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_CGTOP_HELP);
- buffer_json_member_add_array(wb, "data");
-
- double max_pids = 0.0;
- double max_cpu = 0.0;
- double max_ram = 0.0;
- double max_disk_io_read = 0.0;
- double max_disk_io_written = 0.0;
- double max_net_received = 0.0;
- double max_net_sent = 0.0;
-
- RRDDIM *rd = NULL;
-
- uv_mutex_lock(&cgroup_root_mutex);
-
- for(struct cgroup *cg = cgroup_root; cg ; cg = cg->next) {
- if(unlikely(!cg->enabled || cg->pending_renames || !cg->function_ready || is_cgroup_systemd_service(cg)))
- continue;
-
- buffer_json_add_array_item_array(wb);
-
- buffer_json_add_array_item_string(wb, cg->name); // Name
-
- if(k8s_is_kubepod(cg))
- buffer_json_add_array_item_string(wb, "k8s"); // Kind
- else
- buffer_json_add_array_item_string(wb, "cgroup"); // Kind
-
- double pids_current = rrddim_get_last_stored_value(cg->st_pids_rd_pids_current, &max_pids, 1.0);
-
- double cpu = NAN;
- if (cg->st_cpu_rd_user && cg->st_cpu_rd_system) {
- cpu = cg->st_cpu_rd_user->collector.last_stored_value + cg->st_cpu_rd_system->collector.last_stored_value;
- max_cpu = MAX(max_cpu, cpu);
- }
-
- double ram = rrddim_get_last_stored_value(cg->st_mem_rd_ram, &max_ram, 1.0);
-
- rd = cg->st_throttle_io_rd_read ? cg->st_throttle_io_rd_read : cg->st_io_rd_read;
- double disk_io_read = rrddim_get_last_stored_value(rd, &max_disk_io_read, 1024.0);
- rd = cg->st_throttle_io_rd_written ? cg->st_throttle_io_rd_written : cg->st_io_rd_written;
- double disk_io_written = rrddim_get_last_stored_value(rd, &max_disk_io_written, 1024.0);
-
- NETDATA_DOUBLE received, sent;
- cgroup_netdev_get_bandwidth(cg, &received, &sent);
- if (!isnan(received) && !isnan(sent)) {
- received /= 1000.0;
- sent /= 1000.0;
- max_net_received = MAX(max_net_received, received);
- max_net_sent = MAX(max_net_sent, sent);
- }
-
- buffer_json_add_array_item_double(wb, pids_current);
- buffer_json_add_array_item_double(wb, cpu);
- buffer_json_add_array_item_double(wb, ram);
- buffer_json_add_array_item_double(wb, disk_io_read);
- buffer_json_add_array_item_double(wb, disk_io_written);
- buffer_json_add_array_item_double(wb, received);
- buffer_json_add_array_item_double(wb, sent);
-
- buffer_json_array_close(wb);
- }
-
- uv_mutex_unlock(&cgroup_root_mutex);
-
- buffer_json_array_close(wb); // data
- buffer_json_member_add_object(wb, "columns");
- {
- size_t field_id = 0;
-
-        // Name
- buffer_rrdf_table_add_field(wb, field_id++, "Name", "CGROUP Name",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_FULL_WIDTH,
- NULL);
-
- // Kind
- buffer_rrdf_table_add_field(wb, field_id++, "Kind", "CGROUP Kind",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- // PIDs
- buffer_rrdf_table_add_field(wb, field_id++, "PIDs", "Number of Processes Currently in the CGROUP",
- RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
- 0, "pids", max_pids, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- // CPU
- buffer_rrdf_table_add_field(wb, field_id++, "CPU", "CPU Usage",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "%", max_cpu, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- // RAM
- buffer_rrdf_table_add_field(wb, field_id++, "RAM", "RAM Usage",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "MiB", max_ram, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- // Disk IO Reads
- buffer_rrdf_table_add_field(wb, field_id++, "Reads", "Disk Read Data",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "MiB", max_disk_io_read, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- // Disk IO Writes
- buffer_rrdf_table_add_field(wb, field_id++, "Writes", "Disk Written Data",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "MiB", max_disk_io_written, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- // Network Received
- buffer_rrdf_table_add_field(wb, field_id++, "Received", "Network Traffic Received",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "Mbps", max_net_received, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- // Network Sent
- buffer_rrdf_table_add_field(wb, field_id++, "Sent", "Network Traffic Sent ",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "Mbps", max_net_sent, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- }
- buffer_json_object_close(wb); // columns
- buffer_json_member_add_string(wb, "default_sort_column", "CPU");
-
- buffer_json_member_add_object(wb, "charts");
- {
- buffer_json_member_add_object(wb, "CPU");
- {
- buffer_json_member_add_string(wb, "name", "CPU");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "CPU");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- buffer_json_member_add_object(wb, "Memory");
- {
- buffer_json_member_add_string(wb, "name", "Memory");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "RAM");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- buffer_json_member_add_object(wb, "Traffic");
- {
- buffer_json_member_add_string(wb, "name", "Traffic");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Received");
- buffer_json_add_array_item_string(wb, "Sent");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // charts
-
- buffer_json_member_add_array(wb, "default_charts");
- {
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "CPU");
- buffer_json_add_array_item_string(wb, "Name");
- buffer_json_array_close(wb);
-
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "Memory");
- buffer_json_add_array_item_string(wb, "Name");
- buffer_json_array_close(wb);
- }
- buffer_json_array_close(wb);
-
- buffer_json_member_add_object(wb, "group_by");
- {
- buffer_json_member_add_object(wb, "Kind");
- {
- buffer_json_member_add_string(wb, "name", "Kind");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Kind");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // group_by
-
- buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
- buffer_json_finalize(wb);
-
- int response = HTTP_RESP_OK;
- if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
- buffer_flush(wb);
- response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
- }
-
- if(result_cb)
- result_cb(wb, response, result_cb_data);
-
- return response;
-}
-
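The row values above come from rrddim_get_last_stored_value(), whose implementation is not part of this diff. A minimal sketch of its apparent semantics, inferred only from the call sites (a NULL dimension yields NAN, the divisor converts units such as KiB to MiB, and the running per-column maximum feeds the table's bar scaling); the helper name is hypothetical and the sketch assumes <math.h> for NAN/isnan:

/* Sketch, NOT the actual Netdata implementation: behavior of
 * rrddim_get_last_stored_value() as inferred from the call sites above. */
static double rrddim_get_last_stored_value_sketch(RRDDIM *rd, double *max_value, double div) {
    if (!rd)
        return NAN; /* missing dimension renders as an empty table cell */

    double value = rd->collector.last_stored_value / div; /* e.g. div = 1024.0 for KiB -> MiB */

    if (!isnan(value))
        *max_value = MAX(*max_value, value); /* keep the per-column maximum for bar scaling */

    return value;
}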
-int cgroup_function_systemd_top(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused,
- void *collector_data __maybe_unused,
- rrd_function_result_callback_t result_cb, void *result_cb_data,
- rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
- rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
- void *register_canceller_cb_data __maybe_unused) {
-
- buffer_flush(wb);
- wb->content_type = CT_APPLICATION_JSON;
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
-
- buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost));
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_time_t(wb, "update_every", 1);
- buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_CGTOP_HELP);
- buffer_json_member_add_array(wb, "data");
-
- double max_pids = 0.0;
- double max_cpu = 0.0;
- double max_ram = 0.0;
- double max_disk_io_read = 0.0;
- double max_disk_io_written = 0.0;
-
- RRDDIM *rd = NULL;
-
- uv_mutex_lock(&cgroup_root_mutex);
-
- for(struct cgroup *cg = cgroup_root; cg ; cg = cg->next) {
- if(unlikely(!cg->enabled || cg->pending_renames || !cg->function_ready || !is_cgroup_systemd_service(cg)))
- continue;
-
- buffer_json_add_array_item_array(wb);
-
- buffer_json_add_array_item_string(wb, cg->name);
-
- double pids_current = rrddim_get_last_stored_value(cg->st_pids_rd_pids_current, &max_pids, 1.0);
-
- double cpu = NAN;
- if (cg->st_cpu_rd_user && cg->st_cpu_rd_system) {
- cpu = cg->st_cpu_rd_user->collector.last_stored_value + cg->st_cpu_rd_system->collector.last_stored_value;
- max_cpu = MAX(max_cpu, cpu);
- }
-
- double ram = rrddim_get_last_stored_value(cg->st_mem_rd_ram, &max_ram, 1.0);
-
- rd = cg->st_throttle_io_rd_read ? cg->st_throttle_io_rd_read : cg->st_io_rd_read;
- double disk_io_read = rrddim_get_last_stored_value(rd, &max_disk_io_read, 1024.0);
- rd = cg->st_throttle_io_rd_written ? cg->st_throttle_io_rd_written : cg->st_io_rd_written;
- double disk_io_written = rrddim_get_last_stored_value(rd, &max_disk_io_written, 1024.0);
-
- buffer_json_add_array_item_double(wb, pids_current);
- buffer_json_add_array_item_double(wb, cpu);
- buffer_json_add_array_item_double(wb, ram);
- buffer_json_add_array_item_double(wb, disk_io_read);
- buffer_json_add_array_item_double(wb, disk_io_written);
-
- buffer_json_array_close(wb);
- }
-
- uv_mutex_unlock(&cgroup_root_mutex);
-
- buffer_json_array_close(wb); // data
- buffer_json_member_add_object(wb, "columns");
- {
- size_t field_id = 0;
-
- // Node
- buffer_rrdf_table_add_field(wb, field_id++, "Name", "Systemd Service Name",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_FULL_WIDTH,
- NULL);
-
- // PIDs
- buffer_rrdf_table_add_field(wb, field_id++, "PIDs", "Number of Processes Currently in the CGROUP",
- RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
- 0, "pids", max_pids, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- // CPU
- buffer_rrdf_table_add_field(wb, field_id++, "CPU", "CPU Usage",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "%", max_cpu, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- // RAM
- buffer_rrdf_table_add_field(wb, field_id++, "RAM", "RAM Usage",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "MiB", max_ram, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- // Disk IO Reads
- buffer_rrdf_table_add_field(wb, field_id++, "Reads", "Disk Read Data",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "MiB", max_disk_io_read, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- // Disk IO Writes
- buffer_rrdf_table_add_field(wb, field_id++, "Writes", "Disk Written Data",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "MiB", max_disk_io_written, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- }
-
- buffer_json_object_close(wb); // columns
- buffer_json_member_add_string(wb, "default_sort_column", "CPU");
-
- buffer_json_member_add_object(wb, "charts");
- {
- buffer_json_member_add_object(wb, "CPU");
- {
- buffer_json_member_add_string(wb, "name", "CPU");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "CPU");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- buffer_json_member_add_object(wb, "Memory");
- {
- buffer_json_member_add_string(wb, "name", "Memory");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "RAM");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // charts
-
- buffer_json_member_add_array(wb, "default_charts");
- {
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "CPU");
- buffer_json_add_array_item_string(wb, "Name");
- buffer_json_array_close(wb);
-
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "Memory");
- buffer_json_add_array_item_string(wb, "Name");
- buffer_json_array_close(wb);
- }
- buffer_json_array_close(wb);
-
- buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
- buffer_json_finalize(wb);
-
- int response = HTTP_RESP_OK;
- if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
- buffer_flush(wb);
- response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
- }
-
- if(result_cb)
- result_cb(wb, response, result_cb_data);
-
- return response;
-}
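Both table functions share the rrd_function callback contract visible in the signatures above: fill the caller-supplied BUFFER with the JSON table, honor the cancellation callback if given, and hand the finished buffer to result_cb. A hypothetical caller sketch under that assumption; print_result_cb, run_systemd_top and the "systemd-top" tag are illustrative names, not taken from this diff, while buffer_create()/buffer_free()/buffer_strlen() are the Netdata buffer helpers used elsewhere in the tree:

/* Hypothetical caller sketch exercising the rrd_function interface above. */
static void print_result_cb(BUFFER *wb, int code, void *data) {
    (void) data;
    fprintf(stderr, "systemd top: HTTP %d, %zu bytes of JSON\n", code, buffer_strlen(wb));
}

static void run_systemd_top(void) {
    BUFFER *wb = buffer_create(0, NULL);                        /* buffer_create(size, statistics) */
    cgroup_function_systemd_top(wb, 10, "systemd-top", NULL,    /* timeout and function tag are unused */
                                print_result_cb, NULL,          /* receives the finished JSON table */
                                NULL, NULL,                     /* no cancellation check */
                                NULL, NULL);                    /* no canceller registration */
    buffer_free(wb);
}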
diff --git a/collectors/cgroups.plugin/integrations/containers.md b/collectors/cgroups.plugin/integrations/containers.md
deleted file mode 100644
index 6273d1e91..000000000
--- a/collectors/cgroups.plugin/integrations/containers.md
+++ /dev/null
@@ -1,169 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/containers.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
-sidebar_label: "Containers"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Containers and VMs"
-most_popular: True
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Containers
-
-
-<img src="https://netdata.cloud/img/container.svg" width="150"/>
-
-
-Plugin: cgroups.plugin
-Module: /sys/fs/cgroup
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Containers for performance, resource usage, and health status.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per cgroup
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| container_name | The container name or group path if name resolution fails. |
-| image | Docker/Podman container image name. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.cpu_limit | used | percentage |
-| cgroup.cpu | user, system | percentage |
-| cgroup.cpu_per_core | a dimension per core | percentage |
-| cgroup.throttled | throttled | percentage |
-| cgroup.throttled_duration | duration | ms |
-| cgroup.cpu_shares | shares | shares |
-| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
-| cgroup.writeback | dirty, writeback | MiB |
-| cgroup.mem_activity | in, out | MiB/s |
-| cgroup.pgfaults | pgfault, swap | MiB/s |
-| cgroup.mem_usage | ram, swap | MiB |
-| cgroup.mem_usage_limit | available, used | MiB |
-| cgroup.mem_utilization | utilization | percentage |
-| cgroup.mem_failcnt | failures | count |
-| cgroup.io | read, write | KiB/s |
-| cgroup.serviced_ops | read, write | operations/s |
-| cgroup.throttle_io | read, write | KiB/s |
-| cgroup.throttle_serviced_ops | read, write | operations/s |
-| cgroup.queued_ops | read, write | operations |
-| cgroup.merged_ops | read, write | operations/s |
-| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
-| cgroup.cpu_some_pressure_stall_time | time | ms |
-| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
-| cgroup.cpu_full_pressure_stall_time | time | ms |
-| cgroup.memory_some_pressure | some10, some60, some300 | percentage |
-| cgroup.memory_some_pressure_stall_time | time | ms |
-| cgroup.memory_full_pressure | some10, some60, some300 | percentage |
-| cgroup.memory_full_pressure_stall_time | time | ms |
-| cgroup.io_some_pressure | some10, some60, some300 | percentage |
-| cgroup.io_some_pressure_stall_time | time | ms |
-| cgroup.io_full_pressure | some10, some60, some300 | percentage |
-| cgroup.io_full_pressure_stall_time | time | ms |
-| cgroup.pids_current | pids | pids |
-
-### Per cgroup network device
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| container_name | The container name or group path if name resolution fails. |
-| image | Docker/Podman container image name. |
-| device | The name of the host network interface linked to the container's network interface. |
-| container_device | Container network interface name. |
-| interface_type | Network interface type. Always "virtual" for containers. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.net_net | received, sent | kilobits/s |
-| cgroup.net_packets | received, sent, multicast | pps |
-| cgroup.net_errors | inbound, outbound | errors/s |
-| cgroup.net_drops | inbound, outbound | errors/s |
-| cgroup.net_fifo | receive, transmit | errors/s |
-| cgroup.net_compressed | receive, sent | pps |
-| cgroup.net_events | frames, collisions, carrier | events/s |
-| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
-| cgroup.net_carrier | up, down | state |
-| cgroup.net_mtu | mtu | octets |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
-| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |
-| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
-| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/cgroups.plugin/integrations/kubernetes_containers.md b/collectors/cgroups.plugin/integrations/kubernetes_containers.md
deleted file mode 100644
index 9be32a12a..000000000
--- a/collectors/cgroups.plugin/integrations/kubernetes_containers.md
+++ /dev/null
@@ -1,183 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/kubernetes_containers.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
-sidebar_label: "Kubernetes Containers"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Kubernetes"
-most_popular: True
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Kubernetes Containers
-
-
-<img src="https://netdata.cloud/img/kubernetes.svg" width="150"/>
-
-
-Plugin: cgroups.plugin
-Module: /sys/fs/cgroup
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Containers for performance, resource usage, and health status.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per k8s cgroup
-
-These metrics refer to the Pod container.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |
-| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |
-| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |
-| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |
-| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |
-| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |
-| k8s_kind | Instance kind: "pod" or "container". |
-| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |
-| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| k8s.cgroup.cpu_limit | used | percentage |
-| k8s.cgroup.cpu | user, system | percentage |
-| k8s.cgroup.cpu_per_core | a dimension per core | percentage |
-| k8s.cgroup.throttled | throttled | percentage |
-| k8s.cgroup.throttled_duration | duration | ms |
-| k8s.cgroup.cpu_shares | shares | shares |
-| k8s.cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
-| k8s.cgroup.writeback | dirty, writeback | MiB |
-| k8s.cgroup.mem_activity | in, out | MiB/s |
-| k8s.cgroup.pgfaults | pgfault, swap | MiB/s |
-| k8s.cgroup.mem_usage | ram, swap | MiB |
-| k8s.cgroup.mem_usage_limit | available, used | MiB |
-| k8s.cgroup.mem_utilization | utilization | percentage |
-| k8s.cgroup.mem_failcnt | failures | count |
-| k8s.cgroup.io | read, write | KiB/s |
-| k8s.cgroup.serviced_ops | read, write | operations/s |
-| k8s.cgroup.throttle_io | read, write | KiB/s |
-| k8s.cgroup.throttle_serviced_ops | read, write | operations/s |
-| k8s.cgroup.queued_ops | read, write | operations |
-| k8s.cgroup.merged_ops | read, write | operations/s |
-| k8s.cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
-| k8s.cgroup.cpu_some_pressure_stall_time | time | ms |
-| k8s.cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
-| k8s.cgroup.cpu_full_pressure_stall_time | time | ms |
-| k8s.cgroup.memory_some_pressure | some10, some60, some300 | percentage |
-| k8s.cgroup.memory_some_pressure_stall_time | time | ms |
-| k8s.cgroup.memory_full_pressure | some10, some60, some300 | percentage |
-| k8s.cgroup.memory_full_pressure_stall_time | time | ms |
-| k8s.cgroup.io_some_pressure | some10, some60, some300 | percentage |
-| k8s.cgroup.io_some_pressure_stall_time | time | ms |
-| k8s.cgroup.io_full_pressure | some10, some60, some300 | percentage |
-| k8s.cgroup.io_full_pressure_stall_time | time | ms |
-| k8s.cgroup.pids_current | pids | pids |
-
-### Per k8s cgroup network device
-
-These metrics refer to the Pod container network interface.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| device | The name of the host network interface linked to the container's network interface. |
-| container_device | Container network interface name. |
-| interface_type | Network interface type. Always "virtual" for containers. |
-| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |
-| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |
-| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |
-| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |
-| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |
-| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |
-| k8s_kind | Instance kind: "pod" or "container". |
-| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |
-| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| k8s.cgroup.net_net | received, sent | kilobits/s |
-| k8s.cgroup.net_packets | received, sent, multicast | pps |
-| k8s.cgroup.net_errors | inbound, outbound | errors/s |
-| k8s.cgroup.net_drops | inbound, outbound | errors/s |
-| k8s.cgroup.net_fifo | receive, transmit | errors/s |
-| k8s.cgroup.net_compressed | receive, sent | pps |
-| k8s.cgroup.net_events | frames, collisions, carrier | events/s |
-| k8s.cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
-| k8s.cgroup.net_carrier | up, down | state |
-| k8s.cgroup.net_mtu | mtu | octets |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ k8s_cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | k8s.cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
-| [ k8s_cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | k8s.cgroup.mem_usage | cgroup memory utilization |
-| [ k8s_cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
-| [ k8s_cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/cgroups.plugin/integrations/libvirt_containers.md b/collectors/cgroups.plugin/integrations/libvirt_containers.md
deleted file mode 100644
index fed454698..000000000
--- a/collectors/cgroups.plugin/integrations/libvirt_containers.md
+++ /dev/null
@@ -1,169 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/libvirt_containers.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
-sidebar_label: "Libvirt Containers"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Containers and VMs"
-most_popular: True
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Libvirt Containers
-
-
-<img src="https://netdata.cloud/img/libvirt.png" width="150"/>
-
-
-Plugin: cgroups.plugin
-Module: /sys/fs/cgroup
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Libvirt for performance, resource usage, and health status.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per cgroup
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| container_name | The container name or group path if name resolution fails. |
-| image | Docker/Podman container image name. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.cpu_limit | used | percentage |
-| cgroup.cpu | user, system | percentage |
-| cgroup.cpu_per_core | a dimension per core | percentage |
-| cgroup.throttled | throttled | percentage |
-| cgroup.throttled_duration | duration | ms |
-| cgroup.cpu_shares | shares | shares |
-| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
-| cgroup.writeback | dirty, writeback | MiB |
-| cgroup.mem_activity | in, out | MiB/s |
-| cgroup.pgfaults | pgfault, swap | MiB/s |
-| cgroup.mem_usage | ram, swap | MiB |
-| cgroup.mem_usage_limit | available, used | MiB |
-| cgroup.mem_utilization | utilization | percentage |
-| cgroup.mem_failcnt | failures | count |
-| cgroup.io | read, write | KiB/s |
-| cgroup.serviced_ops | read, write | operations/s |
-| cgroup.throttle_io | read, write | KiB/s |
-| cgroup.throttle_serviced_ops | read, write | operations/s |
-| cgroup.queued_ops | read, write | operations |
-| cgroup.merged_ops | read, write | operations/s |
-| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
-| cgroup.cpu_some_pressure_stall_time | time | ms |
-| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
-| cgroup.cpu_full_pressure_stall_time | time | ms |
-| cgroup.memory_some_pressure | some10, some60, some300 | percentage |
-| cgroup.memory_some_pressure_stall_time | time | ms |
-| cgroup.memory_full_pressure | some10, some60, some300 | percentage |
-| cgroup.memory_full_pressure_stall_time | time | ms |
-| cgroup.io_some_pressure | some10, some60, some300 | percentage |
-| cgroup.io_some_pressure_stall_time | time | ms |
-| cgroup.io_full_pressure | some10, some60, some300 | percentage |
-| cgroup.io_full_pressure_stall_time | time | ms |
-| cgroup.pids_current | pids | pids |
-
-### Per cgroup network device
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| container_name | The container name or group path if name resolution fails. |
-| image | Docker/Podman container image name. |
-| device | The name of the host network interface linked to the container's network interface. |
-| container_device | Container network interface name. |
-| interface_type | Network interface type. Always "virtual" for containers. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.net_net | received, sent | kilobits/s |
-| cgroup.net_packets | received, sent, multicast | pps |
-| cgroup.net_errors | inbound, outbound | errors/s |
-| cgroup.net_drops | inbound, outbound | errors/s |
-| cgroup.net_fifo | receive, transmit | errors/s |
-| cgroup.net_compressed | receive, sent | pps |
-| cgroup.net_events | frames, collisions, carrier | events/s |
-| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
-| cgroup.net_carrier | up, down | state |
-| cgroup.net_mtu | mtu | octets |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
-| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |
-| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
-| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/cgroups.plugin/integrations/lxc_containers.md b/collectors/cgroups.plugin/integrations/lxc_containers.md
deleted file mode 100644
index 3f05ffd5f..000000000
--- a/collectors/cgroups.plugin/integrations/lxc_containers.md
+++ /dev/null
@@ -1,169 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/lxc_containers.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
-sidebar_label: "LXC Containers"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Containers and VMs"
-most_popular: True
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# LXC Containers
-
-
-<img src="https://netdata.cloud/img/lxc.png" width="150"/>
-
-
-Plugin: cgroups.plugin
-Module: /sys/fs/cgroup
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor LXC Containers for performance, resource usage, and health status.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per cgroup
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| container_name | The container name or group path if name resolution fails. |
-| image | Docker/Podman container image name. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.cpu_limit | used | percentage |
-| cgroup.cpu | user, system | percentage |
-| cgroup.cpu_per_core | a dimension per core | percentage |
-| cgroup.throttled | throttled | percentage |
-| cgroup.throttled_duration | duration | ms |
-| cgroup.cpu_shares | shares | shares |
-| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
-| cgroup.writeback | dirty, writeback | MiB |
-| cgroup.mem_activity | in, out | MiB/s |
-| cgroup.pgfaults | pgfault, swap | MiB/s |
-| cgroup.mem_usage | ram, swap | MiB |
-| cgroup.mem_usage_limit | available, used | MiB |
-| cgroup.mem_utilization | utilization | percentage |
-| cgroup.mem_failcnt | failures | count |
-| cgroup.io | read, write | KiB/s |
-| cgroup.serviced_ops | read, write | operations/s |
-| cgroup.throttle_io | read, write | KiB/s |
-| cgroup.throttle_serviced_ops | read, write | operations/s |
-| cgroup.queued_ops | read, write | operations |
-| cgroup.merged_ops | read, write | operations/s |
-| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
-| cgroup.cpu_some_pressure_stall_time | time | ms |
-| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
-| cgroup.cpu_full_pressure_stall_time | time | ms |
-| cgroup.memory_some_pressure | some10, some60, some300 | percentage |
-| cgroup.memory_some_pressure_stall_time | time | ms |
-| cgroup.memory_full_pressure | some10, some60, some300 | percentage |
-| cgroup.memory_full_pressure_stall_time | time | ms |
-| cgroup.io_some_pressure | some10, some60, some300 | percentage |
-| cgroup.io_some_pressure_stall_time | time | ms |
-| cgroup.io_full_pressure | some10, some60, some300 | percentage |
-| cgroup.io_full_pressure_stall_time | time | ms |
-| cgroup.pids_current | pids | pids |
-
-### Per cgroup network device
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| container_name | The container name or group path if name resolution fails. |
-| image | Docker/Podman container image name. |
-| device | The name of the host network interface linked to the container's network interface. |
-| container_device | Container network interface name. |
-| interface_type | Network interface type. Always "virtual" for containers. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.net_net | received, sent | kilobits/s |
-| cgroup.net_packets | received, sent, multicast | pps |
-| cgroup.net_errors | inbound, outbound | errors/s |
-| cgroup.net_drops | inbound, outbound | errors/s |
-| cgroup.net_fifo | receive, transmit | errors/s |
-| cgroup.net_compressed | receive, sent | pps |
-| cgroup.net_events | frames, collisions, carrier | events/s |
-| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
-| cgroup.net_carrier | up, down | state |
-| cgroup.net_mtu | mtu | octets |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
-| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |
-| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
-| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/cgroups.plugin/integrations/ovirt_containers.md b/collectors/cgroups.plugin/integrations/ovirt_containers.md
deleted file mode 100644
index 5771aeea1..000000000
--- a/collectors/cgroups.plugin/integrations/ovirt_containers.md
+++ /dev/null
@@ -1,169 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/ovirt_containers.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
-sidebar_label: "oVirt Containers"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Containers and VMs"
-most_popular: True
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# oVirt Containers
-
-
-<img src="https://netdata.cloud/img/ovirt.svg" width="150"/>
-
-
-Plugin: cgroups.plugin
-Module: /sys/fs/cgroup
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor oVirt for performance, resource usage, and health status.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per cgroup
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| container_name | The container name or group path if name resolution fails. |
-| image | Docker/Podman container image name. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.cpu_limit | used | percentage |
-| cgroup.cpu | user, system | percentage |
-| cgroup.cpu_per_core | a dimension per core | percentage |
-| cgroup.throttled | throttled | percentage |
-| cgroup.throttled_duration | duration | ms |
-| cgroup.cpu_shares | shares | shares |
-| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
-| cgroup.writeback | dirty, writeback | MiB |
-| cgroup.mem_activity | in, out | MiB/s |
-| cgroup.pgfaults | pgfault, swap | MiB/s |
-| cgroup.mem_usage | ram, swap | MiB |
-| cgroup.mem_usage_limit | available, used | MiB |
-| cgroup.mem_utilization | utilization | percentage |
-| cgroup.mem_failcnt | failures | count |
-| cgroup.io | read, write | KiB/s |
-| cgroup.serviced_ops | read, write | operations/s |
-| cgroup.throttle_io | read, write | KiB/s |
-| cgroup.throttle_serviced_ops | read, write | operations/s |
-| cgroup.queued_ops | read, write | operations |
-| cgroup.merged_ops | read, write | operations/s |
-| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
-| cgroup.cpu_some_pressure_stall_time | time | ms |
-| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
-| cgroup.cpu_full_pressure_stall_time | time | ms |
-| cgroup.memory_some_pressure | some10, some60, some300 | percentage |
-| cgroup.memory_some_pressure_stall_time | time | ms |
-| cgroup.memory_full_pressure | some10, some60, some300 | percentage |
-| cgroup.memory_full_pressure_stall_time | time | ms |
-| cgroup.io_some_pressure | some10, some60, some300 | percentage |
-| cgroup.io_some_pressure_stall_time | time | ms |
-| cgroup.io_full_pressure | some10, some60, some300 | percentage |
-| cgroup.io_full_pressure_stall_time | time | ms |
-| cgroup.pids_current | pids | pids |
-
-### Per cgroup network device
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| container_name | The container name or group path if name resolution fails. |
-| image | Docker/Podman container image name. |
-| device | The name of the host network interface linked to the container's network interface. |
-| container_device | Container network interface name. |
-| interface_type | Network interface type. Always "virtual" for containers. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.net_net | received, sent | kilobits/s |
-| cgroup.net_packets | received, sent, multicast | pps |
-| cgroup.net_errors | inbound, outbound | errors/s |
-| cgroup.net_drops | inbound, outbound | errors/s |
-| cgroup.net_fifo | receive, transmit | errors/s |
-| cgroup.net_compressed | receive, sent | pps |
-| cgroup.net_events | frames, collisions, carrier | events/s |
-| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
-| cgroup.net_carrier | up, down | state |
-| cgroup.net_mtu | mtu | octets |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
-| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |
-| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
-| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/cgroups.plugin/integrations/proxmox_containers.md b/collectors/cgroups.plugin/integrations/proxmox_containers.md
deleted file mode 100644
index 1804a40ca..000000000
--- a/collectors/cgroups.plugin/integrations/proxmox_containers.md
+++ /dev/null
@@ -1,169 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/proxmox_containers.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
-sidebar_label: "Proxmox Containers"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Containers and VMs"
-most_popular: True
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Proxmox Containers
-
-
-<img src="https://netdata.cloud/img/proxmox.png" width="150"/>
-
-
-Plugin: cgroups.plugin
-Module: /sys/fs/cgroup
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Proxmox for performance, resource usage, and health status.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per cgroup
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| container_name | The container name or group path if name resolution fails. |
-| image | Docker/Podman container image name. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.cpu_limit | used | percentage |
-| cgroup.cpu | user, system | percentage |
-| cgroup.cpu_per_core | a dimension per core | percentage |
-| cgroup.throttled | throttled | percentage |
-| cgroup.throttled_duration | duration | ms |
-| cgroup.cpu_shares | shares | shares |
-| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
-| cgroup.writeback | dirty, writeback | MiB |
-| cgroup.mem_activity | in, out | MiB/s |
-| cgroup.pgfaults | pgfault, swap | MiB/s |
-| cgroup.mem_usage | ram, swap | MiB |
-| cgroup.mem_usage_limit | available, used | MiB |
-| cgroup.mem_utilization | utilization | percentage |
-| cgroup.mem_failcnt | failures | count |
-| cgroup.io | read, write | KiB/s |
-| cgroup.serviced_ops | read, write | operations/s |
-| cgroup.throttle_io | read, write | KiB/s |
-| cgroup.throttle_serviced_ops | read, write | operations/s |
-| cgroup.queued_ops | read, write | operations |
-| cgroup.merged_ops | read, write | operations/s |
-| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
-| cgroup.cpu_some_pressure_stall_time | time | ms |
-| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
-| cgroup.cpu_full_pressure_stall_time | time | ms |
-| cgroup.memory_some_pressure | some10, some60, some300 | percentage |
-| cgroup.memory_some_pressure_stall_time | time | ms |
-| cgroup.memory_full_pressure | some10, some60, some300 | percentage |
-| cgroup.memory_full_pressure_stall_time | time | ms |
-| cgroup.io_some_pressure | some10, some60, some300 | percentage |
-| cgroup.io_some_pressure_stall_time | time | ms |
-| cgroup.io_full_pressure | some10, some60, some300 | percentage |
-| cgroup.io_full_pressure_stall_time | time | ms |
-| cgroup.pids_current | pids | pids |
-
-### Per cgroup network device
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| container_name | The container name or group path if name resolution fails. |
-| image | Docker/Podman container image name. |
-| device | The name of the host network interface linked to the container's network interface. |
-| container_device | Container network interface name. |
-| interface_type | Network interface type. Always "virtual" for containers. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.net_net | received, sent | kilobits/s |
-| cgroup.net_packets | received, sent, multicast | pps |
-| cgroup.net_errors | inbound, outbound | errors/s |
-| cgroup.net_drops | inbound, outbound | errors/s |
-| cgroup.net_fifo | receive, transmit | errors/s |
-| cgroup.net_compressed | receive, sent | pps |
-| cgroup.net_events | frames, collisions, carrier | events/s |
-| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
-| cgroup.net_carrier | up, down | state |
-| cgroup.net_mtu | mtu | octets |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
-| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |
-| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
-| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/cgroups.plugin/integrations/systemd_services.md b/collectors/cgroups.plugin/integrations/systemd_services.md
deleted file mode 100644
index 0ce906366..000000000
--- a/collectors/cgroups.plugin/integrations/systemd_services.md
+++ /dev/null
@@ -1,112 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/systemd_services.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
-sidebar_label: "Systemd Services"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Systemd"
-most_popular: True
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Systemd Services
-
-
-<img src="https://netdata.cloud/img/systemd.svg" width="150"/>
-
-
-Plugin: cgroups.plugin
-Module: /sys/fs/cgroup
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Systemd Services for performance, resource usage, and health status.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per systemd service
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| service_name | Service name |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| systemd.service.cpu.utilization | user, system | percentage |
-| systemd.service.memory.usage | ram, swap | MiB |
-| systemd.service.memory.failcnt | fail | failures/s |
-| systemd.service.memory.ram.usage | rss, cache, mapped_file, rss_huge | MiB |
-| systemd.service.memory.writeback | writeback, dirty | MiB |
-| systemd.service.memory.paging.faults | minor, major | MiB/s |
-| systemd.service.memory.paging.io | in, out | MiB/s |
-| systemd.service.disk.io | read, write | KiB/s |
-| systemd.service.disk.iops | read, write | operations/s |
-| systemd.service.disk.throttle.io | read, write | KiB/s |
-| systemd.service.disk.throttle.iops | read, write | operations/s |
-| systemd.service.disk.queued_iops | read, write | operations/s |
-| systemd.service.disk.merged_iops | read, write | operations/s |
-| systemd.service.pids.current | pids | pids |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/cgroups.plugin/integrations/virtual_machines.md b/collectors/cgroups.plugin/integrations/virtual_machines.md
deleted file mode 100644
index 6a64923c4..000000000
--- a/collectors/cgroups.plugin/integrations/virtual_machines.md
+++ /dev/null
@@ -1,169 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/integrations/virtual_machines.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cgroups.plugin/metadata.yaml"
-sidebar_label: "Virtual Machines"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Containers and VMs"
-most_popular: True
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Virtual Machines
-
-
-<img src="https://netdata.cloud/img/container.svg" width="150"/>
-
-
-Plugin: cgroups.plugin
-Module: /sys/fs/cgroup
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Virtual Machines for performance, resource usage, and health status.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per cgroup
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| container_name | The container name or group path if name resolution fails. |
-| image | Docker/Podman container image name. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.cpu_limit | used | percentage |
-| cgroup.cpu | user, system | percentage |
-| cgroup.cpu_per_core | a dimension per core | percentage |
-| cgroup.throttled | throttled | percentage |
-| cgroup.throttled_duration | duration | ms |
-| cgroup.cpu_shares | shares | shares |
-| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |
-| cgroup.writeback | dirty, writeback | MiB |
-| cgroup.mem_activity | in, out | MiB/s |
-| cgroup.pgfaults | pgfault, swap | MiB/s |
-| cgroup.mem_usage | ram, swap | MiB |
-| cgroup.mem_usage_limit | available, used | MiB |
-| cgroup.mem_utilization | utilization | percentage |
-| cgroup.mem_failcnt | failures | count |
-| cgroup.io | read, write | KiB/s |
-| cgroup.serviced_ops | read, write | operations/s |
-| cgroup.throttle_io | read, write | KiB/s |
-| cgroup.throttle_serviced_ops | read, write | operations/s |
-| cgroup.queued_ops | read, write | operations |
-| cgroup.merged_ops | read, write | operations/s |
-| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |
-| cgroup.cpu_some_pressure_stall_time | time | ms |
-| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |
-| cgroup.cpu_full_pressure_stall_time | time | ms |
-| cgroup.memory_some_pressure | some10, some60, some300 | percentage |
-| cgroup.memory_some_pressure_stall_time | time | ms |
-| cgroup.memory_full_pressure | some10, some60, some300 | percentage |
-| cgroup.memory_full_pressure_stall_time | time | ms |
-| cgroup.io_some_pressure | some10, some60, some300 | percentage |
-| cgroup.io_some_pressure_stall_time | time | ms |
-| cgroup.io_full_pressure | some10, some60, some300 | percentage |
-| cgroup.io_full_pressure_stall_time | time | ms |
-| cgroup.pids_current | pids | pids |
-
-### Per cgroup network device
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| container_name | The container name or group path if name resolution fails. |
-| image | Docker/Podman container image name. |
-| device | The name of the host network interface linked to the container's network interface. |
-| container_device | Container network interface name. |
-| interface_type | Network interface type. Always "virtual" for containers. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.net_net | received, sent | kilobits/s |
-| cgroup.net_packets | received, sent, multicast | pps |
-| cgroup.net_errors | inbound, outbound | errors/s |
-| cgroup.net_drops | inbound, outbound | errors/s |
-| cgroup.net_fifo | receive, transmit | errors/s |
-| cgroup.net_compressed | receive, sent | pps |
-| cgroup.net_events | frames, collisions, carrier | events/s |
-| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
-| cgroup.net_carrier | up, down | state |
-| cgroup.net_mtu | mtu | octets |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |
-| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |
-| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |
-| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
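-Note that while this generated page lists no options, the collector itself reads its settings from the `[plugin:cgroups]` section of `netdata.conf`; the option names can be seen in the `config_get*()` calls in `sys_fs_cgroup.c`. A short sketch showing a few of them with their compiled-in defaults (`update every` actually defaults to the agent's global update frequency, typically 1 second):
-
-```
-[plugin:cgroups]
-    update every = 1
-    check for new cgroups every = 10
-    use unified cgroups = auto
-    enable cpuacct stat (total CPU) = auto
-    enable memory = auto
-    max cgroups to allow = 1000
-```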
-
diff --git a/collectors/cgroups.plugin/metadata.yaml b/collectors/cgroups.plugin/metadata.yaml
deleted file mode 100644
index a1abbb5a9..000000000
--- a/collectors/cgroups.plugin/metadata.yaml
+++ /dev/null
@@ -1,1022 +0,0 @@
-plugin_name: cgroups.plugin
-modules:
- - &module
- meta: &meta
- plugin_name: cgroups.plugin
- module_name: /sys/fs/cgroup
- monitored_instance:
- name: Containers
- link: ""
- categories:
- - data-collection.containers-and-vms
- icon_filename: container.svg
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - containers
- most_popular: true
- overview: &overview
- data_collection: &data_collection
- metrics_description: "Monitor Containers for performance, resource usage, and health status."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: cgroup_10min_cpu_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf
- metric: cgroup.cpu_limit
- info: average cgroup CPU utilization over the last 10 minutes
- - name: cgroup_ram_in_use
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf
- metric: cgroup.mem_usage
- info: cgroup memory utilization
- - name: cgroup_1m_received_packets_rate
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf
- metric: cgroup.net_packets
- info: average number of packets received by the network interface ${label:device} over the last minute
- - name: cgroup_10s_received_packets_storm
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf
- metric: cgroup.net_packets
- info:
- ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over
- the last minute
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
- description: ""
- labels:
- - name: container_name
- description: The container name or group path if name resolution fails.
- - name: image
- description: Docker/Podman container image name.
- metrics:
- - name: cgroup.cpu_limit
- description: CPU Usage within the limits
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: used
- - name: cgroup.cpu
- description: CPU Usage (100% = 1 core)
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: user
- - name: system
- - name: cgroup.cpu_per_core
- description: CPU Usage (100% = 1 core) Per Core
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: a dimension per core
- - name: cgroup.throttled
- description: CPU Throttled Runnable Periods
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: throttled
- - name: cgroup.throttled_duration
- description: CPU Throttled Time Duration
- unit: "ms"
- chart_type: line
- dimensions:
- - name: duration
- - name: cgroup.cpu_shares
- description: CPU Time Relative Share
- unit: "shares"
- chart_type: line
- dimensions:
- - name: shares
- - name: cgroup.mem
- description: Memory Usage
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: cache
- - name: rss
- - name: swap
- - name: rss_huge
- - name: mapped_file
- - name: cgroup.writeback
- description: Writeback Memory
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: dirty
- - name: writeback
- - name: cgroup.mem_activity
- description: Memory Activity
- unit: "MiB/s"
- chart_type: line
- dimensions:
- - name: in
- - name: out
- - name: cgroup.pgfaults
- description: Memory Page Faults
- unit: "MiB/s"
- chart_type: line
- dimensions:
- - name: pgfault
- - name: swap
- - name: cgroup.mem_usage
- description: Used Memory
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: ram
- - name: swap
- - name: cgroup.mem_usage_limit
- description: Used RAM within the limits
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: available
- - name: used
- - name: cgroup.mem_utilization
- description: Memory Utilization
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: utilization
- - name: cgroup.mem_failcnt
- description: Memory Limit Failures
- unit: "count"
- chart_type: line
- dimensions:
- - name: failures
- - name: cgroup.io
- description: I/O Bandwidth (all disks)
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: cgroup.serviced_ops
- description: Serviced I/O Operations (all disks)
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: cgroup.throttle_io
- description: Throttle I/O Bandwidth (all disks)
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: cgroup.throttle_serviced_ops
- description: Throttle Serviced I/O Operations (all disks)
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: cgroup.queued_ops
- description: Queued I/O Operations (all disks)
- unit: "operations"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: cgroup.merged_ops
- description: Merged I/O Operations (all disks)
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: cgroup.cpu_some_pressure
- description: CPU some pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: cgroup.cpu_some_pressure_stall_time
- description: CPU some pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: cgroup.cpu_full_pressure
- description: CPU full pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: cgroup.cpu_full_pressure_stall_time
- description: CPU full pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: cgroup.memory_some_pressure
- description: Memory some pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: cgroup.memory_some_pressure_stall_time
- description: Memory some pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: cgroup.memory_full_pressure
- description: Memory full pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: cgroup.memory_full_pressure_stall_time
- description: Memory full pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: cgroup.io_some_pressure
- description: I/O some pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: cgroup.io_some_pressure_stall_time
- description: I/O some pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: cgroup.io_full_pressure
-            description: I/O full pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: cgroup.io_full_pressure_stall_time
-            description: I/O full pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: cgroup.pids_current
- description: Number of processes
- unit: "pids"
- chart_type: line
- dimensions:
- - name: pids
- - name: cgroup network device
- description: ""
- labels:
- - name: container_name
- description: The container name or group path if name resolution fails.
- - name: image
- description: Docker/Podman container image name.
- - name: device
- description: "The name of the host network interface linked to the container's network interface."
- - name: container_device
- description: Container network interface name.
- - name: interface_type
-            description: 'Network interface type. Always "virtual" for containers.'
- metrics:
- - name: cgroup.net_net
- description: Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: cgroup.net_packets
- description: Packets
- unit: "pps"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: multicast
- - name: cgroup.net_errors
- description: Interface Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: inbound
- - name: outbound
- - name: cgroup.net_drops
- description: Interface Drops
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: inbound
- - name: outbound
- - name: cgroup.net_fifo
- description: Interface FIFO Buffer Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: receive
- - name: transmit
- - name: cgroup.net_compressed
-            description: Compressed Packets
- unit: "pps"
- chart_type: line
- dimensions:
- - name: receive
- - name: sent
- - name: cgroup.net_events
- description: Network Interface Events
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: frames
- - name: collisions
- - name: carrier
- - name: cgroup.net_operstate
- description: Interface Operational State
- unit: "state"
- chart_type: line
- dimensions:
- - name: up
- - name: down
- - name: notpresent
- - name: lowerlayerdown
- - name: testing
- - name: dormant
- - name: unknown
- - name: cgroup.net_carrier
- description: Interface Physical Link State
- unit: "state"
- chart_type: line
- dimensions:
- - name: up
- - name: down
- - name: cgroup.net_mtu
- description: Interface MTU
- unit: "octets"
- chart_type: line
- dimensions:
- - name: mtu
- - <<: *module
- meta:
- <<: *meta
- monitored_instance:
- name: Kubernetes Containers
- link: https://kubernetes.io/
- icon_filename: kubernetes.svg
- categories:
- #- data-collection.containers-and-vms
- - data-collection.kubernetes
- keywords:
- - k8s
- - kubernetes
- - pods
- - containers
- overview:
- <<: *overview
-      data_collection:
- <<: *data_collection
- metrics_description: Monitor Kubernetes Clusters for performance, resource usage, and health status.
- alerts:
- - name: k8s_cgroup_10min_cpu_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf
- metric: k8s.cgroup.cpu_limit
- info: average cgroup CPU utilization over the last 10 minutes
- - name: k8s_cgroup_ram_in_use
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf
- metric: k8s.cgroup.mem_usage
- info: cgroup memory utilization
- - name: k8s_cgroup_1m_received_packets_rate
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf
- metric: k8s.cgroup.net_packets
- info: average number of packets received by the network interface ${label:device} over the last minute
- - name: k8s_cgroup_10s_received_packets_storm
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cgroups.conf
- metric: k8s.cgroup.net_packets
- info:
- ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over
- the last minute
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: k8s cgroup
- description: These metrics refer to the Pod container.
- labels:
- - name: k8s_node_name
- description: 'Node name. The value of _pod.spec.nodeName_.'
- - name: k8s_namespace
- description: 'Namespace name. The value of _pod.metadata.namespace_.'
- - name: k8s_controller_kind
- description: 'Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_.'
- - name: k8s_controller_name
-            description: 'Controller name. The value of _pod.OwnerReferences.Controller.Name_.'
- - name: k8s_pod_name
- description: 'Pod name. The value of _pod.metadata.name_.'
- - name: k8s_container_name
- description: 'Container name. The value of _pod.spec.containers.name_.'
- - name: k8s_kind
- description: 'Instance kind: "pod" or "container".'
- - name: k8s_qos_class
- description: 'QoS class (guaranteed, burstable, besteffort).'
- - name: k8s_cluster_id
- description: 'Cluster ID. The value of kube-system namespace _namespace.metadata.uid_.'
- metrics:
- - name: k8s.cgroup.cpu_limit
- description: CPU Usage within the limits
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: used
- - name: k8s.cgroup.cpu
- description: CPU Usage (100% = 1000 mCPU)
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: user
- - name: system
- - name: k8s.cgroup.cpu_per_core
- description: CPU Usage (100% = 1000 mCPU) Per Core
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: a dimension per core
- - name: k8s.cgroup.throttled
- description: CPU Throttled Runnable Periods
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: throttled
- - name: k8s.cgroup.throttled_duration
- description: CPU Throttled Time Duration
- unit: "ms"
- chart_type: line
- dimensions:
- - name: duration
- - name: k8s.cgroup.cpu_shares
- description: CPU Time Relative Share
- unit: "shares"
- chart_type: line
- dimensions:
- - name: shares
- - name: k8s.cgroup.mem
- description: Memory Usage
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: cache
- - name: rss
- - name: swap
- - name: rss_huge
- - name: mapped_file
- - name: k8s.cgroup.writeback
- description: Writeback Memory
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: dirty
- - name: writeback
- - name: k8s.cgroup.mem_activity
- description: Memory Activity
- unit: "MiB/s"
- chart_type: line
- dimensions:
- - name: in
- - name: out
- - name: k8s.cgroup.pgfaults
- description: Memory Page Faults
- unit: "MiB/s"
- chart_type: line
- dimensions:
- - name: pgfault
- - name: swap
- - name: k8s.cgroup.mem_usage
- description: Used Memory
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: ram
- - name: swap
- - name: k8s.cgroup.mem_usage_limit
- description: Used RAM within the limits
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: available
- - name: used
- - name: k8s.cgroup.mem_utilization
- description: Memory Utilization
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: utilization
- - name: k8s.cgroup.mem_failcnt
- description: Memory Limit Failures
- unit: "count"
- chart_type: line
- dimensions:
- - name: failures
- - name: k8s.cgroup.io
- description: I/O Bandwidth (all disks)
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: k8s.cgroup.serviced_ops
- description: Serviced I/O Operations (all disks)
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: k8s.cgroup.throttle_io
- description: Throttle I/O Bandwidth (all disks)
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: k8s.cgroup.throttle_serviced_ops
- description: Throttle Serviced I/O Operations (all disks)
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: k8s.cgroup.queued_ops
- description: Queued I/O Operations (all disks)
- unit: "operations"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: k8s.cgroup.merged_ops
- description: Merged I/O Operations (all disks)
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: k8s.cgroup.cpu_some_pressure
- description: CPU some pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: k8s.cgroup.cpu_some_pressure_stall_time
- description: CPU some pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: k8s.cgroup.cpu_full_pressure
- description: CPU full pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: k8s.cgroup.cpu_full_pressure_stall_time
- description: CPU full pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: k8s.cgroup.memory_some_pressure
- description: Memory some pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: k8s.cgroup.memory_some_pressure_stall_time
- description: Memory some pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: k8s.cgroup.memory_full_pressure
- description: Memory full pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: k8s.cgroup.memory_full_pressure_stall_time
- description: Memory full pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: k8s.cgroup.io_some_pressure
- description: I/O some pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: k8s.cgroup.io_some_pressure_stall_time
- description: I/O some pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: k8s.cgroup.io_full_pressure
-            description: I/O full pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: k8s.cgroup.io_full_pressure_stall_time
-            description: I/O full pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: k8s.cgroup.pids_current
- description: Number of processes
- unit: "pids"
- chart_type: line
- dimensions:
- - name: pids
- - name: k8s cgroup network device
- description: These metrics refer to the Pod container network interface.
- labels:
- - name: device
- description: "The name of the host network interface linked to the container's network interface."
- - name: container_device
- description: Container network interface name.
- - name: interface_type
-            description: 'Network interface type. Always "virtual" for containers.'
- - name: k8s_node_name
- description: 'Node name. The value of _pod.spec.nodeName_.'
- - name: k8s_namespace
- description: 'Namespace name. The value of _pod.metadata.namespace_.'
- - name: k8s_controller_kind
- description: 'Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_.'
- - name: k8s_controller_name
-            description: 'Controller name. The value of _pod.OwnerReferences.Controller.Name_.'
- - name: k8s_pod_name
- description: 'Pod name. The value of _pod.metadata.name_.'
- - name: k8s_container_name
- description: 'Container name. The value of _pod.spec.containers.name_.'
- - name: k8s_kind
- description: 'Instance kind: "pod" or "container".'
- - name: k8s_qos_class
- description: 'QoS class (guaranteed, burstable, besteffort).'
- - name: k8s_cluster_id
- description: 'Cluster ID. The value of kube-system namespace _namespace.metadata.uid_.'
- metrics:
- - name: k8s.cgroup.net_net
- description: Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: k8s.cgroup.net_packets
- description: Packets
- unit: "pps"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: multicast
- - name: k8s.cgroup.net_errors
- description: Interface Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: inbound
- - name: outbound
- - name: k8s.cgroup.net_drops
- description: Interface Drops
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: inbound
- - name: outbound
- - name: k8s.cgroup.net_fifo
- description: Interface FIFO Buffer Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: receive
- - name: transmit
- - name: k8s.cgroup.net_compressed
-            description: Compressed Packets
- unit: "pps"
- chart_type: line
- dimensions:
- - name: receive
- - name: sent
- - name: k8s.cgroup.net_events
- description: Network Interface Events
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: frames
- - name: collisions
- - name: carrier
- - name: k8s.cgroup.net_operstate
- description: Interface Operational State
- unit: "state"
- chart_type: line
- dimensions:
- - name: up
- - name: down
- - name: notpresent
- - name: lowerlayerdown
- - name: testing
- - name: dormant
- - name: unknown
- - name: k8s.cgroup.net_carrier
- description: Interface Physical Link State
- unit: "state"
- chart_type: line
- dimensions:
- - name: up
- - name: down
- - name: k8s.cgroup.net_mtu
- description: Interface MTU
- unit: "octets"
- chart_type: line
- dimensions:
- - name: mtu
- - <<: *module
- meta:
- <<: *meta
- monitored_instance:
- name: Systemd Services
- link: ""
- icon_filename: systemd.svg
- categories:
- - data-collection.systemd
- keywords:
- - systemd
- - services
- overview:
- <<: *overview
-      data_collection:
-        <<: *data_collection
-        metrics_description: "Monitor Systemd Services for performance, resource usage, and health status."
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: systemd service
- description: ""
- labels:
- - name: service_name
- description: Service name
- metrics:
- - name: systemd.service.cpu.utilization
- description: Systemd Services CPU utilization (100% = 1 core)
- unit: percentage
- chart_type: stacked
- dimensions:
- - name: user
- - name: system
- - name: systemd.service.memory.usage
- description: Systemd Services Used Memory
- unit: MiB
- chart_type: stacked
- dimensions:
- - name: ram
- - name: swap
- - name: systemd.service.memory.failcnt
- description: Systemd Services Memory Limit Failures
- unit: failures/s
- chart_type: line
- dimensions:
- - name: fail
- - name: systemd.service.memory.ram.usage
- description: Systemd Services Memory
- unit: MiB
- chart_type: stacked
- dimensions:
- - name: rss
- - name: cache
- - name: mapped_file
- - name: rss_huge
- - name: systemd.service.memory.writeback
- description: Systemd Services Writeback Memory
- unit: MiB
- chart_type: stacked
- dimensions:
- - name: writeback
- - name: dirty
- - name: systemd.service.memory.paging.faults
- description: Systemd Services Memory Minor and Major Page Faults
- unit: MiB/s
- chart_type: area
- dimensions:
- - name: minor
- - name: major
- - name: systemd.service.memory.paging.io
- description: Systemd Services Memory Paging IO
- unit: MiB/s
- chart_type: area
- dimensions:
- - name: in
- - name: out
- - name: systemd.service.disk.io
- description: Systemd Services Disk Read/Write Bandwidth
- unit: KiB/s
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: systemd.service.disk.iops
- description: Systemd Services Disk Read/Write Operations
- unit: operations/s
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: systemd.service.disk.throttle.io
- description: Systemd Services Throttle Disk Read/Write Bandwidth
- unit: KiB/s
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: systemd.service.disk.throttle.iops
- description: Systemd Services Throttle Disk Read/Write Operations
- unit: operations/s
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: systemd.service.disk.queued_iops
- description: Systemd Services Queued Disk Read/Write Operations
- unit: operations/s
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: systemd.service.disk.merged_iops
- description: Systemd Services Merged Disk Read/Write Operations
- unit: operations/s
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: systemd.service.pids.current
- description: Systemd Services Number of Processes
- unit: pids
- chart_type: line
- dimensions:
- - name: pids
- - <<: *module
- meta:
- <<: *meta
- monitored_instance:
- name: Virtual Machines
- link: ""
- icon_filename: container.svg
- categories:
- - data-collection.containers-and-vms
- keywords:
- - vms
- - virtualization
- - container
- overview:
- <<: *overview
- data_collection:
- <<: *data_collection
- metrics_description: "Monitor Virtual Machines for performance, resource usage, and health status."
- - <<: *module
- meta:
- <<: *meta
- monitored_instance:
- name: LXC Containers
- link: ""
- icon_filename: lxc.png
- categories:
- - data-collection.containers-and-vms
- keywords:
- - lxc
- - lxd
- - container
- overview:
- <<: *overview
- data_collection:
- <<: *data_collection
- metrics_description: "Monitor LXC Containers for performance, resource usage, and health status."
- - <<: *module
- meta:
- <<: *meta
- monitored_instance:
- name: Libvirt Containers
- link: ""
- icon_filename: libvirt.png
- categories:
- - data-collection.containers-and-vms
- keywords:
- - libvirt
- - container
- overview:
- <<: *overview
- data_collection:
- <<: *data_collection
- metrics_description: "Monitor Libvirt for performance, resource usage, and health status."
- - <<: *module
- meta:
- <<: *meta
- monitored_instance:
- name: oVirt Containers
- link: ""
- icon_filename: ovirt.svg
- categories:
- - data-collection.containers-and-vms
- keywords:
- - ovirt
- - container
- overview:
- <<: *overview
- data_collection:
- <<: *data_collection
- metrics_description: "Monitor oVirt for performance, resource usage, and health status."
- - <<: *module
- meta:
- <<: *meta
- monitored_instance:
- name: Proxmox Containers
- link: ""
- icon_filename: proxmox.png
- categories:
- - data-collection.containers-and-vms
- keywords:
- - proxmox
- - container
- overview:
- <<: *overview
- data_collection:
- <<: *data_collection
- metrics_description: "Monitor Proxmox for performance, resource usage, and health status."
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c
deleted file mode 100644
index 705edf6f7..000000000
--- a/collectors/cgroups.plugin/sys_fs_cgroup.c
+++ /dev/null
@@ -1,1729 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "cgroup-internals.h"
-
-// main cgroups thread worker jobs
-#define WORKER_CGROUPS_LOCK 0
-#define WORKER_CGROUPS_READ 1
-#define WORKER_CGROUPS_CHART 2
-
-// ----------------------------------------------------------------------------
-// cgroup globals
-unsigned long long host_ram_total = 0;
-int is_inside_k8s = 0;
-long system_page_size = 4096; // the real value is queried via sysconf() in read_cgroup_plugin_configuration()
-int cgroup_enable_cpuacct_stat = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_cpuacct_usage = CONFIG_BOOLEAN_NO;
-int cgroup_enable_cpuacct_cpu_throttling = CONFIG_BOOLEAN_YES;
-int cgroup_enable_cpuacct_cpu_shares = CONFIG_BOOLEAN_NO;
-int cgroup_enable_memory = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_detailed_memory = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_memory_failcnt = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_swap = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_blkio_io = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_blkio_ops = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_blkio_throttle_io = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_blkio_throttle_ops = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_blkio_merged_ops = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_pressure_cpu = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_pressure_io_some = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_pressure_io_full = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_pressure_memory_some = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_pressure_irq_some = CONFIG_BOOLEAN_NO;
-int cgroup_enable_pressure_irq_full = CONFIG_BOOLEAN_AUTO;
-int cgroup_enable_systemd_services = CONFIG_BOOLEAN_YES;
-int cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO;
-int cgroup_used_memory = CONFIG_BOOLEAN_YES;
-int cgroup_use_unified_cgroups = CONFIG_BOOLEAN_NO;
-int cgroup_unified_exist = CONFIG_BOOLEAN_AUTO;
-int cgroup_search_in_devices = 1;
-int cgroup_check_for_new_every = 10;
-int cgroup_update_every = 1;
-int cgroup_containers_chart_priority = NETDATA_CHART_PRIO_CGROUPS_CONTAINERS;
-int cgroup_recheck_zero_blkio_every_iterations = 10;
-int cgroup_recheck_zero_mem_failcnt_every_iterations = 10;
-int cgroup_recheck_zero_mem_detailed_every_iterations = 10;
-char *cgroup_cpuacct_base = NULL;
-char *cgroup_cpuset_base = NULL;
-char *cgroup_blkio_base = NULL;
-char *cgroup_memory_base = NULL;
-char *cgroup_devices_base = NULL;
-char *cgroup_pids_base = NULL;
-char *cgroup_unified_base = NULL;
-int cgroup_root_count = 0;
-int cgroup_root_max = 1000;
-int cgroup_max_depth = 0;
-SIMPLE_PATTERN *enabled_cgroup_paths = NULL;
-SIMPLE_PATTERN *enabled_cgroup_names = NULL;
-SIMPLE_PATTERN *search_cgroup_paths = NULL;
-SIMPLE_PATTERN *enabled_cgroup_renames = NULL;
-SIMPLE_PATTERN *systemd_services_cgroups = NULL;
-SIMPLE_PATTERN *entrypoint_parent_process_comm = NULL;
-char *cgroups_network_interface_script = NULL;
-int cgroups_check = 0;
-uint32_t Read_hash = 0;
-uint32_t Write_hash = 0;
-uint32_t user_hash = 0;
-uint32_t system_hash = 0;
-uint32_t user_usec_hash = 0;
-uint32_t system_usec_hash = 0;
-uint32_t nr_periods_hash = 0;
-uint32_t nr_throttled_hash = 0;
-uint32_t throttled_time_hash = 0;
-uint32_t throttled_usec_hash = 0;
-
-// *** WARNING *** These fields are not thread safe. Take care to use them safely.
-struct cgroup *cgroup_root = NULL;
-uv_mutex_t cgroup_root_mutex;
-
-struct cgroups_systemd_config_setting cgroups_systemd_options[] = {
- { .name = "legacy", .setting = SYSTEMD_CGROUP_LEGACY },
- { .name = "hybrid", .setting = SYSTEMD_CGROUP_HYBRID },
- { .name = "unified", .setting = SYSTEMD_CGROUP_UNIFIED },
- { .name = NULL, .setting = SYSTEMD_CGROUP_ERR },
-};
-
-// Shared memory with information from detected cgroups
-netdata_ebpf_cgroup_shm_t shm_cgroup_ebpf = {NULL, NULL};
-int shm_fd_cgroup_ebpf = -1;
-sem_t *shm_mutex_cgroup_ebpf = SEM_FAILED;
-
-struct discovery_thread discovery_thread;
-
-
-/* on Fedora, systemd is not in PATH for some reason */
-#define SYSTEMD_CMD_RHEL "/usr/lib/systemd/systemd --version"
-#define SYSTEMD_HIERARCHY_STRING "default-hierarchy="
-
-#define MAXSIZE_PROC_CMDLINE 4096
-static enum cgroups_systemd_setting cgroups_detect_systemd(const char *exec)
-{
- pid_t command_pid;
- enum cgroups_systemd_setting retval = SYSTEMD_CGROUP_ERR;
- char buf[MAXSIZE_PROC_CMDLINE];
- char *begin, *end;
-
- FILE *fp_child_input;
- FILE *fp_child_output = netdata_popen(exec, &command_pid, &fp_child_input);
-
- if (!fp_child_output)
- return retval;
-
- fd_set rfds;
- struct timeval timeout;
- int fd = fileno(fp_child_output);
- int ret = -1;
-
- FD_ZERO(&rfds);
- FD_SET(fd, &rfds);
- timeout.tv_sec = 3;
- timeout.tv_usec = 0;
-
- if (fd != -1) {
- ret = select(fd + 1, &rfds, NULL, NULL, &timeout);
- }
-
- if (ret == -1) {
- collector_error("Failed to get the output of \"%s\"", exec);
- } else if (ret == 0) {
- collector_info("Cannot get the output of \"%s\" within %"PRId64" seconds", exec, (int64_t)timeout.tv_sec);
- } else {
- while (fgets(buf, MAXSIZE_PROC_CMDLINE, fp_child_output) != NULL) {
- if ((begin = strstr(buf, SYSTEMD_HIERARCHY_STRING))) {
- end = begin = begin + strlen(SYSTEMD_HIERARCHY_STRING);
- if (!*begin)
- break;
- while (isalpha(*end))
- end++;
- *end = 0;
- for (int i = 0; cgroups_systemd_options[i].name; i++) {
- if (!strcmp(begin, cgroups_systemd_options[i].name)) {
- retval = cgroups_systemd_options[i].setting;
- break;
- }
- }
- break;
- }
- }
- }
-
- if (netdata_pclose(fp_child_input, fp_child_output, command_pid))
- return SYSTEMD_CGROUP_ERR;
-
- return retval;
-}
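-
-/*
- * Editorial note: cgroups_detect_systemd() scans the output of
- * `systemd --version` for the compile-time feature list. That output looks
- * roughly like this (exact flags vary per distribution and version):
- *
- *   systemd 249 (249.11-0ubuntu3)
- *   +PAM +AUDIT +SELINUX ... +BPF_FRAMEWORK ... default-hierarchy=unified
- *
- * The word following "default-hierarchy=" is extracted and mapped to one of
- * the legacy/hybrid/unified settings in cgroups_systemd_options[].
- */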
-
-static enum cgroups_type cgroups_try_detect_version()
-{
- pid_t command_pid;
- char buf[MAXSIZE_PROC_CMDLINE];
- enum cgroups_systemd_setting systemd_setting;
- int cgroups2_available = 0;
-
- // 1. check if cgroups2 available on system at all
- FILE *fp_child_input;
- FILE *fp_child_output = netdata_popen("grep cgroup /proc/filesystems", &command_pid, &fp_child_input);
- if (!fp_child_output) {
- collector_error("popen failed");
- return CGROUPS_AUTODETECT_FAIL;
- }
- while (fgets(buf, MAXSIZE_PROC_CMDLINE, fp_child_output) != NULL) {
- if (strstr(buf, "cgroup2")) {
- cgroups2_available = 1;
- break;
- }
- }
- if(netdata_pclose(fp_child_input, fp_child_output, command_pid))
- return CGROUPS_AUTODETECT_FAIL;
-
- if(!cgroups2_available)
- return CGROUPS_V1;
-
-#if defined CGROUP2_SUPER_MAGIC
- // 2. check filesystem type for the default mountpoint
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/fs/cgroup");
- struct statfs fsinfo;
- if (!statfs(filename, &fsinfo)) {
- if (fsinfo.f_type == CGROUP2_SUPER_MAGIC)
- return CGROUPS_V2;
- }
-#endif
-
- // 3. check systemd compiletime setting
- if ((systemd_setting = cgroups_detect_systemd("systemd --version")) == SYSTEMD_CGROUP_ERR)
- systemd_setting = cgroups_detect_systemd(SYSTEMD_CMD_RHEL);
-
- if(systemd_setting == SYSTEMD_CGROUP_ERR)
- return CGROUPS_AUTODETECT_FAIL;
-
- if(systemd_setting == SYSTEMD_CGROUP_LEGACY || systemd_setting == SYSTEMD_CGROUP_HYBRID) {
-        // we currently prefer v1 when HYBRID is set, as it seems to be more
-        // feature complete; in the future we might want to continue here for
-        // SYSTEMD_CGROUP_HYBRID and go ahead with v2
- return CGROUPS_V1;
- }
-
-    // 4. if we are on a unified-only setup, as is the default on Fedora
-    //    (cgroups v2 only), check the kernel command line flag that can override it
- FILE *fp = fopen("/proc/cmdline", "r");
- if (!fp) {
- collector_error("Error reading kernel boot commandline parameters");
- return CGROUPS_AUTODETECT_FAIL;
- }
-
- if (!fgets(buf, MAXSIZE_PROC_CMDLINE, fp)) {
- collector_error("couldn't read all cmdline params into buffer");
- fclose(fp);
- return CGROUPS_AUTODETECT_FAIL;
- }
-
- fclose(fp);
-
- if (strstr(buf, "systemd.unified_cgroup_hierarchy=0")) {
-        collector_info("cgroups v2 (unified cgroups) is available but disabled on this system.");
- return CGROUPS_V1;
- }
- return CGROUPS_V2;
-}
-
-void set_cgroup_base_path(char *filename, char *path) {
- if (strncmp(netdata_configured_host_prefix, path, strlen(netdata_configured_host_prefix)) == 0) {
- snprintfz(filename, FILENAME_MAX, "%s", path);
- } else {
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, path);
- }
-}
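-
-/*
- * Editorial example of the behavior above, assuming
- * netdata_configured_host_prefix = "/host" (as when Netdata runs in a
- * container with the host filesystem mounted there):
- *
- *   set_cgroup_base_path(buf, "/sys/fs/cgroup");      // buf = "/host/sys/fs/cgroup"
- *   set_cgroup_base_path(buf, "/host/sys/fs/cgroup"); // buf = "/host/sys/fs/cgroup" (prefix not duplicated)
- */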
-
-void read_cgroup_plugin_configuration() {
- system_page_size = sysconf(_SC_PAGESIZE);
-
- Read_hash = simple_hash("Read");
- Write_hash = simple_hash("Write");
- user_hash = simple_hash("user");
- system_hash = simple_hash("system");
- user_usec_hash = simple_hash("user_usec");
- system_usec_hash = simple_hash("system_usec");
- nr_periods_hash = simple_hash("nr_periods");
- nr_throttled_hash = simple_hash("nr_throttled");
- throttled_time_hash = simple_hash("throttled_time");
- throttled_usec_hash = simple_hash("throttled_usec");
-
- cgroup_update_every = (int)config_get_number("plugin:cgroups", "update every", localhost->rrd_update_every);
- if(cgroup_update_every < localhost->rrd_update_every)
- cgroup_update_every = localhost->rrd_update_every;
-
- cgroup_check_for_new_every = (int)config_get_number("plugin:cgroups", "check for new cgroups every", (long long)cgroup_check_for_new_every * (long long)cgroup_update_every);
- if(cgroup_check_for_new_every < cgroup_update_every)
- cgroup_check_for_new_every = cgroup_update_every;
-
- cgroup_use_unified_cgroups = config_get_boolean_ondemand("plugin:cgroups", "use unified cgroups", CONFIG_BOOLEAN_AUTO);
- if(cgroup_use_unified_cgroups == CONFIG_BOOLEAN_AUTO)
- cgroup_use_unified_cgroups = (cgroups_try_detect_version() == CGROUPS_V2);
-
- collector_info("use unified cgroups %s", cgroup_use_unified_cgroups ? "true" : "false");
-
- cgroup_containers_chart_priority = (int)config_get_number("plugin:cgroups", "containers priority", cgroup_containers_chart_priority);
- if(cgroup_containers_chart_priority < 1)
- cgroup_containers_chart_priority = NETDATA_CHART_PRIO_CGROUPS_CONTAINERS;
-
- cgroup_enable_cpuacct_stat = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct stat (total CPU)", cgroup_enable_cpuacct_stat);
- cgroup_enable_cpuacct_usage = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct usage (per core CPU)", cgroup_enable_cpuacct_usage);
- cgroup_enable_cpuacct_cpu_throttling = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct cpu throttling", cgroup_enable_cpuacct_cpu_throttling);
- cgroup_enable_cpuacct_cpu_shares = config_get_boolean_ondemand("plugin:cgroups", "enable cpuacct cpu shares", cgroup_enable_cpuacct_cpu_shares);
-
- cgroup_enable_memory = config_get_boolean_ondemand("plugin:cgroups", "enable memory", cgroup_enable_memory);
- cgroup_enable_detailed_memory = config_get_boolean_ondemand("plugin:cgroups", "enable detailed memory", cgroup_enable_detailed_memory);
- cgroup_enable_memory_failcnt = config_get_boolean_ondemand("plugin:cgroups", "enable memory limits fail count", cgroup_enable_memory_failcnt);
- cgroup_enable_swap = config_get_boolean_ondemand("plugin:cgroups", "enable swap memory", cgroup_enable_swap);
-
- cgroup_enable_blkio_io = config_get_boolean_ondemand("plugin:cgroups", "enable blkio bandwidth", cgroup_enable_blkio_io);
- cgroup_enable_blkio_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio operations", cgroup_enable_blkio_ops);
- cgroup_enable_blkio_throttle_io = config_get_boolean_ondemand("plugin:cgroups", "enable blkio throttle bandwidth", cgroup_enable_blkio_throttle_io);
- cgroup_enable_blkio_throttle_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio throttle operations", cgroup_enable_blkio_throttle_ops);
- cgroup_enable_blkio_queued_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio queued operations", cgroup_enable_blkio_queued_ops);
- cgroup_enable_blkio_merged_ops = config_get_boolean_ondemand("plugin:cgroups", "enable blkio merged operations", cgroup_enable_blkio_merged_ops);
-
- cgroup_enable_pressure_cpu = config_get_boolean_ondemand("plugin:cgroups", "enable cpu pressure", cgroup_enable_pressure_cpu);
- cgroup_enable_pressure_io_some = config_get_boolean_ondemand("plugin:cgroups", "enable io some pressure", cgroup_enable_pressure_io_some);
- cgroup_enable_pressure_io_full = config_get_boolean_ondemand("plugin:cgroups", "enable io full pressure", cgroup_enable_pressure_io_full);
- cgroup_enable_pressure_memory_some = config_get_boolean_ondemand("plugin:cgroups", "enable memory some pressure", cgroup_enable_pressure_memory_some);
- cgroup_enable_pressure_memory_full = config_get_boolean_ondemand("plugin:cgroups", "enable memory full pressure", cgroup_enable_pressure_memory_full);
-
- cgroup_recheck_zero_blkio_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero blkio every iterations", cgroup_recheck_zero_blkio_every_iterations);
- cgroup_recheck_zero_mem_failcnt_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero memory failcnt every iterations", cgroup_recheck_zero_mem_failcnt_every_iterations);
- cgroup_recheck_zero_mem_detailed_every_iterations = (int)config_get_number("plugin:cgroups", "recheck zero detailed memory every iterations", cgroup_recheck_zero_mem_detailed_every_iterations);
-
- cgroup_enable_systemd_services = config_get_boolean("plugin:cgroups", "enable systemd services", cgroup_enable_systemd_services);
- cgroup_enable_systemd_services_detailed_memory = config_get_boolean("plugin:cgroups", "enable systemd services detailed memory", cgroup_enable_systemd_services_detailed_memory);
- cgroup_used_memory = config_get_boolean("plugin:cgroups", "report used memory", cgroup_used_memory);
-
- char filename[FILENAME_MAX + 1], *s;
- struct mountinfo *mi, *root = mountinfo_read(0);
- if(!cgroup_use_unified_cgroups) {
- // cgroup v1 does not have pressure metrics
- cgroup_enable_pressure_cpu =
- cgroup_enable_pressure_io_some =
- cgroup_enable_pressure_io_full =
- cgroup_enable_pressure_memory_some =
- cgroup_enable_pressure_memory_full = CONFIG_BOOLEAN_NO;
-
- mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuacct");
- if (!mi)
- mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuacct");
- if (!mi) {
- collector_error("CGROUP: cannot find cpuacct mountinfo. Assuming default: /sys/fs/cgroup/cpuacct");
- s = "/sys/fs/cgroup/cpuacct";
- } else
- s = mi->mount_point;
- set_cgroup_base_path(filename, s);
- cgroup_cpuacct_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/cpuacct", filename);
-
- mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "cpuset");
- if (!mi)
- mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "cpuset");
- if (!mi) {
- collector_error("CGROUP: cannot find cpuset mountinfo. Assuming default: /sys/fs/cgroup/cpuset");
- s = "/sys/fs/cgroup/cpuset";
- } else
- s = mi->mount_point;
- set_cgroup_base_path(filename, s);
- cgroup_cpuset_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/cpuset", filename);
-
- mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "blkio");
- if (!mi)
- mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "blkio");
- if (!mi) {
- collector_error("CGROUP: cannot find blkio mountinfo. Assuming default: /sys/fs/cgroup/blkio");
- s = "/sys/fs/cgroup/blkio";
- } else
- s = mi->mount_point;
- set_cgroup_base_path(filename, s);
- cgroup_blkio_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/blkio", filename);
-
- mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "memory");
- if (!mi)
- mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "memory");
- if (!mi) {
- collector_error("CGROUP: cannot find memory mountinfo. Assuming default: /sys/fs/cgroup/memory");
- s = "/sys/fs/cgroup/memory";
- } else
- s = mi->mount_point;
- set_cgroup_base_path(filename, s);
- cgroup_memory_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/memory", filename);
-
- mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "devices");
- if (!mi)
- mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "devices");
- if (!mi) {
- collector_error("CGROUP: cannot find devices mountinfo. Assuming default: /sys/fs/cgroup/devices");
- s = "/sys/fs/cgroup/devices";
- } else
- s = mi->mount_point;
- set_cgroup_base_path(filename, s);
- cgroup_devices_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/devices", filename);
-
- mi = mountinfo_find_by_filesystem_super_option(root, "cgroup", "pids");
- if (!mi)
- mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup", "pids");
- if (!mi) {
- collector_error("CGROUP: cannot find pids mountinfo. Assuming default: /sys/fs/cgroup/pids");
- s = "/sys/fs/cgroup/pids";
- } else
- s = mi->mount_point;
- set_cgroup_base_path(filename, s);
- cgroup_pids_base = config_get("plugin:cgroups", "path to /sys/fs/cgroup/pids", filename);
- }
- else {
- //cgroup_enable_cpuacct_stat =
- cgroup_enable_cpuacct_usage =
- //cgroup_enable_memory =
- //cgroup_enable_detailed_memory =
- cgroup_enable_memory_failcnt =
- //cgroup_enable_swap =
- //cgroup_enable_blkio_io =
- //cgroup_enable_blkio_ops =
- cgroup_enable_blkio_throttle_io =
- cgroup_enable_blkio_throttle_ops =
- cgroup_enable_blkio_merged_ops =
- cgroup_enable_blkio_queued_ops = CONFIG_BOOLEAN_NO;
- cgroup_search_in_devices = 0;
- cgroup_enable_systemd_services_detailed_memory = CONFIG_BOOLEAN_NO;
- cgroup_used_memory = CONFIG_BOOLEAN_NO; //unified cgroups use different values
-
- //TODO: can there be more than 1 cgroup2 mount point?
- //there is no cgroup2 specific super option - for now use 'rw' option
- mi = mountinfo_find_by_filesystem_super_option(root, "cgroup2", "rw");
- if (!mi) {
- mi = mountinfo_find_by_filesystem_mount_source(root, "cgroup2", "cgroup");
- }
- if (!mi) {
- collector_error("CGROUP: cannot find cgroup2 mountinfo. Assuming default: /sys/fs/cgroup");
- s = "/sys/fs/cgroup";
- } else
- s = mi->mount_point;
-
- set_cgroup_base_path(filename, s);
- cgroup_unified_base = config_get("plugin:cgroups", "path to unified cgroups", filename);
- }
-
- cgroup_root_max = (int)config_get_number("plugin:cgroups", "max cgroups to allow", cgroup_root_max);
- cgroup_max_depth = (int)config_get_number("plugin:cgroups", "max cgroups depth to monitor", cgroup_max_depth);
-
- enabled_cgroup_paths = simple_pattern_create(
- config_get("plugin:cgroups", "enable by default cgroups matching",
- // ----------------------------------------------------------------
-
- " !*/init.scope " // ignore init.scope
- " !/system.slice/run-*.scope " // ignore system.slice/run-XXXX.scope
- " *.scope " // we need all other *.scope for sure
-
- // ----------------------------------------------------------------
-
- " /machine.slice/*.service " // #3367 systemd-nspawn
-
- // ----------------------------------------------------------------
-
- " */kubepods/pod*/* " // k8s containers
- " */kubepods/*/pod*/* " // k8s containers
- " */*-kubepods-pod*/* " // k8s containers
- " */*-kubepods-*-pod*/* " // k8s containers
- " !*kubepods* !*kubelet* " // all other k8s cgroups
-
- // ----------------------------------------------------------------
-
- " !*/vcpu* " // libvirtd adds these sub-cgroups
- " !*/emulator " // libvirtd adds these sub-cgroups
- " !*.mount "
- " !*.partition "
- " !*.service "
- " !*.service/udev "
- " !*.socket "
- " !*.slice "
- " !*.swap "
- " !*.user "
- " !/ "
- " !/docker "
- " !*/libvirt "
- " !/lxc "
- " !/lxc/*/* " // #1397 #2649
- " !/lxc.monitor* "
- " !/lxc.pivot "
- " !/lxc.payload "
- " !/machine "
- " !/qemu "
- " !/system "
- " !/systemd "
- " !/user "
- " * " // enable anything else
- ), NULL, SIMPLE_PATTERN_EXACT, true);
-
- enabled_cgroup_names = simple_pattern_create(
- config_get("plugin:cgroups", "enable by default cgroups names matching",
- " * "
- ), NULL, SIMPLE_PATTERN_EXACT, true);
-
- search_cgroup_paths = simple_pattern_create(
- config_get("plugin:cgroups", "search for cgroups in subpaths matching",
- " !*/init.scope " // ignore init.scope
- " !*-qemu " // #345
- " !*.libvirt-qemu " // #3010
- " !/init.scope "
- " !/system "
- " !/systemd "
- " !/user "
- " !/user.slice "
- " !/lxc/*/* " // #2161 #2649
- " !/lxc.monitor "
- " !/lxc.payload/*/* "
- " !/lxc.payload.* "
- " * "
- ), NULL, SIMPLE_PATTERN_EXACT, true);
-
- snprintfz(filename, FILENAME_MAX, "%s/cgroup-name.sh", netdata_configured_primary_plugins_dir);
- cgroups_rename_script = config_get("plugin:cgroups", "script to get cgroup names", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s/cgroup-network", netdata_configured_primary_plugins_dir);
- cgroups_network_interface_script = config_get("plugin:cgroups", "script to get cgroup network interfaces", filename);
-
- enabled_cgroup_renames = simple_pattern_create(
- config_get("plugin:cgroups", "run script to rename cgroups matching",
- " !/ "
- " !*.mount "
- " !*.socket "
- " !*.partition "
- " /machine.slice/*.service " // #3367 systemd-nspawn
- " !*.service "
- " !*.slice "
- " !*.swap "
- " !*.user "
- " !init.scope "
- " !*.scope/vcpu* " // libvirtd adds these sub-cgroups
- " !*.scope/emulator " // libvirtd adds these sub-cgroups
- " *.scope "
- " *docker* "
- " *lxc* "
- " *qemu* "
- " */kubepods/pod*/* " // k8s containers
- " */kubepods/*/pod*/* " // k8s containers
- " */*-kubepods-pod*/* " // k8s containers
- " */*-kubepods-*-pod*/* " // k8s containers
- " !*kubepods* !*kubelet* " // all other k8s cgroups
- " *.libvirt-qemu " // #3010
- " * "
- ), NULL, SIMPLE_PATTERN_EXACT, true);
-
- if(cgroup_enable_systemd_services) {
- systemd_services_cgroups = simple_pattern_create(
- config_get("plugin:cgroups", "cgroups to match as systemd services",
- " !/system.slice/*/*.service "
- " /system.slice/*.service "
- ), NULL, SIMPLE_PATTERN_EXACT, true);
- }
-
- mountinfo_free_all(root);
-}
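-
-/*
- * Editorial note on the simple patterns above: expressions are separated by
- * spaces, '*' is a wildcard, a leading '!' negates a match, and the first
- * matching expression wins. With the default "enable by default cgroups
- * matching" list, for example (illustrative evaluations):
- *
- *   /system.slice/run-1234.scope  -> excluded by "!/system.slice/run-*.scope"
- *   /docker/abcdef                -> matches no earlier expression, enabled by the final "*"
- */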
-
-void netdata_cgroup_ebpf_set_values(size_t length)
-{
- sem_wait(shm_mutex_cgroup_ebpf);
-
- shm_cgroup_ebpf.header->cgroup_max = cgroup_root_max;
- shm_cgroup_ebpf.header->systemd_enabled = cgroup_enable_systemd_services |
- cgroup_enable_systemd_services_detailed_memory |
- cgroup_used_memory;
- shm_cgroup_ebpf.header->body_length = length;
-
- sem_post(shm_mutex_cgroup_ebpf);
-}
-
-void netdata_cgroup_ebpf_initialize_shm()
-{
- shm_fd_cgroup_ebpf = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_CREAT | O_RDWR, 0660);
- if (shm_fd_cgroup_ebpf < 0) {
- collector_error("Cannot initialize shared memory used by cgroup and eBPF, integration won't happen.");
- return;
- }
-
- size_t length = sizeof(netdata_ebpf_cgroup_shm_header_t) + cgroup_root_max * sizeof(netdata_ebpf_cgroup_shm_body_t);
- if (ftruncate(shm_fd_cgroup_ebpf, length)) {
- collector_error("Cannot set size for shared memory.");
- goto end_init_shm;
- }
-
- shm_cgroup_ebpf.header = (netdata_ebpf_cgroup_shm_header_t *) mmap(NULL, length,
- PROT_READ | PROT_WRITE, MAP_SHARED,
- shm_fd_cgroup_ebpf, 0);
-
- if (unlikely(MAP_FAILED == shm_cgroup_ebpf.header)) {
- shm_cgroup_ebpf.header = NULL;
- collector_error("Cannot map shared memory used between cgroup and eBPF, integration won't happen");
- goto end_init_shm;
- }
- shm_cgroup_ebpf.body = (netdata_ebpf_cgroup_shm_body_t *) ((char *)shm_cgroup_ebpf.header +
- sizeof(netdata_ebpf_cgroup_shm_header_t));
-
- shm_mutex_cgroup_ebpf = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, O_CREAT,
- S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH, 1);
-
- if (shm_mutex_cgroup_ebpf != SEM_FAILED) {
- netdata_cgroup_ebpf_set_values(length);
- return;
- }
-
- collector_error("Cannot create semaphore, integration between eBPF and cgroup won't happen");
- munmap(shm_cgroup_ebpf.header, length);
- shm_cgroup_ebpf.header = NULL;
-
-end_init_shm:
- close(shm_fd_cgroup_ebpf);
- shm_fd_cgroup_ebpf = -1;
- shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
-}
-
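-/*
- * Editorial sketch (not part of the plugin): how a consumer such as the eBPF
- * plugin could attach to the shared memory and semaphore created above. The
- * netdata_ebpf_cgroup_shm_header_t type and the NETDATA_* names are the same
- * ones used in this file; everything else here is illustrative and assumes
- * the headers already included by this file.
- */
-__attribute__((unused)) static netdata_ebpf_cgroup_shm_header_t *example_attach_cgroup_shm(void) {
-    int fd = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660);
-    if (fd < 0)
-        return NULL;
-
-    struct stat st;
-    if (fstat(fd, &st) != 0 || st.st_size == 0) {
-        close(fd);
-        return NULL;
-    }
-
-    void *mem = mmap(NULL, (size_t)st.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-    close(fd); // the mapping remains valid after the descriptor is closed
-    if (mem == MAP_FAILED)
-        return NULL;
-
-    // readers and the writer serialize on the same named semaphore
-    sem_t *sem = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, 0);
-    if (sem != SEM_FAILED) {
-        sem_wait(sem);
-        // ... consume header and body here ...
-        sem_post(sem);
-        sem_close(sem);
-    }
-
-    return (netdata_ebpf_cgroup_shm_header_t *)mem;
-}
-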
-// ---------------------------------------------------------------------------------------------
-
-static unsigned long long calc_delta(unsigned long long curr, unsigned long long prev) {
- if (prev > curr) {
- return 0;
- }
- return curr - prev;
-}
-
-static unsigned long long calc_percentage(unsigned long long value, unsigned long long total) {
- if (total == 0) {
- return 0;
- }
- return (unsigned long long)((NETDATA_DOUBLE)value / (NETDATA_DOUBLE)total * 100);
-}
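-
-/*
- * Editorial example: these two helpers combine to produce the throttling
- * percentage from two consecutive cpu.stat samples. If nr_periods goes
- * 100 -> 110 and nr_throttled goes 20 -> 25 between reads, then:
- *
- *   calc_percentage(calc_delta(25, 20), calc_delta(110, 100)) = 5 / 10 * 100 = 50
- *
- * i.e. the cgroup was throttled in 50% of its CFS enforcement periods since
- * the previous iteration.
- */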
-
-// ----------------------------------------------------------------------------
-// read values from /sys
-
-static inline void cgroup_read_cpuacct_stat(struct cpuacct_stat *cp) {
- static procfile *ff = NULL;
-
- if(likely(cp->filename)) {
- ff = procfile_reopen(ff, cp->filename, NULL, CGROUP_PROCFILE_FLAG);
- if(unlikely(!ff)) {
- cp->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) {
- cp->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- unsigned long i, lines = procfile_lines(ff);
-
- if(unlikely(lines < 1)) {
- collector_error("CGROUP: file '%s' should have 1+ lines.", cp->filename);
- cp->updated = 0;
- return;
- }
-
- for(i = 0; i < lines ; i++) {
- char *s = procfile_lineword(ff, i, 0);
- uint32_t hash = simple_hash(s);
-
- if(unlikely(hash == user_hash && !strcmp(s, "user")))
- cp->user = str2ull(procfile_lineword(ff, i, 1), NULL);
-
- else if(unlikely(hash == system_hash && !strcmp(s, "system")))
- cp->system = str2ull(procfile_lineword(ff, i, 1), NULL);
- }
-
- cp->updated = 1;
-
- if(unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO &&
- (cp->user || cp->system || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
- cp->enabled = CONFIG_BOOLEAN_YES;
- }
-}
-
-static inline void cgroup_read_cpuacct_cpu_stat(struct cpuacct_cpu_throttling *cp) {
- if (unlikely(!cp->filename)) {
- return;
- }
-
- static procfile *ff = NULL;
- ff = procfile_reopen(ff, cp->filename, NULL, CGROUP_PROCFILE_FLAG);
- if (unlikely(!ff)) {
- cp->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- ff = procfile_readall(ff);
- if (unlikely(!ff)) {
- cp->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- unsigned long lines = procfile_lines(ff);
- if (unlikely(lines < 3)) {
-        collector_error("CGROUP: file '%s' should have at least 3 lines.", cp->filename);
- cp->updated = 0;
- return;
- }
-
- unsigned long long nr_periods_last = cp->nr_periods;
- unsigned long long nr_throttled_last = cp->nr_throttled;
-
- for (unsigned long i = 0; i < lines; i++) {
- char *s = procfile_lineword(ff, i, 0);
- uint32_t hash = simple_hash(s);
-
- if (unlikely(hash == nr_periods_hash && !strcmp(s, "nr_periods"))) {
- cp->nr_periods = str2ull(procfile_lineword(ff, i, 1), NULL);
- } else if (unlikely(hash == nr_throttled_hash && !strcmp(s, "nr_throttled"))) {
- cp->nr_throttled = str2ull(procfile_lineword(ff, i, 1), NULL);
- } else if (unlikely(hash == throttled_time_hash && !strcmp(s, "throttled_time"))) {
- cp->throttled_time = str2ull(procfile_lineword(ff, i, 1), NULL);
- }
- }
- cp->nr_throttled_perc =
- calc_percentage(calc_delta(cp->nr_throttled, nr_throttled_last), calc_delta(cp->nr_periods, nr_periods_last));
-
- cp->updated = 1;
-
- if (unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO)) {
- if (likely(
- cp->nr_periods || cp->nr_throttled || cp->throttled_time ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)) {
- cp->enabled = CONFIG_BOOLEAN_YES;
- }
- }
-}
-
-static inline void cgroup2_read_cpuacct_cpu_stat(struct cpuacct_stat *cp, struct cpuacct_cpu_throttling *cpt) {
- static procfile *ff = NULL;
- if (unlikely(!cp->filename)) {
- return;
- }
-
- ff = procfile_reopen(ff, cp->filename, NULL, CGROUP_PROCFILE_FLAG);
- if (unlikely(!ff)) {
- cp->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- ff = procfile_readall(ff);
- if (unlikely(!ff)) {
- cp->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- unsigned long lines = procfile_lines(ff);
-
- if (unlikely(lines < 3)) {
- collector_error("CGROUP: file '%s' should have at least 3 lines.", cp->filename);
- cp->updated = 0;
- return;
- }
-
- unsigned long long nr_periods_last = cpt->nr_periods;
- unsigned long long nr_throttled_last = cpt->nr_throttled;
-
- for (unsigned long i = 0; i < lines; i++) {
- char *s = procfile_lineword(ff, i, 0);
- uint32_t hash = simple_hash(s);
-
- if (unlikely(hash == user_usec_hash && !strcmp(s, "user_usec"))) {
- cp->user = str2ull(procfile_lineword(ff, i, 1), NULL);
- } else if (unlikely(hash == system_usec_hash && !strcmp(s, "system_usec"))) {
- cp->system = str2ull(procfile_lineword(ff, i, 1), NULL);
- } else if (unlikely(hash == nr_periods_hash && !strcmp(s, "nr_periods"))) {
- cpt->nr_periods = str2ull(procfile_lineword(ff, i, 1), NULL);
- } else if (unlikely(hash == nr_throttled_hash && !strcmp(s, "nr_throttled"))) {
- cpt->nr_throttled = str2ull(procfile_lineword(ff, i, 1), NULL);
- } else if (unlikely(hash == throttled_usec_hash && !strcmp(s, "throttled_usec"))) {
- cpt->throttled_time = str2ull(procfile_lineword(ff, i, 1), NULL) * 1000; // usec -> ns
- }
- }
- cpt->nr_throttled_perc =
- calc_percentage(calc_delta(cpt->nr_throttled, nr_throttled_last), calc_delta(cpt->nr_periods, nr_periods_last));
-
- cp->updated = 1;
- cpt->updated = 1;
-
- if (unlikely(cp->enabled == CONFIG_BOOLEAN_AUTO)) {
- if (likely(cp->user || cp->system || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)) {
- cp->enabled = CONFIG_BOOLEAN_YES;
- }
- }
- if (unlikely(cpt->enabled == CONFIG_BOOLEAN_AUTO)) {
- if (likely(
- cpt->nr_periods || cpt->nr_throttled || cpt->throttled_time ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)) {
- cpt->enabled = CONFIG_BOOLEAN_YES;
- }
- }
-}
-
-static inline void cgroup_read_cpuacct_cpu_shares(struct cpuacct_cpu_shares *cp) {
- if (unlikely(!cp->filename)) {
- return;
- }
-
- if (unlikely(read_single_number_file(cp->filename, &cp->shares))) {
- cp->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- cp->updated = 1;
- if (unlikely((cp->enabled == CONFIG_BOOLEAN_AUTO)) &&
- (cp->shares || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)) {
- cp->enabled = CONFIG_BOOLEAN_YES;
- }
-}
-
-static inline void cgroup_read_cpuacct_usage(struct cpuacct_usage *ca) {
- static procfile *ff = NULL;
-
- if(likely(ca->filename)) {
- ff = procfile_reopen(ff, ca->filename, NULL, CGROUP_PROCFILE_FLAG);
- if(unlikely(!ff)) {
- ca->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) {
- ca->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- if(unlikely(procfile_lines(ff) < 1)) {
- collector_error("CGROUP: file '%s' should have 1+ lines but has %zu.", ca->filename, procfile_lines(ff));
- ca->updated = 0;
- return;
- }
-
- unsigned long i = procfile_linewords(ff, 0);
- if(unlikely(i == 0)) {
- ca->updated = 0;
- return;
- }
-
-        // the file may report one more (empty) CPU at the end - trim trailing empty words
- while(i > 0) {
- char *s = procfile_lineword(ff, 0, i - 1);
- if(!*s) i--;
- else break;
- }
-
- if(unlikely(i != ca->cpus)) {
- freez(ca->cpu_percpu);
- ca->cpu_percpu = mallocz(sizeof(unsigned long long) * i);
- ca->cpus = (unsigned int)i;
- }
-
- unsigned long long total = 0;
- for(i = 0; i < ca->cpus ;i++) {
- unsigned long long n = str2ull(procfile_lineword(ff, 0, i), NULL);
- ca->cpu_percpu[i] = n;
- total += n;
- }
-
- ca->updated = 1;
-
- if(unlikely(ca->enabled == CONFIG_BOOLEAN_AUTO &&
- (total || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
- ca->enabled = CONFIG_BOOLEAN_YES;
- }
-}
-
-static inline void cgroup_read_blkio(struct blkio *io) {
- if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO && io->delay_counter > 0)) {
- io->delay_counter--;
- return;
- }
-
- if(likely(io->filename)) {
- static procfile *ff = NULL;
-
- ff = procfile_reopen(ff, io->filename, NULL, CGROUP_PROCFILE_FLAG);
- if(unlikely(!ff)) {
- io->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) {
- io->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- unsigned long i, lines = procfile_lines(ff);
-
- if(unlikely(lines < 1)) {
- collector_error("CGROUP: file '%s' should have 1+ lines.", io->filename);
- io->updated = 0;
- return;
- }
-
- io->Read = 0;
- io->Write = 0;
-/*
- io->Sync = 0;
- io->Async = 0;
- io->Total = 0;
-*/
-
- for(i = 0; i < lines ; i++) {
- char *s = procfile_lineword(ff, i, 1);
- uint32_t hash = simple_hash(s);
-
- if(unlikely(hash == Read_hash && !strcmp(s, "Read")))
- io->Read += str2ull(procfile_lineword(ff, i, 2), NULL);
-
- else if(unlikely(hash == Write_hash && !strcmp(s, "Write")))
- io->Write += str2ull(procfile_lineword(ff, i, 2), NULL);
-
-/*
- else if(unlikely(hash == Sync_hash && !strcmp(s, "Sync")))
- io->Sync += str2ull(procfile_lineword(ff, i, 2));
-
- else if(unlikely(hash == Async_hash && !strcmp(s, "Async")))
- io->Async += str2ull(procfile_lineword(ff, i, 2));
-
- else if(unlikely(hash == Total_hash && !strcmp(s, "Total")))
- io->Total += str2ull(procfile_lineword(ff, i, 2));
-*/
- }
-
- io->updated = 1;
-
- if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO)) {
- if(unlikely(io->Read || io->Write || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))
- io->enabled = CONFIG_BOOLEAN_YES;
- else
- io->delay_counter = cgroup_recheck_zero_blkio_every_iterations;
- }
- }
-}
-
-static inline void cgroup2_read_blkio(struct blkio *io, unsigned int word_offset) {
- if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO && io->delay_counter > 0)) {
- io->delay_counter--;
- return;
- }
-
- if(likely(io->filename)) {
- static procfile *ff = NULL;
-
- ff = procfile_reopen(ff, io->filename, NULL, CGROUP_PROCFILE_FLAG);
- if(unlikely(!ff)) {
- io->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) {
- io->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- unsigned long i, lines = procfile_lines(ff);
-
- if (unlikely(lines < 1)) {
- collector_error("CGROUP: file '%s' should have 1+ lines.", io->filename);
- io->updated = 0;
- return;
- }
-
- io->Read = 0;
- io->Write = 0;
-
- for (i = 0; i < lines; i++) {
- io->Read += str2ull(procfile_lineword(ff, i, 2 + word_offset), NULL);
- io->Write += str2ull(procfile_lineword(ff, i, 4 + word_offset), NULL);
- }
-
- io->updated = 1;
-
- if(unlikely(io->enabled == CONFIG_BOOLEAN_AUTO)) {
- if(unlikely(io->Read || io->Write || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))
- io->enabled = CONFIG_BOOLEAN_YES;
- else
- io->delay_counter = cgroup_recheck_zero_blkio_every_iterations;
- }
- }
-}
-
-static inline void cgroup2_read_pressure(struct pressure *res) {
- static procfile *ff = NULL;
-
- if (likely(res->filename)) {
- ff = procfile_reopen(ff, res->filename, " =", CGROUP_PROCFILE_FLAG);
- if (unlikely(!ff)) {
- res->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- ff = procfile_readall(ff);
- if (unlikely(!ff)) {
- res->updated = 0;
- cgroups_check = 1;
- return;
- }
-
- size_t lines = procfile_lines(ff);
- if (lines < 1) {
- collector_error("CGROUP: file '%s' should have 1+ lines.", res->filename);
- res->updated = 0;
- return;
- }
-
- bool did_some = false, did_full = false;
-
- for(size_t l = 0; l < lines ;l++) {
- const char *key = procfile_lineword(ff, l, 0);
- if(strcmp(key, "some") == 0) {
- res->some.share_time.value10 = strtod(procfile_lineword(ff, l, 2), NULL);
- res->some.share_time.value60 = strtod(procfile_lineword(ff, l, 4), NULL);
- res->some.share_time.value300 = strtod(procfile_lineword(ff, l, 6), NULL);
- res->some.total_time.value_total = str2ull(procfile_lineword(ff, l, 8), NULL) / 1000; // us->ms
- did_some = true;
- }
- else if(strcmp(key, "full") == 0) {
- res->full.share_time.value10 = strtod(procfile_lineword(ff, l, 2), NULL);
- res->full.share_time.value60 = strtod(procfile_lineword(ff, l, 4), NULL);
- res->full.share_time.value300 = strtod(procfile_lineword(ff, l, 6), NULL);
- res->full.total_time.value_total = str2ull(procfile_lineword(ff, l, 8), NULL) / 1000; // us->ms
- did_full = true;
- }
- }
-
- res->updated = (did_full || did_some) ? 1 : 0;
-
- if(unlikely(res->some.enabled == CONFIG_BOOLEAN_AUTO))
- res->some.enabled = (did_some) ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_NO;
-
- if(unlikely(res->full.enabled == CONFIG_BOOLEAN_AUTO))
- res->full.enabled = (did_full) ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_NO;
- }
-}
-
-static inline void cgroup_read_memory(struct memory *mem, char parent_cg_is_unified) {
- static procfile *ff = NULL;
-
- // read detailed ram usage
- if(likely(mem->filename_detailed)) {
- if(unlikely(mem->enabled_detailed == CONFIG_BOOLEAN_AUTO && mem->delay_counter_detailed > 0)) {
- mem->delay_counter_detailed--;
- goto memory_next;
- }
-
- ff = procfile_reopen(ff, mem->filename_detailed, NULL, CGROUP_PROCFILE_FLAG);
- if(unlikely(!ff)) {
- mem->updated_detailed = 0;
- cgroups_check = 1;
- goto memory_next;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) {
- mem->updated_detailed = 0;
- cgroups_check = 1;
- goto memory_next;
- }
-
- unsigned long i, lines = procfile_lines(ff);
-
- if(unlikely(lines < 1)) {
- collector_error("CGROUP: file '%s' should have 1+ lines.", mem->filename_detailed);
- mem->updated_detailed = 0;
- goto memory_next;
- }
-
-
- if(unlikely(!mem->arl_base)) {
- if(parent_cg_is_unified == 0){
- mem->arl_base = arl_create("cgroup/memory", NULL, 60);
-
- arl_expect(mem->arl_base, "total_cache", &mem->total_cache);
- arl_expect(mem->arl_base, "total_rss", &mem->total_rss);
- arl_expect(mem->arl_base, "total_rss_huge", &mem->total_rss_huge);
- arl_expect(mem->arl_base, "total_mapped_file", &mem->total_mapped_file);
- arl_expect(mem->arl_base, "total_writeback", &mem->total_writeback);
- mem->arl_dirty = arl_expect(mem->arl_base, "total_dirty", &mem->total_dirty);
- mem->arl_swap = arl_expect(mem->arl_base, "total_swap", &mem->total_swap);
- arl_expect(mem->arl_base, "total_pgpgin", &mem->total_pgpgin);
- arl_expect(mem->arl_base, "total_pgpgout", &mem->total_pgpgout);
- arl_expect(mem->arl_base, "total_pgfault", &mem->total_pgfault);
- arl_expect(mem->arl_base, "total_pgmajfault", &mem->total_pgmajfault);
- arl_expect(mem->arl_base, "total_inactive_file", &mem->total_inactive_file);
- } else {
- mem->arl_base = arl_create("cgroup/memory", NULL, 60);
-
- arl_expect(mem->arl_base, "anon", &mem->anon);
- arl_expect(mem->arl_base, "kernel_stack", &mem->kernel_stack);
- arl_expect(mem->arl_base, "slab", &mem->slab);
- arl_expect(mem->arl_base, "sock", &mem->sock);
- arl_expect(mem->arl_base, "anon_thp", &mem->anon_thp);
- arl_expect(mem->arl_base, "file", &mem->total_mapped_file);
- arl_expect(mem->arl_base, "file_writeback", &mem->total_writeback);
- mem->arl_dirty = arl_expect(mem->arl_base, "file_dirty", &mem->total_dirty);
- arl_expect(mem->arl_base, "pgfault", &mem->total_pgfault);
- arl_expect(mem->arl_base, "pgmajfault", &mem->total_pgmajfault);
- arl_expect(mem->arl_base, "inactive_file", &mem->total_inactive_file);
- }
- }
-
- arl_begin(mem->arl_base);
-
- for(i = 0; i < lines ; i++) {
- if(arl_check(mem->arl_base,
- procfile_lineword(ff, i, 0),
- procfile_lineword(ff, i, 1))) break;
- }
-
- if(unlikely(mem->arl_dirty->flags & ARL_ENTRY_FLAG_FOUND))
- mem->detailed_has_dirty = 1;
-
- if(unlikely(parent_cg_is_unified == 0 && mem->arl_swap->flags & ARL_ENTRY_FLAG_FOUND))
- mem->detailed_has_swap = 1;
-
- // fprintf(stderr, "READ: '%s', cache: %llu, rss: %llu, rss_huge: %llu, mapped_file: %llu, writeback: %llu, dirty: %llu, swap: %llu, pgpgin: %llu, pgpgout: %llu, pgfault: %llu, pgmajfault: %llu, inactive_anon: %llu, active_anon: %llu, inactive_file: %llu, active_file: %llu, unevictable: %llu, hierarchical_memory_limit: %llu, total_cache: %llu, total_rss: %llu, total_rss_huge: %llu, total_mapped_file: %llu, total_writeback: %llu, total_dirty: %llu, total_swap: %llu, total_pgpgin: %llu, total_pgpgout: %llu, total_pgfault: %llu, total_pgmajfault: %llu, total_inactive_anon: %llu, total_active_anon: %llu, total_inactive_file: %llu, total_active_file: %llu, total_unevictable: %llu\n", mem->filename, mem->cache, mem->rss, mem->rss_huge, mem->mapped_file, mem->writeback, mem->dirty, mem->swap, mem->pgpgin, mem->pgpgout, mem->pgfault, mem->pgmajfault, mem->inactive_anon, mem->active_anon, mem->inactive_file, mem->active_file, mem->unevictable, mem->hierarchical_memory_limit, mem->total_cache, mem->total_rss, mem->total_rss_huge, mem->total_mapped_file, mem->total_writeback, mem->total_dirty, mem->total_swap, mem->total_pgpgin, mem->total_pgpgout, mem->total_pgfault, mem->total_pgmajfault, mem->total_inactive_anon, mem->total_active_anon, mem->total_inactive_file, mem->total_active_file, mem->total_unevictable);
-
- mem->updated_detailed = 1;
-
- if(unlikely(mem->enabled_detailed == CONFIG_BOOLEAN_AUTO)) {
- if(( (!parent_cg_is_unified) && ( mem->total_cache || mem->total_dirty || mem->total_rss || mem->total_rss_huge || mem->total_mapped_file || mem->total_writeback
- || mem->total_swap || mem->total_pgpgin || mem->total_pgpgout || mem->total_pgfault || mem->total_pgmajfault || mem->total_inactive_file))
- || (parent_cg_is_unified && ( mem->anon || mem->total_dirty || mem->kernel_stack || mem->slab || mem->sock || mem->total_writeback
- || mem->anon_thp || mem->total_pgfault || mem->total_pgmajfault || mem->total_inactive_file))
- || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)
- mem->enabled_detailed = CONFIG_BOOLEAN_YES;
- else
- mem->delay_counter_detailed = cgroup_recheck_zero_mem_detailed_every_iterations;
- }
- }
-
-memory_next:
-
- // read usage_in_bytes
- if(likely(mem->filename_usage_in_bytes)) {
- mem->updated_usage_in_bytes = !read_single_number_file(mem->filename_usage_in_bytes, &mem->usage_in_bytes);
- if(unlikely(mem->updated_usage_in_bytes && mem->enabled_usage_in_bytes == CONFIG_BOOLEAN_AUTO &&
- (mem->usage_in_bytes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
- mem->enabled_usage_in_bytes = CONFIG_BOOLEAN_YES;
- }
-
- if (likely(mem->updated_usage_in_bytes && mem->updated_detailed)) {
- mem->usage_in_bytes =
- (mem->usage_in_bytes > mem->total_inactive_file) ? (mem->usage_in_bytes - mem->total_inactive_file) : 0;
- }
-
- // read msw_usage_in_bytes
- if(likely(mem->filename_msw_usage_in_bytes)) {
- mem->updated_msw_usage_in_bytes = !read_single_number_file(mem->filename_msw_usage_in_bytes, &mem->msw_usage_in_bytes);
- if(unlikely(mem->updated_msw_usage_in_bytes && mem->enabled_msw_usage_in_bytes == CONFIG_BOOLEAN_AUTO &&
- (mem->msw_usage_in_bytes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
- mem->enabled_msw_usage_in_bytes = CONFIG_BOOLEAN_YES;
- }
-
- // read failcnt
- if(likely(mem->filename_failcnt)) {
- if(unlikely(mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO && mem->delay_counter_failcnt > 0)) {
- mem->updated_failcnt = 0;
- mem->delay_counter_failcnt--;
- }
- else {
- mem->updated_failcnt = !read_single_number_file(mem->filename_failcnt, &mem->failcnt);
- if(unlikely(mem->updated_failcnt && mem->enabled_failcnt == CONFIG_BOOLEAN_AUTO)) {
- if(unlikely(mem->failcnt || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))
- mem->enabled_failcnt = CONFIG_BOOLEAN_YES;
- else
- mem->delay_counter_failcnt = cgroup_recheck_zero_mem_failcnt_every_iterations;
- }
- }
- }
-}
-
-static void cgroup_read_pids_current(struct pids *pids) {
- pids->pids_current_updated = 0;
-
- if (unlikely(!pids->pids_current_filename))
- return;
-
- pids->pids_current_updated = !read_single_number_file(pids->pids_current_filename, &pids->pids_current);
-}
-
-static inline void read_cgroup(struct cgroup *cg) {
- netdata_log_debug(D_CGROUP, "reading metrics for cgroups '%s'", cg->id);
- if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- cgroup_read_cpuacct_stat(&cg->cpuacct_stat);
- cgroup_read_cpuacct_usage(&cg->cpuacct_usage);
- cgroup_read_cpuacct_cpu_stat(&cg->cpuacct_cpu_throttling);
- cgroup_read_cpuacct_cpu_shares(&cg->cpuacct_cpu_shares);
- cgroup_read_memory(&cg->memory, 0);
- cgroup_read_blkio(&cg->io_service_bytes);
- cgroup_read_blkio(&cg->io_serviced);
- cgroup_read_blkio(&cg->throttle_io_service_bytes);
- cgroup_read_blkio(&cg->throttle_io_serviced);
- cgroup_read_blkio(&cg->io_merged);
- cgroup_read_blkio(&cg->io_queued);
- cgroup_read_pids_current(&cg->pids);
- }
- else {
-        // TODO: io_service_bytes and io_serviced use the same file - merge them into one function
- cgroup2_read_blkio(&cg->io_service_bytes, 0);
- cgroup2_read_blkio(&cg->io_serviced, 4);
- cgroup2_read_cpuacct_cpu_stat(&cg->cpuacct_stat, &cg->cpuacct_cpu_throttling);
- cgroup_read_cpuacct_cpu_shares(&cg->cpuacct_cpu_shares);
- cgroup2_read_pressure(&cg->cpu_pressure);
- cgroup2_read_pressure(&cg->io_pressure);
- cgroup2_read_pressure(&cg->memory_pressure);
- cgroup2_read_pressure(&cg->irq_pressure);
- cgroup_read_memory(&cg->memory, 1);
- cgroup_read_pids_current(&cg->pids);
- }
-}
-
-static inline void read_all_discovered_cgroups(struct cgroup *root) {
- netdata_log_debug(D_CGROUP, "reading metrics for all cgroups");
-
- struct cgroup *cg;
- for (cg = root; cg; cg = cg->next) {
- if (cg->enabled && !cg->pending_renames) {
- read_cgroup(cg);
- }
- }
-}
-
-// update CPU and memory limits
-
-static inline void update_cpu_limits(char **filename, unsigned long long *value, struct cgroup *cg) {
- if(*filename) {
- int ret = -1;
-
- if(value == &cg->cpuset_cpus) {
- unsigned long ncpus = read_cpuset_cpus(*filename, get_system_cpus());
- if(ncpus) {
- *value = ncpus;
- ret = 0;
- }
- }
- else if(value == &cg->cpu_cfs_period || value == &cg->cpu_cfs_quota) {
- ret = read_single_number_file(*filename, value);
- }
- else ret = -1;
-
- if(ret) {
- collector_error("Cannot refresh cgroup %s cpu limit by reading '%s'. Will not update its limit anymore.", cg->id, *filename);
- freez(*filename);
- *filename = NULL;
- }
- }
-}
-
-static inline void update_cpu_limits2(struct cgroup *cg) {
- if(cg->filename_cpu_cfs_quota){
- static procfile *ff = NULL;
-
- ff = procfile_reopen(ff, cg->filename_cpu_cfs_quota, NULL, CGROUP_PROCFILE_FLAG);
- if(unlikely(!ff)) {
- goto cpu_limits2_err;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) {
- goto cpu_limits2_err;
- }
-
- unsigned long lines = procfile_lines(ff);
-
- if (unlikely(lines < 1)) {
-            collector_error("CGROUP: file '%s' should have at least 1 line.", cg->filename_cpu_cfs_quota);
- return;
- }
-
- cg->cpu_cfs_period = str2ull(procfile_lineword(ff, 0, 1), NULL);
- cg->cpuset_cpus = get_system_cpus();
-
- char *s = "max\n\0";
- if(strcmp(s, procfile_lineword(ff, 0, 0)) == 0){
- cg->cpu_cfs_quota = cg->cpu_cfs_period * cg->cpuset_cpus;
- } else {
- cg->cpu_cfs_quota = str2ull(procfile_lineword(ff, 0, 0), NULL);
- }
- netdata_log_debug(D_CGROUP, "CPU limits values: %llu %llu %llu", cg->cpu_cfs_period, cg->cpuset_cpus, cg->cpu_cfs_quota);
- return;
-
-cpu_limits2_err:
- collector_error("Cannot refresh cgroup %s cpu limit by reading '%s'. Will not update its limit anymore.", cg->id, cg->filename_cpu_cfs_quota);
- freez(cg->filename_cpu_cfs_quota);
- cg->filename_cpu_cfs_quota = NULL;
-
- }
-}
-
-static inline int update_memory_limits(struct cgroup *cg) {
- char **filename = &cg->filename_memory_limit;
- const RRDSETVAR_ACQUIRED **chart_var = &cg->chart_var_memory_limit;
- unsigned long long *value = &cg->memory_limit;
-
- if(*filename) {
- if(unlikely(!*chart_var)) {
- *chart_var = rrdsetvar_custom_chart_variable_add_and_acquire(cg->st_mem_usage, "memory_limit");
- if(!*chart_var) {
- collector_error("Cannot create cgroup %s chart variable '%s'. Will not update its limit anymore.", cg->id, "memory_limit");
- freez(*filename);
- *filename = NULL;
- }
- }
-
- if(*filename && *chart_var) {
- if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- if(read_single_number_file(*filename, value)) {
- collector_error("Cannot refresh cgroup %s memory limit by reading '%s'. Will not update its limit anymore.", cg->id, *filename);
- freez(*filename);
- *filename = NULL;
- }
- else {
- rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value) / (1024.0 * 1024.0));
- return 1;
- }
- } else {
- char buffer[30 + 1];
- int ret = read_file(*filename, buffer, 30);
- if(ret) {
- collector_error("Cannot refresh cgroup %s memory limit by reading '%s'. Will not update its limit anymore.", cg->id, *filename);
- freez(*filename);
- *filename = NULL;
- return 0;
- }
- char *s = "max\n\0";
- if(strcmp(s, buffer) == 0){
- *value = UINT64_MAX;
- rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value) / (1024.0 * 1024.0));
- return 1;
- }
- *value = str2ull(buffer, NULL);
- rrdsetvar_custom_chart_variable_set(cg->st_mem_usage, *chart_var, (NETDATA_DOUBLE)(*value) / (1024.0 * 1024.0));
- return 1;
- }
- }
- }
- return 0;
-}
-
-// ----------------------------------------------------------------------------
-// generate charts
-
-void update_cgroup_systemd_services_charts() {
- for (struct cgroup *cg = cgroup_root; cg; cg = cg->next) {
- if (unlikely(!cg->enabled || cg->pending_renames || !is_cgroup_systemd_service(cg)))
- continue;
-
- if (likely(cg->cpuacct_stat.updated)) {
- update_cpu_utilization_chart(cg);
- }
- if (likely(cg->memory.updated_msw_usage_in_bytes)) {
- update_mem_usage_chart(cg);
- }
- if (likely(cg->memory.updated_failcnt)) {
- update_mem_failcnt_chart(cg);
- }
- if (likely(cg->memory.updated_detailed)) {
- update_mem_usage_detailed_chart(cg);
- update_mem_writeback_chart(cg);
- update_mem_pgfaults_chart(cg);
- if (!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- update_mem_activity_chart(cg);
- }
- }
- if (likely(cg->io_service_bytes.updated)) {
- update_io_serviced_bytes_chart(cg);
- }
- if (likely(cg->io_serviced.updated)) {
- update_io_serviced_ops_chart(cg);
- }
- if (likely(cg->throttle_io_service_bytes.updated)) {
- update_throttle_io_serviced_bytes_chart(cg);
- }
- if (likely(cg->throttle_io_serviced.updated)) {
- update_throttle_io_serviced_ops_chart(cg);
- }
- if (likely(cg->io_queued.updated)) {
- update_io_queued_ops_chart(cg);
- }
- if (likely(cg->io_merged.updated)) {
- update_io_merged_ops_chart(cg);
- }
-
- if (likely(cg->pids.pids_current_updated)) {
- update_pids_current_chart(cg);
- }
-
- cg->function_ready = true;
- }
-}
-
-void update_cgroup_charts() {
- for (struct cgroup *cg = cgroup_root; cg; cg = cg->next) {
- if(unlikely(!cg->enabled || cg->pending_renames || is_cgroup_systemd_service(cg)))
- continue;
-
- if (likely(cg->cpuacct_stat.updated && cg->cpuacct_stat.enabled == CONFIG_BOOLEAN_YES)) {
- update_cpu_utilization_chart(cg);
-
- if(likely(cg->filename_cpuset_cpus || cg->filename_cpu_cfs_period || cg->filename_cpu_cfs_quota)) {
- if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- update_cpu_limits(&cg->filename_cpuset_cpus, &cg->cpuset_cpus, cg);
- update_cpu_limits(&cg->filename_cpu_cfs_period, &cg->cpu_cfs_period, cg);
- update_cpu_limits(&cg->filename_cpu_cfs_quota, &cg->cpu_cfs_quota, cg);
- } else {
- update_cpu_limits2(cg);
- }
-
- if(unlikely(!cg->chart_var_cpu_limit)) {
- cg->chart_var_cpu_limit = rrdsetvar_custom_chart_variable_add_and_acquire(cg->st_cpu, "cpu_limit");
- if(!cg->chart_var_cpu_limit) {
- collector_error("Cannot create cgroup %s chart variable 'cpu_limit'. Will not update its limit anymore.", cg->id);
- if(cg->filename_cpuset_cpus) freez(cg->filename_cpuset_cpus);
- cg->filename_cpuset_cpus = NULL;
- if(cg->filename_cpu_cfs_period) freez(cg->filename_cpu_cfs_period);
- cg->filename_cpu_cfs_period = NULL;
- if(cg->filename_cpu_cfs_quota) freez(cg->filename_cpu_cfs_quota);
- cg->filename_cpu_cfs_quota = NULL;
- }
- } else {
- NETDATA_DOUBLE value = 0, quota = 0;
-
- if(likely( ((!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) && (cg->filename_cpuset_cpus || (cg->filename_cpu_cfs_period && cg->filename_cpu_cfs_quota)))
- || ((cg->options & CGROUP_OPTIONS_IS_UNIFIED) && cg->filename_cpu_cfs_quota))) {
- if(unlikely(cg->cpu_cfs_quota > 0))
- quota = (NETDATA_DOUBLE)cg->cpu_cfs_quota / (NETDATA_DOUBLE)cg->cpu_cfs_period;
-
- if(unlikely(quota > 0 && quota < cg->cpuset_cpus))
- value = quota * 100;
- else
- value = (NETDATA_DOUBLE)cg->cpuset_cpus * 100;
- }
- if(likely(value)) {
- update_cpu_utilization_limit_chart(cg, value);
- } else {
- if (unlikely(cg->st_cpu_limit)) {
- rrdset_is_obsolete___safe_from_collector_thread(cg->st_cpu_limit);
- cg->st_cpu_limit = NULL;
- }
- rrdsetvar_custom_chart_variable_set(cg->st_cpu, cg->chart_var_cpu_limit, NAN);
- }
- }
- }
- }
-
- if (likely(cg->cpuacct_cpu_throttling.updated && cg->cpuacct_cpu_throttling.enabled == CONFIG_BOOLEAN_YES)) {
- update_cpu_throttled_chart(cg);
- update_cpu_throttled_duration_chart(cg);
- }
-
- if (likely(cg->cpuacct_cpu_shares.updated && cg->cpuacct_cpu_shares.enabled == CONFIG_BOOLEAN_YES)) {
- update_cpu_shares_chart(cg);
- }
-
- if (likely(cg->cpuacct_usage.updated && cg->cpuacct_usage.enabled == CONFIG_BOOLEAN_YES)) {
- update_cpu_per_core_usage_chart(cg);
- }
-
- if (likely(cg->memory.updated_detailed && cg->memory.enabled_detailed == CONFIG_BOOLEAN_YES)) {
- update_mem_usage_detailed_chart(cg);
- update_mem_writeback_chart(cg);
-
- if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) {
- update_mem_activity_chart(cg);
- }
-
- update_mem_pgfaults_chart(cg);
- }
-
- if (likely(cg->memory.updated_usage_in_bytes && cg->memory.enabled_usage_in_bytes == CONFIG_BOOLEAN_YES)) {
- update_mem_usage_chart(cg);
-
- // FIXME: this if should be only for unlimited charts
- if(likely(host_ram_total)) {
- // FIXME: do we need to update mem limits on every data collection?
- if (likely(update_memory_limits(cg))) {
-
- unsigned long long memory_limit = host_ram_total;
- if (unlikely(cg->memory_limit < host_ram_total))
- memory_limit = cg->memory_limit;
-
- update_mem_usage_limit_chart(cg, memory_limit);
- update_mem_utilization_chart(cg, memory_limit);
- } else {
- if (unlikely(cg->st_mem_usage_limit)) {
- rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_usage_limit);
- cg->st_mem_usage_limit = NULL;
- }
-
- if (unlikely(cg->st_mem_utilization)) {
- rrdset_is_obsolete___safe_from_collector_thread(cg->st_mem_utilization);
- cg->st_mem_utilization = NULL;
- }
- }
- }
- }
-
- if (likely(cg->memory.updated_failcnt && cg->memory.enabled_failcnt == CONFIG_BOOLEAN_YES)) {
- update_mem_failcnt_chart(cg);
- }
-
- if (likely(cg->io_service_bytes.updated && cg->io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
- update_io_serviced_bytes_chart(cg);
- }
-
- if (likely(cg->io_serviced.updated && cg->io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
- update_io_serviced_ops_chart(cg);
- }
-
- if (likely(cg->throttle_io_service_bytes.updated && cg->throttle_io_service_bytes.enabled == CONFIG_BOOLEAN_YES)) {
- update_throttle_io_serviced_bytes_chart(cg);
- }
-
- if (likely(cg->throttle_io_serviced.updated && cg->throttle_io_serviced.enabled == CONFIG_BOOLEAN_YES)) {
- update_throttle_io_serviced_ops_chart(cg);
- }
-
- if (likely(cg->io_queued.updated && cg->io_queued.enabled == CONFIG_BOOLEAN_YES)) {
- update_io_queued_ops_chart(cg);
- }
-
- if (likely(cg->io_merged.updated && cg->io_merged.enabled == CONFIG_BOOLEAN_YES)) {
- update_io_merged_ops_chart(cg);
- }
-
- if (likely(cg->pids.pids_current_updated)) {
- update_pids_current_chart(cg);
- }
-
- if (cg->options & CGROUP_OPTIONS_IS_UNIFIED) {
- if (likely(cg->cpu_pressure.updated)) {
- if (cg->cpu_pressure.some.enabled) {
- update_cpu_some_pressure_chart(cg);
- update_cpu_some_pressure_stall_time_chart(cg);
- }
- if (cg->cpu_pressure.full.enabled) {
- update_cpu_full_pressure_chart(cg);
- update_cpu_full_pressure_stall_time_chart(cg);
- }
- }
-
- if (likely(cg->memory_pressure.updated)) {
- if (cg->memory_pressure.some.enabled) {
- update_mem_some_pressure_chart(cg);
- update_mem_some_pressure_stall_time_chart(cg);
- }
- if (cg->memory_pressure.full.enabled) {
- update_mem_full_pressure_chart(cg);
- update_mem_full_pressure_stall_time_chart(cg);
- }
- }
-
- if (likely(cg->irq_pressure.updated)) {
- if (cg->irq_pressure.some.enabled) {
- update_irq_some_pressure_chart(cg);
- update_irq_some_pressure_stall_time_chart(cg);
- }
- if (cg->irq_pressure.full.enabled) {
- update_irq_full_pressure_chart(cg);
- update_irq_full_pressure_stall_time_chart(cg);
- }
- }
-
- if (likely(cg->io_pressure.updated)) {
- if (cg->io_pressure.some.enabled) {
- update_io_some_pressure_chart(cg);
- update_io_some_pressure_stall_time_chart(cg);
- }
- if (cg->io_pressure.full.enabled) {
- update_io_full_pressure_chart(cg);
- update_io_full_pressure_stall_time_chart(cg);
- }
- }
- }
-
- cg->function_ready = true;
- }
-}
-
-// ----------------------------------------------------------------------------
-// cgroups main
-
-static void cgroup_main_cleanup(void *ptr) {
- worker_unregister();
-
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- collector_info("cleaning up...");
-
- usec_t max = 2 * USEC_PER_SEC, step = 50000;
-
- if (!__atomic_load_n(&discovery_thread.exited, __ATOMIC_RELAXED)) {
- collector_info("waiting for discovery thread to finish...");
- while (!__atomic_load_n(&discovery_thread.exited, __ATOMIC_RELAXED) && max > 0) {
- uv_mutex_lock(&discovery_thread.mutex);
- uv_cond_signal(&discovery_thread.cond_var);
- uv_mutex_unlock(&discovery_thread.mutex);
- max -= step;
- sleep_usec(step);
- }
- }
-
- if (shm_mutex_cgroup_ebpf != SEM_FAILED) {
- sem_close(shm_mutex_cgroup_ebpf);
- }
-
- if (shm_cgroup_ebpf.header) {
- shm_cgroup_ebpf.header->cgroup_root_count = 0;
- munmap(shm_cgroup_ebpf.header, shm_cgroup_ebpf.header->body_length);
- }
-
- if (shm_fd_cgroup_ebpf > 0) {
- close(shm_fd_cgroup_ebpf);
- }
-
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-void cgroup_read_host_total_ram() {
- procfile *ff = NULL;
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/meminfo");
-
- ff = procfile_open(
- config_get("plugin:cgroups", "meminfo filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
-
- if (likely((ff = procfile_readall(ff)) && procfile_lines(ff) && !strncmp(procfile_word(ff, 0), "MemTotal", 8)))
- host_ram_total = str2ull(procfile_word(ff, 1), NULL) * 1024;
- else
- collector_error("Cannot read file %s. Will not create RAM limit charts.", filename);
-
- procfile_close(ff);
-}
-
-void *cgroups_main(void *ptr) {
- worker_register("CGROUPS");
- worker_register_job_name(WORKER_CGROUPS_LOCK, "lock");
- worker_register_job_name(WORKER_CGROUPS_READ, "read");
- worker_register_job_name(WORKER_CGROUPS_CHART, "chart");
-
- netdata_thread_cleanup_push(cgroup_main_cleanup, ptr);
-
- if (getenv("KUBERNETES_SERVICE_HOST") != NULL && getenv("KUBERNETES_SERVICE_PORT") != NULL) {
- is_inside_k8s = 1;
- cgroup_enable_cpuacct_cpu_shares = CONFIG_BOOLEAN_YES;
- }
-
- read_cgroup_plugin_configuration();
-
- cgroup_read_host_total_ram();
-
- netdata_cgroup_ebpf_initialize_shm();
-
- if (uv_mutex_init(&cgroup_root_mutex)) {
- collector_error("CGROUP: cannot initialize mutex for the main cgroup list");
- goto exit;
- }
-
- discovery_thread.exited = 0;
-
- if (uv_mutex_init(&discovery_thread.mutex)) {
- collector_error("CGROUP: cannot initialize mutex for discovery thread");
- goto exit;
- }
- if (uv_cond_init(&discovery_thread.cond_var)) {
- collector_error("CGROUP: cannot initialize conditional variable for discovery thread");
- goto exit;
- }
-
- int error = uv_thread_create(&discovery_thread.thread, cgroup_discovery_worker, NULL);
- if (error) {
- collector_error("CGROUP: cannot create thread worker. uv_thread_create(): %s", uv_strerror(error));
- goto exit;
- }
-
- uv_thread_set_name_np(discovery_thread.thread, "P[cgroups]");
-
- // we register this only on localhost
- // for the other nodes, the origin server should register it
- rrd_collector_started(); // this creates a collector that runs for as long as netdata runs
- cgroup_netdev_link_init();
- rrd_function_add(localhost, NULL, "containers-vms", 10, RRDFUNCTIONS_CGTOP_HELP, true, cgroup_function_cgroup_top, NULL);
- rrd_function_add(localhost, NULL, "systemd-services", 10, RRDFUNCTIONS_CGTOP_HELP, true, cgroup_function_systemd_top, NULL);
-
- heartbeat_t hb;
- heartbeat_init(&hb);
- usec_t step = cgroup_update_every * USEC_PER_SEC;
- usec_t find_every = cgroup_check_for_new_every * USEC_PER_SEC, find_dt = 0;
-
- netdata_thread_disable_cancelability();
-
- while(service_running(SERVICE_COLLECTORS)) {
- worker_is_idle();
-
- usec_t hb_dt = heartbeat_next(&hb, step);
- if (unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
-
- find_dt += hb_dt;
- if (unlikely(find_dt >= find_every || (!is_inside_k8s && cgroups_check))) {
- uv_mutex_lock(&discovery_thread.mutex);
- uv_cond_signal(&discovery_thread.cond_var);
- uv_mutex_unlock(&discovery_thread.mutex);
- find_dt = 0;
- cgroups_check = 0;
- }
-
- worker_is_busy(WORKER_CGROUPS_LOCK);
- uv_mutex_lock(&cgroup_root_mutex);
-
- worker_is_busy(WORKER_CGROUPS_READ);
- read_all_discovered_cgroups(cgroup_root);
-
- if (unlikely(!service_running(SERVICE_COLLECTORS))) {
- uv_mutex_unlock(&cgroup_root_mutex);
- break;
- }
-
- worker_is_busy(WORKER_CGROUPS_CHART);
-
- update_cgroup_charts();
- if (cgroup_enable_systemd_services)
- update_cgroup_systemd_services_charts();
-
- if (unlikely(!service_running(SERVICE_COLLECTORS))) {
- uv_mutex_unlock(&cgroup_root_mutex);
- break;
- }
-
- worker_is_idle();
- uv_mutex_unlock(&cgroup_root_mutex);
- }
-
-exit:
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.h b/collectors/cgroups.plugin/sys_fs_cgroup.h
deleted file mode 100644
index e8cfcf5f6..000000000
--- a/collectors/cgroups.plugin/sys_fs_cgroup.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_SYS_FS_CGROUP_H
-#define NETDATA_SYS_FS_CGROUP_H 1
-
-#include "daemon/common.h"
-
-#define PLUGIN_CGROUPS_NAME "cgroups.plugin"
-#define PLUGIN_CGROUPS_MODULE_SYSTEMD_NAME "systemd"
-#define PLUGIN_CGROUPS_MODULE_CGROUPS_NAME "/sys/fs/cgroup"
-
-#define CGROUP_OPTIONS_DISABLED_DUPLICATE 0x00000001
-#define CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE 0x00000002
-#define CGROUP_OPTIONS_IS_UNIFIED 0x00000004
-
-typedef struct netdata_ebpf_cgroup_shm_header {
- int cgroup_root_count;
- int cgroup_max;
- int systemd_enabled;
- int __pad;
- size_t body_length;
-} netdata_ebpf_cgroup_shm_header_t;
-
-#define CGROUP_EBPF_NAME_SHARED_LENGTH 256
-
-typedef struct netdata_ebpf_cgroup_shm_body {
- // Considering what is exposed in this link https://en.wikipedia.org/wiki/Comparison_of_file_systems#Limits
- // this length is enough to store what we want.
- char name[CGROUP_EBPF_NAME_SHARED_LENGTH];
- uint32_t hash;
- uint32_t options;
- int enabled;
- char path[FILENAME_MAX + 1];
-} netdata_ebpf_cgroup_shm_body_t;
-
-typedef struct netdata_ebpf_cgroup_shm {
- netdata_ebpf_cgroup_shm_header_t *header;
- netdata_ebpf_cgroup_shm_body_t *body;
-} netdata_ebpf_cgroup_shm_t;
-
-#define NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME "netdata_shm_cgroup_ebpf"
-#define NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME "/netdata_sem_cgroup_ebpf"
-
-#include "../proc.plugin/plugin_proc.h"
-
-char *cgroup_parse_resolved_name_and_labels(RRDLABELS *labels, char *data);
-
-#endif //NETDATA_SYS_FS_CGROUP_H
diff --git a/collectors/cgroups.plugin/tests/test_cgroups_plugin.c b/collectors/cgroups.plugin/tests/test_cgroups_plugin.c
deleted file mode 100644
index bb1fb3988..000000000
--- a/collectors/cgroups.plugin/tests/test_cgroups_plugin.c
+++ /dev/null
@@ -1,130 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "test_cgroups_plugin.h"
-#include "libnetdata/required_dummies.h"
-
-RRDHOST *localhost;
-int netdata_zero_metrics_enabled = 1;
-struct config netdata_config;
-char *netdata_configured_primary_plugins_dir = NULL;
-
-struct k8s_test_data {
- char *data;
- char *name;
- char *key[3];
- char *value[3];
-
- const char *result_key[3];
- const char *result_value[3];
- int result_ls[3];
- int i;
-};
-
-static int read_label_callback(const char *name, const char *value, void *data)
-{
- struct k8s_test_data *test_data = (struct k8s_test_data *)data;
-
- test_data->result_key[test_data->i] = name;
- test_data->result_value[test_data->i] = value;
-
- test_data->i++;
-
- return 1;
-}
-
-static void test_cgroup_parse_resolved_name(void **state)
-{
- UNUSED(state);
-
- RRDLABELS *labels = rrdlabels_create();
-
- struct k8s_test_data test_data[] = {
- // One label
- { .data = "name label1=\"value1\"",
- .name = "name",
- .key[0] = "label1", .value[0] = "value1" },
-
- // Three labels
- { .data = "name label1=\"value1\",label2=\"value2\",label3=\"value3\"",
- .name = "name",
- .key[0] = "label1", .value[0] = "value1",
- .key[1] = "label2", .value[1] = "value2",
- .key[2] = "label3", .value[2] = "value3" },
-
- // Comma at the end of the data string
- { .data = "name label1=\"value1\",",
- .name = "name",
- .key[0] = "label1", .value[0] = "value1" },
-
- // Equals sign in the value
- // { .data = "name label1=\"value=1\"",
- // .name = "name",
- // .key[0] = "label1", .value[0] = "value=1" },
-
- // Double quotation mark in the value
- // { .data = "name label1=\"value\"1\"",
- // .name = "name",
- // .key[0] = "label1", .value[0] = "value" },
-
- // Escaped double quotation mark in the value
- // { .data = "name label1=\"value\\\"1\"",
- // .name = "name",
- // .key[0] = "label1", .value[0] = "value\\\"1" },
-
- // Equals sign in the key
- // { .data = "name label=1=\"value1\"",
- // .name = "name",
- // .key[0] = "label", .value[0] = "1=\"value1\"" },
-
- // Skipped value
- // { .data = "name label1=,label2=\"value2\"",
- // .name = "name",
- // .key[0] = "label2", .value[0] = "value2" },
-
- // A pair of equals signs
- { .data = "name= =",
- .name = "name=" },
-
- // A pair of commas
- { .data = "name, ,",
- .name = "name," },
-
- { .data = NULL }
- };
-
- for (int i = 0; test_data[i].data != NULL; i++) {
- char *data = strdup(test_data[i].data);
-
- char *name = cgroup_parse_resolved_name_and_labels(labels, data);
-
- assert_string_equal(name, test_data[i].name);
-
- rrdlabels_walkthrough_read(labels, read_label_callback, &test_data[i]);
-
- for (int l = 0; l < 3 && test_data[i].key[l] != NULL; l++) {
- char *key = test_data[i].key[l];
- char *value = test_data[i].value[l];
-
- const char *result_key = test_data[i].result_key[l];
- const char *result_value = test_data[i].result_value[l];
- int ls = test_data[i].result_ls[l];
-
- assert_string_equal(key, result_key);
- assert_string_equal(value, result_value);
- assert_int_equal(RRDLABEL_SRC_AUTO | RRDLABEL_SRC_K8S, ls);
- }
-
- free(data);
- }
-}
-
-int main(void)
-{
- const struct CMUnitTest tests[] = {
- cmocka_unit_test(test_cgroup_parse_resolved_name),
- };
-
- int test_res = cmocka_run_group_tests_name("test_cgroup_parse_resolved_name", tests, NULL, NULL);
-
- return test_res;
-}
diff --git a/collectors/cgroups.plugin/tests/test_cgroups_plugin.h b/collectors/cgroups.plugin/tests/test_cgroups_plugin.h
deleted file mode 100644
index 3d68e9230..000000000
--- a/collectors/cgroups.plugin/tests/test_cgroups_plugin.h
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef TEST_CGROUPS_PLUGIN_H
-#define TEST_CGROUPS_PLUGIN_H 1
-
-#include "libnetdata/libnetdata.h"
-
-#include "../sys_fs_cgroup.h"
-
-#include <stdarg.h>
-#include <stddef.h>
-#include <setjmp.h>
-#include <stdint.h>
-#include <cmocka.h>
-
-#endif /* TEST_CGROUPS_PLUGIN_H */
diff --git a/collectors/cgroups.plugin/tests/test_doubles.c b/collectors/cgroups.plugin/tests/test_doubles.c
deleted file mode 100644
index b13d4b19c..000000000
--- a/collectors/cgroups.plugin/tests/test_doubles.c
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "test_cgroups_plugin.h"
-
-void rrdset_is_obsolete___safe_from_collector_thread(RRDSET *st)
-{
- UNUSED(st);
-}
-
-void rrdset_isnot_obsolete___safe_from_collector_thread(RRDSET *st)
-{
- UNUSED(st);
-}
-
-struct mountinfo *mountinfo_read(int do_statvfs)
-{
- UNUSED(do_statvfs);
-
- return NULL;
-}
-
-struct mountinfo *
-mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source)
-{
- UNUSED(root);
- UNUSED(filesystem);
- UNUSED(mount_source);
-
- return NULL;
-}
-
-struct mountinfo *
-mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options)
-{
- UNUSED(root);
- UNUSED(filesystem);
- UNUSED(super_options);
-
- return NULL;
-}
-
-void mountinfo_free_all(struct mountinfo *mi)
-{
- UNUSED(mi);
-}
-
-RRDSET *rrdset_create_custom(
- RRDHOST *host, const char *type, const char *id, const char *name, const char *family, const char *context,
- const char *title, const char *units, const char *plugin, const char *module, long priority, int update_every,
- RRDSET_TYPE chart_type, RRD_MEMORY_MODE memory_mode, long history_entries)
-{
- UNUSED(host);
- UNUSED(type);
- UNUSED(id);
- UNUSED(name);
- UNUSED(family);
- UNUSED(context);
- UNUSED(title);
- UNUSED(units);
- UNUSED(plugin);
- UNUSED(module);
- UNUSED(priority);
- UNUSED(update_every);
- UNUSED(chart_type);
- UNUSED(memory_mode);
- UNUSED(history_entries);
-
- return NULL;
-}
-
-RRDDIM *rrddim_add_custom(
- RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divisor,
- RRD_ALGORITHM algorithm, RRD_MEMORY_MODE memory_mode)
-{
- UNUSED(st);
- UNUSED(id);
- UNUSED(name);
- UNUSED(multiplier);
- UNUSED(divisor);
- UNUSED(algorithm);
- UNUSED(memory_mode);
-
- return NULL;
-}
-
-collected_number rrddim_set(RRDSET *st, const char *id, collected_number value)
-{
- UNUSED(st);
- UNUSED(id);
- UNUSED(value);
-
- return 0;
-}
-
-collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value)
-{
- UNUSED(st);
- UNUSED(rd);
- UNUSED(value);
-
- return 0;
-}
-
-const RRDSETVAR_ACQUIRED *rrdsetvar_custom_chart_variable_add_and_acquire(RRDSET *st, const char *name)
-{
- UNUSED(st);
- UNUSED(name);
-
- return NULL;
-}
-
-void rrdsetvar_custom_chart_variable_set(RRDSET *st, const RRDSETVAR_ACQUIRED *rsa, NETDATA_DOUBLE value)
-{
- UNUSED(st);
- UNUSED(rsa);
- UNUSED(value);
-}
-
-void rrdset_next_usec(RRDSET *st, usec_t microseconds)
-{
- UNUSED(st);
- UNUSED(microseconds);
-}
-
-void rrdset_done(RRDSET *st)
-{
- UNUSED(st);
-}
-
-void update_pressure_charts(struct pressure_charts *charts)
-{
- UNUSED(charts);
-}
-
-void netdev_rename_device_add(
- const char *host_device, const char *container_device, const char *container_name, DICTIONARY *labels, const char *ctx_prefix)
-{
- UNUSED(host_device);
- UNUSED(container_device);
- UNUSED(container_name);
- UNUSED(labels);
- UNUSED(ctx_prefix);
-}
-
-void netdev_rename_device_del(const char *host_device)
-{
- UNUSED(host_device);
-}
-
-void rrdcalc_update_rrdlabels(RRDSET *st) {
- (void)st;
-}
-
-void db_execute(const char *cmd)
-{
- UNUSED(cmd);
-}
diff --git a/collectors/charts.d.plugin/Makefile.am b/collectors/charts.d.plugin/Makefile.am
deleted file mode 100644
index f82992fd4..000000000
--- a/collectors/charts.d.plugin/Makefile.am
+++ /dev/null
@@ -1,49 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- charts.d.plugin \
- $(NULL)
-
-include $(top_srcdir)/build/subst.inc
-SUFFIXES = .in
-
-dist_libconfig_DATA = \
- charts.d.conf \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- charts.d.dryrun-helper.sh \
- charts.d.plugin \
- loopsleepms.sh.inc \
- $(NULL)
-
-dist_noinst_DATA = \
- charts.d.plugin.in \
- README.md \
- $(NULL)
-
-dist_charts_SCRIPTS = \
- $(NULL)
-
-dist_charts_DATA = \
- $(NULL)
-
-userchartsconfigdir=$(configdir)/charts.d
-dist_userchartsconfig_DATA = \
- $(NULL)
-
-# Explicitly install directories to avoid permission issues due to umask
-install-exec-local:
- $(INSTALL) -d $(DESTDIR)$(userchartsconfigdir)
-
-chartsconfigdir=$(libconfigdir)/charts.d
-dist_chartsconfig_DATA = \
- $(NULL)
-
-include ap/Makefile.inc
-include apcupsd/Makefile.inc
-include example/Makefile.inc
-include libreswan/Makefile.inc
-include opensips/Makefile.inc
-include sensors/Makefile.inc
diff --git a/collectors/charts.d.plugin/README.md b/collectors/charts.d.plugin/README.md
deleted file mode 100644
index 97c2446fa..000000000
--- a/collectors/charts.d.plugin/README.md
+++ /dev/null
@@ -1,190 +0,0 @@
-# charts.d.plugin
-
-`charts.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `BASH` v4+.
-
-1. It runs as an independent process (`ps fax` shows it)
-2. It is started and stopped automatically by Netdata
-3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
-4. Supports any number of data collection **modules**
-
-To better understand the guidelines and the API behind our External plugins, please have a look at the [Introduction to External plugins](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md) prior to reading this page.
-
-
-`charts.d.plugin` has been designed so that the actual script that does the data collection stays permanently in
-memory, collecting data with as little overhead as possible
-(i.e. initialize once, then repeatedly collect values cheaply).
-
-`charts.d.plugin` looks for scripts in `/usr/libexec/netdata/charts.d`.
-The scripts should have the filename suffix: `.chart.sh`.
-
-By default, `charts.d.plugin` is not included when installing from [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md). You can add it by installing the `netdata-plugin-chartsd` package.
-
-## Configuration
-
-`charts.d.plugin` itself can be [configured](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) using the configuration file `/etc/netdata/charts.d.conf`. This file is also a BASH script.
-
-In this file, you can place statements like this:
-
-```conf
-enable_all_charts="yes"
-X="yes"
-Y="no"
-```
-
-where `X` and `Y` are the names of individual charts.d collector scripts.
-When set to `yes`, charts.d will evaluate the collector script (see below).
-When set to `no`, charts.d will ignore the collector script.
-
-The variable `enable_all_charts` sets the default enable/disable state for all charts.
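-
-For example, to run only the `ap` and `apcupsd` modules and disable everything else, a minimal sketch of `charts.d.conf` could look like this:
-
-```conf
-# disable all modules by default
-enable_all_charts="no"
-
-# enable only the modules you want
-ap="yes"
-apcupsd="yes"
-```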
-
-## A charts.d module
-
-A `charts.d.plugin` module is a BASH script defining a few functions.
-
-For a module called `X`, the following criteria must be met:
-
-1. The module script must be called `X.chart.sh` and placed in `/usr/libexec/netdata/charts.d`.
-
-2. If the module needs a configuration, it should be called `X.conf` and placed in `/etc/netdata/charts.d`.
- The configuration file `X.conf` is also a BASH script itself.
-   You can edit the default files supplied by Netdata by running `/etc/netdata/edit-config charts.d/X.conf`, where `X` is the name of the module.
-
-3. All functions and global variables defined in the script and its configuration, must begin with `X_`.
-
-4. The following functions must be defined:
-
-   - `X_check()` - returns 0 or 1 depending on whether the module is able to run or not
-     (following the standard Linux exit codes: 0 = OK, the collector can operate; 1 = FAILED,
-     the collector cannot be used).
-
- - `X_create()` - creates the Netdata charts (commands `CHART` and `DIMENSION`).
- The return value does matter: 0 = OK, 1 = FAILED.
-
- - `X_update()` - collects the values for the defined charts (commands `BEGIN`, `SET`, `END`).
- The return value also matters: 0 = OK, 1 = FAILED.
-
-5. The following global variables are available to be set:
- - `X_update_every` - is the data collection frequency for the module script, in seconds.
-
-The module script may use more functions or variables. But all of them must begin with `X_`.
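-
-Putting these requirements together, a minimal module - here a hypothetical `mymod` that charts a single random value - could look like the following sketch:
-
-```sh
-# mymod.chart.sh - a hypothetical, minimal charts.d module
-
-# if empty, inherits the global charts.d update frequency
-mymod_update_every=
-mymod_priority=90000
-
-mymod_check() {
-  # return 0 if the module can collect data, 1 to disable it
-  return 0
-}
-
-mymod_create() {
-  # create one chart with one dimension
-  cat << EOF
-CHART mymod.random '' "A random value" "value" random mymod.random line $((mymod_priority + 1)) $mymod_update_every
-DIMENSION random '' absolute 1 1
-EOF
-  return 0
-}
-
-mymod_update() {
-  # $1 is the number of microseconds since the last call
-  cat << EOF
-BEGIN mymod.random $1
-SET random = $RANDOM
-END
-EOF
-  return 0
-}
-```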
-
-### X_check()
-
-The purpose of the BASH function `X_check()` is to check if the module can collect data (or check its config).
-
-For example, if the module is about monitoring a local mysql database, the `X_check()` function may attempt to
-connect to a local mysql database to find out if it can read the values it needs.
-
-`X_check()` is run only once for the lifetime of the module.
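-
-As a sketch, a hypothetical module that collects from `/proc/loadavg` could implement its check like this, using the `error` helper charts.d provides:
-
-```sh
-loadavg_check() {
-  # succeed only if the file we collect from is readable
-  [ -r "/proc/loadavg" ] && return 0
-  error "/proc/loadavg is not readable"
-  return 1
-}
-```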
-
-### X_create()
-
-The purpose of the BASH function `X_create()` is to create the charts and dimensions using the standard Netdata
-plugin guidelines.
-
-`X_create()` will be called just once and only after `X_check()` was successful.
-You can however call it yourself when there is need for it (for example to add a new dimension to an existing chart).
-
-A non-zero return value will disable the collector.
-
-### X_update()
-
-`X_update()` will be called repeatedly every `X_update_every` seconds, to collect new values and send them to Netdata,
-following the Netdata plugin guidelines.
-
-The function will be called with one parameter: microseconds since the last time it was run. This value should be
-appended to the `BEGIN` statement of every chart updated by the collector script.
-
-A non-zero return value will disable the collector.
-
-### Useful functions charts.d provides
-
-Module scripts can use the following charts.d functions:
-
-#### require_cmd command
-
-`require_cmd()` will check if a command is available in the running system.
-
-For example, your `X_check()` function may use it like this:
-
-```sh
-mysql_check() {
- require_cmd mysql || return 1
- return 0
-}
-```
-
-Using the above, if the command `mysql` is not available in the system, the `mysql` module will be disabled.
-
-#### fixid "string"
-
-`fixid()` will get a string and return a properly formatted id for a chart or dimension.
-
-This is an expensive function that should not be used in `X_update()`.
-You can keep the generated id in a BASH associative array to have the values available in `X_update()`, like this:
-
-```sh
-declare -A X_ids=()
-X_create() {
- local name="a very bad name for id"
-
- X_ids[$name]="$(fixid "$name")"
-}
-
-X_update() {
- local microseconds="$1"
-
- ...
- local name="a very bad name for id"
- ...
-
- echo "BEGIN ${X_ids[$name]} $microseconds"
- ...
-}
-```
-
-### Debugging your collectors
-
-You can run `charts.d.plugin` by hand with something like this:
-
-```sh
-# become user netdata
-sudo su -s /bin/sh netdata
-
-# run the plugin in debug mode
-/usr/libexec/netdata/plugins.d/charts.d.plugin debug 1 X Y Z
-```
-
-Charts.d will run in `debug` mode, with an update frequency of `1`, evaluating only the collector scripts
-`X`, `Y` and `Z`. You can define zero or more module scripts. If none is defined, charts.d will evaluate all
-module scripts available.
-
-Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running
-`charts.d.plugin`:
-
-```sh
-export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata"
-```
-
-Also, remember that Netdata runs `charts.d.plugin` as user `netdata` (or any other user the `netdata` process is configured to run as).
-
-## Running multiple instances of charts.d.plugin
-
-`charts.d.plugin` calls each module's `X_update()` function one after another. This means that a delay in collector `X`
-will also delay the collection of `Y` and `Z`.
-
-You can have multiple `charts.d.plugin` running to overcome this problem.
-
-This is what you need to do:
-
-1. Decide a new name for the new charts.d instance: example `charts2.d`.
-
-2. Create/edit the files `/etc/netdata/charts.d.conf` and `/etc/netdata/charts2.d.conf` and enable / disable the
- module you want each to run. Remember to set `enable_all_charts="no"` to both of them, and enable the individual
- modules for each.
-
-3. link `/usr/libexec/netdata/plugins.d/charts.d.plugin` to `/usr/libexec/netdata/plugins.d/charts2.d.plugin`
-   (see the example commands after this list). Netdata will spawn a new charts.d process.
-
-Execute the above in this order, since Netdata will (by default) attempt to start new plugins soon after they are
-created in `/usr/libexec/netdata/plugins.d/`.
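-
-For example, steps 2 and 3 could look like this on a typical install (paths may differ on your system):
-
-```sh
-# 2. create a configuration for the second instance
-sudo cp /etc/netdata/charts.d.conf /etc/netdata/charts2.d.conf
-
-# 3. link the plugin under the new name; netdata will start it automatically
-sudo ln -s /usr/libexec/netdata/plugins.d/charts.d.plugin \
-           /usr/libexec/netdata/plugins.d/charts2.d.plugin
-```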
diff --git a/collectors/charts.d.plugin/ap/Makefile.inc b/collectors/charts.d.plugin/ap/Makefile.inc
deleted file mode 100644
index a2dd375ac..000000000
--- a/collectors/charts.d.plugin/ap/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += ap/ap.chart.sh
-dist_chartsconfig_DATA += ap/ap.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += ap/README.md ap/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md
deleted file mode 120000
index 5b6e75130..000000000
--- a/collectors/charts.d.plugin/ap/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/access_points.md \ No newline at end of file
diff --git a/collectors/charts.d.plugin/ap/ap.chart.sh b/collectors/charts.d.plugin/ap/ap.chart.sh
deleted file mode 100644
index 80c9dc602..000000000
--- a/collectors/charts.d.plugin/ap/ap.chart.sh
+++ /dev/null
@@ -1,179 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-# _update_every is a special variable - it holds the number of seconds
-# between the calls of the _update() function
-ap_update_every=
-ap_priority=6900
-
-declare -A ap_devs=()
-
-# _check is called once, to find out if this chart should be enabled or not
-ap_check() {
- require_cmd iw || return 1
- local ev
- ev=$(run iw dev | awk '
- BEGIN {
- i = "";
- ssid = "";
- ap = 0;
- }
- /^[ \t]+Interface / {
- if( ap == 1 ) {
- print "ap_devs[" i "]=\"" ssid "\""
- }
-
- i = $2;
- ssid = "";
- ap = 0;
- }
- /^[ \t]+ssid / { ssid = $2; }
- /^[ \t]+type AP$/ { ap = 1; }
- END {
- if( ap == 1 ) {
- print "ap_devs[" i "]=\"" ssid "\""
- }
- }
- ')
- eval "${ev}"
-
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- [ ${#ap_devs[@]} -gt 0 ] && return 0
- error "no devices found in AP mode, with 'iw dev'"
- return 1
-}
-
-# _create is called once, to create the charts
-ap_create() {
- local ssid dev
-
- for dev in "${!ap_devs[@]}"; do
- ssid="${ap_devs[${dev}]}"
-
- # create the chart with 3 dimensions
- cat << EOF
-CHART ap_clients.${dev} '' "Connected clients to ${ssid} on ${dev}" "clients" ${dev} ap.clients line $((ap_priority + 1)) $ap_update_every '' '' 'ap'
-DIMENSION clients '' absolute 1 1
-
-CHART ap_bandwidth.${dev} '' "Bandwidth for ${ssid} on ${dev}" "kilobits/s" ${dev} ap.net area $((ap_priority + 2)) $ap_update_every '' '' 'ap'
-DIMENSION received '' incremental 8 1024
-DIMENSION sent '' incremental -8 1024
-
-CHART ap_packets.${dev} '' "Packets for ${ssid} on ${dev}" "packets/s" ${dev} ap.packets line $((ap_priority + 3)) $ap_update_every '' '' 'ap'
-DIMENSION received '' incremental 1 1
-DIMENSION sent '' incremental -1 1
-
-CHART ap_issues.${dev} '' "Transmit Issues for ${ssid} on ${dev}" "issues/s" ${dev} ap.issues line $((ap_priority + 4)) $ap_update_every '' '' 'ap'
-DIMENSION retries 'tx retries' incremental 1 1
-DIMENSION failures 'tx failures' incremental -1 1
-
-CHART ap_signal.${dev} '' "Average Signal for ${ssid} on ${dev}" "dBm" ${dev} ap.signal line $((ap_priority + 5)) $ap_update_every '' '' 'ap'
-DIMENSION signal 'average signal' absolute 1 1000
-
-CHART ap_bitrate.${dev} '' "Bitrate for ${ssid} on ${dev}" "Mbps" ${dev} ap.bitrate line $((ap_priority + 6)) $ap_update_every '' '' 'ap'
-DIMENSION receive '' absolute 1 1000
-DIMENSION transmit '' absolute -1 1000
-DIMENSION expected 'expected throughput' absolute 1 1000
-EOF
- done
-
- return 0
-}
-
-# _update is called continuously, to collect the values
-ap_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- for dev in "${!ap_devs[@]}"; do
- echo
- echo "DEVICE ${dev}"
- iw "${dev}" station dump
- done | awk '
- function zero_data() {
- dev = "";
- c = 0;
- rb = 0;
- tb = 0;
- rp = 0;
- tp = 0;
- tr = 0;
- tf = 0;
- tt = 0;
- rt = 0;
- s = 0;
- g = 0;
- e = 0;
- }
- function print_device() {
- if(dev != "" && length(dev) > 0) {
- print "BEGIN ap_clients." dev;
- print "SET clients = " c;
- print "END";
- print "BEGIN ap_bandwidth." dev;
- print "SET received = " rb;
- print "SET sent = " tb;
- print "END";
- print "BEGIN ap_packets." dev;
- print "SET received = " rp;
- print "SET sent = " tp;
- print "END";
- print "BEGIN ap_issues." dev;
- print "SET retries = " tr;
- print "SET failures = " tf;
- print "END";
-
- if( c == 0 ) c = 1;
- print "BEGIN ap_signal." dev;
- print "SET signal = " int(s / c);
- print "END";
- print "BEGIN ap_bitrate." dev;
- print "SET receive = " int(rt / c);
- print "SET transmit = " int(tt / c);
- print "SET expected = " int(e / c);
- print "END";
- }
- zero_data();
- }
- BEGIN {
- zero_data();
- }
- /^DEVICE / {
- print_device();
- dev = $2;
- }
- /^Station/ { c++; }
- /^[ \t]+rx bytes:/ { rb += $3; }
- /^[ \t]+tx bytes:/ { tb += $3; }
- /^[ \t]+rx packets:/ { rp += $3; }
- /^[ \t]+tx packets:/ { tp += $3; }
- /^[ \t]+tx retries:/ { tr += $3; }
- /^[ \t]+tx failed:/ { tf += $3; }
- /^[ \t]+signal:/ { x = $2; s += x * 1000; }
- /^[ \t]+rx bitrate:/ { x = $3; rt += x * 1000; }
- /^[ \t]+tx bitrate:/ { x = $3; tt += x * 1000; }
- /^[ \t]+expected throughput:(.*)Mbps/ {
- x=$3;
- sub(/Mbps/, "", x);
- e += x * 1000;
- }
- END {
- print_device();
- }
- '
-
- return 0
-}
diff --git a/collectors/charts.d.plugin/ap/ap.conf b/collectors/charts.d.plugin/ap/ap.conf
deleted file mode 100644
index 38fc157ce..000000000
--- a/collectors/charts.d.plugin/ap/ap.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# nothing fancy to configure.
-# this module will run
-# iw dev - to find wireless devices in AP mode
-# iw ${dev} station dump - to get connected clients
-# based on the above, it generates several charts
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#ap_update_every=
-
-# the charts priority on the dashboard
-#ap_priority=6900
-
-# the number of retries to do in case of failure
-# before disabling the module
-#ap_retries=10
diff --git a/collectors/charts.d.plugin/ap/integrations/access_points.md b/collectors/charts.d.plugin/ap/integrations/access_points.md
deleted file mode 100644
index a0de2c4df..000000000
--- a/collectors/charts.d.plugin/ap/integrations/access_points.md
+++ /dev/null
@@ -1,174 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/ap/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/ap/metadata.yaml"
-sidebar_label: "Access Points"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Network"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Access Points
-
-
-<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
-
-
-Plugin: charts.d.plugin
-Module: ap
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-The ap collector visualizes data related to wireless access points.
-
-It uses the `iw` command line utility to detect access points. For each interface that is of `type AP`, it then runs `iw INTERFACE station dump` and collects statistics.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin can auto-detect access points running on your Linux box.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per wireless device
-
-These metrics refer to the wireless device.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ap.clients | clients | clients |
-| ap.net | received, sent | kilobits/s |
-| ap.packets | received, sent | packets/s |
-| ap.issues | retries, failures | issues/s |
-| ap.signal | average signal | dBm |
-| ap.bitrate | receive, transmit, expected | Mbps |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Install charts.d plugin
-
-If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
-
-
-#### `iw` utility.
-
-Make sure the `iw` utility is installed.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `charts.d/ap.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config charts.d/ap.conf
-```
-#### Options
-
-The config file is sourced by the charts.d plugin. It's a standard bash file.
-
-The following collapsed table contains all the options that can be configured for the ap collector.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| ap_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |
-| ap_priority | Controls the order of charts at the netdata dashboard. | 6900 | no |
-| ap_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |
-
-</details>
-
-#### Examples
-
-##### Change the collection frequency
-
-Specify a custom collection frequency (update_every) for this collector
-
-```bash
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-ap_update_every=10
-
-# the charts priority on the dashboard
-#ap_priority=6900
-
-# the number of retries to do in case of failure
-# before disabling the module
-#ap_retries=10
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `ap` collector, run the `charts.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `charts.d.plugin` to debug the collector:
-
- ```bash
- ./charts.d.plugin debug 1 ap
- ```
-
-
diff --git a/collectors/charts.d.plugin/ap/metadata.yaml b/collectors/charts.d.plugin/ap/metadata.yaml
deleted file mode 100644
index ee941e417..000000000
--- a/collectors/charts.d.plugin/ap/metadata.yaml
+++ /dev/null
@@ -1,146 +0,0 @@
-plugin_name: charts.d.plugin
-modules:
- - meta:
- plugin_name: charts.d.plugin
- module_name: ap
- monitored_instance:
- name: Access Points
- link: "https://learn.netdata.cloud/docs/data-collection/networking-stack-and-network-interfaces/linux-access-points"
- categories:
- - data-collection.linux-systems.network-metrics
- icon_filename: "network-wired.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - ap
- - access
- - point
- - wireless
- - network
- most_popular: false
- overview:
- data_collection:
- metrics_description: "The ap collector visualizes data related to wireless access points."
- method_description: "It uses the `iw` command line utility to detect access points. For each interface that is of `type AP`, it then runs `iw INTERFACE station dump` and collects statistics."
- supported_platforms:
- include: [Linux]
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "The plugin is able to auto-detect if you are running access points on your linux box."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Install charts.d plugin"
- description: |
- If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
- - title: "`iw` utility."
- description: "Make sure the `iw` utility is installed."
- configuration:
- file:
- name: charts.d/ap.conf
- options:
- description: |
- The config file is sourced by the charts.d plugin. It's a standard bash file.
-
- The following collapsed table contains all the options that can be configured for the ap collector.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: ap_update_every
- description: The data collection frequency. If unset, will inherit the netdata update frequency.
- default_value: 1
- required: false
- - name: ap_priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 6900
- required: false
- - name: ap_retries
- description: The number of retries to do in case of failure before disabling the collector.
- default_value: 10
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Change the collection frequency
- description: Specify a custom collection frequency (update_every) for this collector
- config: |
- # the data collection frequency
- # if unset, will inherit the netdata update frequency
- ap_update_every=10
-
- # the charts priority on the dashboard
- #ap_priority=6900
-
- # the number of retries to do in case of failure
- # before disabling the module
- #ap_retries=10
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: wireless device
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: ap.clients
- description: Connected clients to ${ssid} on ${dev}
- unit: "clients"
- chart_type: line
- dimensions:
- - name: clients
- - name: ap.net
- description: Bandwidth for ${ssid} on ${dev}
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: ap.packets
- description: Packets for ${ssid} on ${dev}
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ap.issues
- description: Transmit Issues for ${ssid} on ${dev}
- unit: "issues/s"
- chart_type: line
- dimensions:
- - name: retries
- - name: failures
- - name: ap.signal
- description: Average Signal for ${ssid} on ${dev}
- unit: "dBm"
- chart_type: line
- dimensions:
- - name: average signal
- - name: ap.bitrate
- description: Bitrate for ${ssid} on ${dev}
- unit: "Mbps"
- chart_type: line
- dimensions:
- - name: receive
- - name: transmit
- - name: expected
diff --git a/collectors/charts.d.plugin/apcupsd/Makefile.inc b/collectors/charts.d.plugin/apcupsd/Makefile.inc
deleted file mode 100644
index 19cb9cad7..000000000
--- a/collectors/charts.d.plugin/apcupsd/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += apcupsd/apcupsd.chart.sh
-dist_chartsconfig_DATA += apcupsd/apcupsd.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += apcupsd/README.md apcupsd/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/apcupsd/README.md b/collectors/charts.d.plugin/apcupsd/README.md
deleted file mode 120000
index fc6681fe6..000000000
--- a/collectors/charts.d.plugin/apcupsd/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/apc_ups.md \ No newline at end of file
diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
deleted file mode 100644
index da9cd19c3..000000000
--- a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
+++ /dev/null
@@ -1,305 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-apcupsd_ip=
-apcupsd_port=
-
-declare -A apcupsd_sources=(
- ["local"]="127.0.0.1:3551"
-)
-
-# how frequently to collect UPS data
-apcupsd_update_every=10
-
-apcupsd_timeout=3
-
-# the priority of apcupsd related to other charts
-apcupsd_priority=90000
-
-apcupsd_get() {
- run -t $apcupsd_timeout apcaccess status "$1"
-}
-
-is_ups_alive() {
- local status
- status="$(apcupsd_get "$1" | sed -e 's/STATUS.*: //' -e 't' -e 'd')"
- case "$status" in
- "" | "COMMLOST" | "SHUTTING DOWN") return 1 ;;
- *) return 0 ;;
- esac
-}
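A hedged illustration of the extraction above: the sed expression prints only the value of the `STATUS` line (the `t`/`d` pair keeps the line where the substitution hit and deletes everything else). The sample apcaccess-style lines are fabricated.

```bash
printf 'APC      : 001,036,0879\nSTATUS   : ONLINE\nBCHARGE  : 100.0 Percent\n' |
  sed -e 's/STATUS.*: //' -e 't' -e 'd'
# prints: ONLINE
```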
-
-apcupsd_check() {
-
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- require_cmd apcaccess || return 1
-
- # backwards compatibility
- if [ "${apcupsd_ip}:${apcupsd_port}" != ":" ]; then
- apcupsd_sources["local"]="${apcupsd_ip}:${apcupsd_port}"
- fi
-
- local host working=0 failed=0
- for host in "${!apcupsd_sources[@]}"; do
- apcupsd_get "${apcupsd_sources[${host}]}" >/dev/null
- # shellcheck disable=2181
- if [ $? -ne 0 ]; then
- error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}."
- failed=$((failed + 1))
- else
- if ! is_ups_alive "${apcupsd_sources[${host}]}"; then
- error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online."
- failed=$((failed + 1))
- else
- working=$((working + 1))
- fi
- fi
- done
-
- if [ ${working} -eq 0 ]; then
- error "No APC UPSes found available."
- return 1
- fi
-
- return 0
-}
-
-apcupsd_create() {
- local host
- for host in "${!apcupsd_sources[@]}"; do
- # create the charts
- cat <<EOF
-CHART apcupsd_${host}.charge '' "UPS Charge" "percentage" ups apcupsd.charge area $((apcupsd_priority + 2)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION battery_charge charge absolute 1 100
-
-CHART apcupsd_${host}.battery_voltage '' "UPS Battery Voltage" "Volts" ups apcupsd.battery.voltage line $((apcupsd_priority + 4)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION battery_voltage voltage absolute 1 100
-DIMENSION battery_voltage_nominal nominal absolute 1 100
-
-CHART apcupsd_${host}.input_voltage '' "UPS Input Voltage" "Volts" input apcupsd.input.voltage line $((apcupsd_priority + 5)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION input_voltage voltage absolute 1 100
-DIMENSION input_voltage_min min absolute 1 100
-DIMENSION input_voltage_max max absolute 1 100
-
-CHART apcupsd_${host}.input_frequency '' "UPS Input Frequency" "Hz" input apcupsd.input.frequency line $((apcupsd_priority + 6)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION input_frequency frequency absolute 1 100
-
-CHART apcupsd_${host}.output_voltage '' "UPS Output Voltage" "Volts" output apcupsd.output.voltage line $((apcupsd_priority + 7)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION output_voltage voltage absolute 1 100
-DIMENSION output_voltage_nominal nominal absolute 1 100
-
-CHART apcupsd_${host}.load '' "UPS Load" "percentage" ups apcupsd.load area $((apcupsd_priority)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION load load absolute 1 100
-
-CHART apcupsd_${host}.load_usage '' "UPS Load Usage" "Watts" ups apcupsd.load_usage area $((apcupsd_priority + 1)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION load_usage load absolute 1 100
-
-CHART apcupsd_${host}.temp '' "UPS Temperature" "Celsius" ups apcupsd.temperature line $((apcupsd_priority + 8)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION temp temp absolute 1 100
-
-CHART apcupsd_${host}.time '' "UPS Time Remaining" "Minutes" ups apcupsd.time area $((apcupsd_priority + 3)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION time time absolute 1 100
-
-CHART apcupsd_${host}.online '' "UPS ONLINE flag" "boolean" ups apcupsd.online line $((apcupsd_priority + 9)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION online online absolute 1 1
-
-CHART apcupsd_${host}.selftest '' "UPS Self-Test status" "status" ups apcupsd.selftest line $((apcupsd_priority + 10)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION selftest_OK 'OK' absolute 1 1
-DIMENSION selftest_NO 'NO' absolute 1 1
-DIMENSION selftest_BT 'BT' absolute 1 1
-DIMENSION selftest_NG 'NG' absolute 1 1
-
-CHART apcupsd_${host}.status '' "UPS Status" "status" ups apcupsd.status line $((apcupsd_priority + 11)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION status_ONLINE 'ONLINE' absolute 1 1
-DIMENSION status_ONBATT 'ONBATT' absolute 1 1
-DIMENSION status_OVERLOAD 'OVERLOAD' absolute 1 1
-DIMENSION status_LOWBATT 'LOWBATT' absolute 1 1
-DIMENSION status_REPLACEBATT 'REPLACEBATT' absolute 1 1
-DIMENSION status_NOBATT 'NOBATT' absolute 1 1
-DIMENSION status_SLAVE 'SLAVE' absolute 1 1
-DIMENSION status_SLAVEDOWN 'SLAVEDOWN' absolute 1 1
-DIMENSION status_COMMLOST 'COMMLOST' absolute 1 1
-DIMENSION status_CAL 'CAL' absolute 1 1
-DIMENSION status_TRIM 'TRIM' absolute 1 1
-DIMENSION status_BOOST 'BOOST' absolute 1 1
-DIMENSION status_SHUTTING_DOWN 'SHUTTING_DOWN' absolute 1 1
-
-EOF
- done
- return 0
-}
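For readers unfamiliar with the plugin protocol emitted above, the CHART field order per netdata's external-plugin API is shown below; the expanded example assumes host `local` and the defaults defined earlier.

```bash
# CHART type.id name title units family context charttype priority update_every [options] [plugin] [module]
# With apcupsd_priority=90000 and apcupsd_update_every=10, the first CHART
# line above expands to (illustrative):
# CHART apcupsd_local.charge '' "UPS Charge" "percentage" ups apcupsd.charge area 90002 10 '' '' 'apcupsd'
```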
-
-apcupsd_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- local host working=0 failed=0
- for host in "${!apcupsd_sources[@]}"; do
- apcupsd_get "${apcupsd_sources[${host}]}" | awk "
-
-BEGIN {
- battery_charge = 0;
- battery_voltage = 0;
- battery_voltage_nominal = 0;
- input_voltage = 0;
- input_voltage_min = 0;
- input_voltage_max = 0;
- input_frequency = 0;
- output_voltage = 0;
- output_voltage_nominal = 0;
- load = 0;
- temp = 0;
- time = 0;
- nompower = 0;
- load_usage = 0;
- selftest_OK = 0;
- selftest_NO = 0;
- selftest_BT = 0;
- selftest_NG = 0;
- status_ONLINE = 0;
- status_CAL = 0;
- status_TRIM = 0;
- status_BOOST = 0;
- status_ONBATT = 0;
- status_OVERLOAD = 0;
- status_LOWBATT = 0;
- status_REPLACEBATT = 0;
- status_NOBATT = 0;
- status_SLAVE = 0;
- status_SLAVEDOWN = 0;
- status_COMMLOST = 0;
- status_SHUTTING_DOWN = 0;
-
-}
-/^BCHARGE.*/ { battery_charge = \$3 * 100 };
-/^BATTV.*/ { battery_voltage = \$3 * 100 };
-/^NOMBATTV.*/ { battery_voltage_nominal = \$3 * 100 };
-/^LINEV.*/ { input_voltage = \$3 * 100 };
-/^MINLINEV.*/ { input_voltage_min = \$3 * 100 };
-/^MAXLINEV.*/ { input_voltage_max = \$3 * 100 };
-/^LINEFREQ.*/ { input_frequency = \$3 * 100 };
-/^OUTPUTV.*/ { output_voltage = \$3 * 100 };
-/^NOMOUTV.*/ { output_voltage_nominal = \$3 * 100 };
-/^LOADPCT.*/ { load = \$3 * 100 };
-/^ITEMP.*/ { temp = \$3 * 100 };
-/^NOMPOWER.*/ { nompower = \$3 };
-/^TIMELEFT.*/ { time = \$3 * 100 };
-/^STATUS.*/ { online=(\$3 != \"COMMLOST\" && !(\$3 == \"SHUTTING\" && \$4 == \"DOWN\"))?1:0; };
-/^SELFTEST.*/ { selftest_OK = (\$3 == \"OK\") ? 1 : 0;
- selftest_NO = (\$3 == \"NO\") ? 1 : 0;
- selftest_BT = (\$3 == \"BT\") ? 1 : 0;
- selftest_NG = (\$3 == \"NG\") ? 1 : 0;
- };
-/^STATUS.*/ { status_ONLINE = (\$3 == \"ONLINE\") ? 1 : 0;
- status_CAL = (\$3 == \"CAL\") ? 1 : 0;
- status_TRIM = (\$3 == \"TRIM\") ? 1 : 0;
- status_BOOST = (\$3 == \"BOOST\") ? 1 : 0;
- status_ONBATT = (\$3 == \"ONBATT\") ? 1 : 0;
- status_OVERLOAD = (\$3 == \"OVERLOAD\") ? 1 : 0;
- status_LOWBATT = (\$3 == \"LOWBATT\") ? 1 : 0;
- status_REPLACEBATT = (\$3 == \"REPLACEBATT\") ? 1 : 0;
- status_NOBATT = (\$3 == \"NOBATT\") ? 1 : 0;
- status_SLAVE = (\$3 == \"SLAVE\") ? 1 : 0;
- status_SLAVEDOWN = (\$3 == \"SLAVEDOWN\") ? 1 : 0;
- status_COMMLOST = (\$3 == \"COMMLOST\") ? 1 : 0;
- status_SHUTTING_DOWN = (\$3 == \"SHUTTING\" && \$4 == \"DOWN\") ? 1 : 0;
- };
-
-END {
- { load_usage = nompower * load / 100 };
-
- print \"BEGIN apcupsd_${host}.online $1\";
- print \"SET online = \" online;
- print \"END\"
-
- if (online == 1) {
- print \"BEGIN apcupsd_${host}.charge $1\";
- print \"SET battery_charge = \" battery_charge;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.battery_voltage $1\";
- print \"SET battery_voltage = \" battery_voltage;
- print \"SET battery_voltage_nominal = \" battery_voltage_nominal;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.input_voltage $1\";
- print \"SET input_voltage = \" input_voltage;
- print \"SET input_voltage_min = \" input_voltage_min;
- print \"SET input_voltage_max = \" input_voltage_max;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.input_frequency $1\";
- print \"SET input_frequency = \" input_frequency;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.output_voltage $1\";
- print \"SET output_voltage = \" output_voltage;
- print \"SET output_voltage_nominal = \" output_voltage_nominal;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.load $1\";
- print \"SET load = \" load;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.load_usage $1\";
- print \"SET load_usage = \" load_usage;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.temp $1\";
- print \"SET temp = \" temp;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.time $1\";
- print \"SET time = \" time;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.selftest $1\";
- print \"SET selftest_OK = \" selftest_OK;
- print \"SET selftest_NO = \" selftest_NO;
- print \"SET selftest_BT = \" selftest_BT;
- print \"SET selftest_NG = \" selftest_NG;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.status $1\";
- print \"SET status_ONLINE = \" status_ONLINE;
- print \"SET status_ONBATT = \" status_ONBATT;
- print \"SET status_OVERLOAD = \" status_OVERLOAD;
- print \"SET status_LOWBATT = \" status_LOWBATT;
- print \"SET status_REPLACEBATT = \" status_REPLACEBATT;
- print \"SET status_NOBATT = \" status_NOBATT;
- print \"SET status_SLAVE = \" status_SLAVE;
- print \"SET status_SLAVEDOWN = \" status_SLAVEDOWN;
- print \"SET status_COMMLOST = \" status_COMMLOST;
- print \"SET status_CAL = \" status_CAL;
- print \"SET status_TRIM = \" status_TRIM;
- print \"SET status_BOOST = \" status_BOOST;
- print \"SET status_SHUTTING_DOWN = \" status_SHUTTING_DOWN;
- print \"END\";
- }
-}"
- # shellcheck disable=SC2181
- if [ $? -ne 0 ]; then
- failed=$((failed + 1))
- error "failed to get values for APC UPS ${host} on ${apcupsd_sources[${host}]}" && return 1
- else
- working=$((working + 1))
- fi
- done
-
- [ $working -eq 0 ] && error "failed to get values from all APC UPSes" && return 1
-
- return 0
-}
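Worth noting: the `* 100` in the awk rules pairs with the `absolute 1 100` divisors in the DIMENSION lines, a fixed-point trick that keeps the plugin integer-only while the dashboard still shows two decimals. A minimal sketch with a fabricated value:

```bash
echo 'BCHARGE  : 42.5 Percent' | awk '/^BCHARGE/ { print "SET battery_charge = " $3 * 100 }'
# prints: SET battery_charge = 4250   (netdata divides by 100 and renders 42.50)
```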
diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.conf b/collectors/charts.d.plugin/apcupsd/apcupsd.conf
deleted file mode 100644
index 679c0d61b..000000000
--- a/collectors/charts.d.plugin/apcupsd/apcupsd.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# add all your APC UPSes in this array - uncomment it too
-#declare -A apcupsd_sources=(
-# ["local"]="127.0.0.1:3551"
-#)
-
-# how long to wait for apcupsd to respond
-#apcupsd_timeout=3
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#apcupsd_update_every=10
-
-# the charts priority on the dashboard
-#apcupsd_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#apcupsd_retries=10
diff --git a/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md b/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md
deleted file mode 100644
index a5c1f9613..000000000
--- a/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md
+++ /dev/null
@@ -1,203 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/apcupsd/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/apcupsd/metadata.yaml"
-sidebar_label: "APC UPS"
-learn_status: "Published"
-learn_rel_path: "Data Collection/UPS"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# APC UPS
-
-
-<img src="https://netdata.cloud/img/apc.svg" width="150"/>
-
-
-Plugin: charts.d.plugin
-Module: apcupsd
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. Enhance your power supply reliability with real-time APC UPS metrics.
-
-The collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics.
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 using the `apcaccess` utility.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per ups
-
-Metrics related to UPS. Each UPS provides its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| apcupsd.charge | charge | percentage |
-| apcupsd.battery.voltage | voltage, nominal | Volts |
-| apcupsd.input.voltage | voltage, min, max | Volts |
-| apcupsd.output.voltage | voltage, nominal | Volts |
-| apcupsd.input.frequency | frequency | Hz |
-| apcupsd.load | load | percentage |
-| apcupsd.load_usage | load | Watts |
-| apcupsd.temperature | temp | Celsius |
-| apcupsd.time | time | Minutes |
-| apcupsd.online | online | boolean |
-| apcupsd.selftest | OK, NO, BT, NG | status |
-| apcupsd.status | ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, CAL, TRIM, BOOST, SHUTTING_DOWN | status |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ apcupsd_ups_charge ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.charge | average UPS charge over the last minute |
-| [ apcupsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.load | average UPS load over the last 10 minutes |
-| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.load | number of seconds since the last successful data collection |
-| [ apcupsd_selftest_warning ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.selftest | self-test failed due to insufficient battery capacity or due to overload. |
-| [ apcupsd_status_onbatt ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has switched to battery power because the input power has failed |
-| [ apcupsd_status_overload ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS is overloaded and cannot supply enough power to the load |
-| [ apcupsd_status_lowbatt ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery is low and needs to be recharged |
-| [ apcupsd_status_replacebatt ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery has reached the end of its lifespan and needs to be replaced |
-| [ apcupsd_status_nobatt ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has no battery |
-| [ apcupsd_status_commlost ](https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS communication link is lost |
-
-
-## Setup
-
-### Prerequisites
-
-#### Install charts.d plugin
-
-If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
-
-
-#### Required software
-
-Make sure `apcaccess` and `apcupsd` are installed and running.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `charts.d/apcupsd.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config charts.d/apcupsd.conf
-```
-#### Options
-
-The config file is sourced by the charts.d plugin. It's a standard bash file.
-
-The following collapsed table contains all the options that can be configured for the apcupsd collector.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| apcupsd_sources | This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it. | 127.0.0.1:3551 | no |
-| apcupsd_timeout | How long to wait for apcupsd to respond. | 3 | no |
-| apcupsd_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |
-| apcupsd_priority | The charts priority on the dashboard. | 90000 | no |
-| apcupsd_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |
-
-</details>
-
-#### Examples
-
-##### Multiple apcupsd sources
-
-Specify multiple apcupsd sources along with a custom update interval
-
-```bash
-# add all your APC UPSes in this array - uncomment it too
-declare -A apcupsd_sources=(
- ["local"]="127.0.0.1:3551",
- ["remote"]="1.2.3.4:3551"
-)
-
-# how long to wait for apcupsd to respond
-#apcupsd_timeout=3
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-apcupsd_update_every=5
-
-# the charts priority on the dashboard
-#apcupsd_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#apcupsd_retries=10
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `apcupsd` collector, run the `charts.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `charts.d.plugin` to debug the collector:
-
- ```bash
- ./charts.d.plugin debug 1 apcupsd
- ```
-
-
diff --git a/collectors/charts.d.plugin/apcupsd/metadata.yaml b/collectors/charts.d.plugin/apcupsd/metadata.yaml
deleted file mode 100644
index c333dc964..000000000
--- a/collectors/charts.d.plugin/apcupsd/metadata.yaml
+++ /dev/null
@@ -1,256 +0,0 @@
-plugin_name: charts.d.plugin
-modules:
- - meta:
- plugin_name: charts.d.plugin
- module_name: apcupsd
- monitored_instance:
- name: APC UPS
- link: "https://www.apc.com"
- categories:
- - data-collection.ups
- icon_filename: "apc.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - ups
- - apc
- - power
- - supply
- - battery
- - apcupsd
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. Enhance your power supply reliability with real-time APC UPS metrics."
- method_description: "The collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "By default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 with using the `apcaccess` utility."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Install charts.d plugin"
- description: |
- If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
- - title: "Required software"
- description: "Make sure the `apcaccess` and `apcupsd` are installed and running."
- configuration:
- file:
- name: charts.d/apcupsd.conf
- options:
- description: |
- The config file is sourced by the charts.d plugin. It's a standard bash file.
-
- The following collapsed table contains all the options that can be configured for the apcupsd collector.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: apcupsd_sources
- description: This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it.
- default_value: "127.0.0.1:3551"
- required: false
- - name: apcupsd_timeout
- description: How long to wait for apcupsd to respond.
- default_value: 3
- required: false
- - name: apcupsd_update_every
- description: The data collection frequency. If unset, will inherit the netdata update frequency.
- default_value: 1
- required: false
- - name: apcupsd_priority
- description: The charts priority on the dashboard.
- default_value: 90000
- required: false
- - name: apcupsd_retries
- description: The number of retries to do in case of failure before disabling the collector.
- default_value: 10
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Multiple apcupsd sources
- description: Specify multiple apcupsd sources along with a custom update interval
- config: |
- # add all your APC UPSes in this array - uncomment it too
- declare -A apcupsd_sources=(
- ["local"]="127.0.0.1:3551",
- ["remote"]="1.2.3.4:3551"
- )
-
- # how long to wait for apcupsd to respond
- #apcupsd_timeout=3
-
- # the data collection frequency
- # if unset, will inherit the netdata update frequency
- apcupsd_update_every=5
-
- # the charts priority on the dashboard
- #apcupsd_priority=90000
-
- # the number of retries to do in case of failure
- # before disabling the module
- #apcupsd_retries=10
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: apcupsd_ups_charge
- link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf
- metric: apcupsd.charge
- info: average UPS charge over the last minute
- os: "*"
- - name: apcupsd_10min_ups_load
- link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf
- metric: apcupsd.load
- info: average UPS load over the last 10 minutes
- os: "*"
- - name: apcupsd_last_collected_secs
- link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf
- metric: apcupsd.load
- info: number of seconds since the last successful data collection
- - name: apcupsd_selftest_warning
- link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf
- metric: apcupsd.selftest
- info: self-test failed due to insufficient battery capacity or due to overload.
- - name: apcupsd_status_onbatt
- link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf
- metric: apcupsd.status
- info: APC UPS has switched to battery power because the input power has failed
- - name: apcupsd_status_overload
- link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf
- metric: apcupsd.status
- info: APC UPS is overloaded and cannot supply enough power to the load
- - name: apcupsd_status_lowbatt
- link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf
- metric: apcupsd.status
- info: APC UPS battery is low and needs to be recharged
- - name: apcupsd_status_replacebatt
- link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf
- metric: apcupsd.status
- info: APC UPS battery has reached the end of its lifespan and needs to be replaced
- - name: apcupsd_status_nobatt
- link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf
- metric: apcupsd.status
- info: APC UPS has no battery
- - name: apcupsd_status_commlost
- link: https://github.com/netdata/netdata/blob/master/health/health.d/apcupsd.conf
- metric: apcupsd.status
- info: APC UPS communication link is lost
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: ups
- description: "Metrics related to UPS. Each UPS provides its own set of the following metrics."
- labels: []
- metrics:
- - name: apcupsd.charge
- description: UPS Charge
- unit: "percentage"
- chart_type: area
- dimensions:
- - name: charge
- - name: apcupsd.battery.voltage
- description: UPS Battery Voltage
- unit: "Volts"
- chart_type: line
- dimensions:
- - name: voltage
- - name: nominal
- - name: apcupsd.input.voltage
- description: UPS Input Voltage
- unit: "Volts"
- chart_type: line
- dimensions:
- - name: voltage
- - name: min
- - name: max
- - name: apcupsd.output.voltage
- description: UPS Output Voltage
- unit: "Volts"
- chart_type: line
- dimensions:
- - name: voltage
- - name: nominal
- - name: apcupsd.input.frequency
- description: UPS Input Frequency
- unit: "Hz"
- chart_type: line
- dimensions:
- - name: frequency
- - name: apcupsd.load
- description: UPS Load
- unit: "percentage"
- chart_type: area
- dimensions:
- - name: load
- - name: apcupsd.load_usage
- description: UPS Load Usage
- unit: "Watts"
- chart_type: area
- dimensions:
- - name: load
- - name: apcupsd.temperature
- description: UPS Temperature
- unit: "Celsius"
- chart_type: line
- dimensions:
- - name: temp
- - name: apcupsd.time
- description: UPS Time Remaining
- unit: "Minutes"
- chart_type: area
- dimensions:
- - name: time
- - name: apcupsd.online
- description: UPS ONLINE flag
- unit: "boolean"
- chart_type: line
- dimensions:
- - name: online
- - name: apcupsd.selftest
- description: UPS Self-Test status
- unit: status
- chart_type: line
- dimensions:
- - name: OK
- - name: NO
- - name: BT
- - name: NG
- - name: apcupsd.status
- description: UPS Status
- unit: status
- chart_type: line
- dimensions:
- - name: ONLINE
- - name: ONBATT
- - name: OVERLOAD
- - name: LOWBATT
- - name: REPLACEBATT
- - name: NOBATT
- - name: SLAVE
- - name: SLAVEDOWN
- - name: COMMLOST
- - name: CAL
- - name: TRIM
- - name: BOOST
- - name: SHUTTING_DOWN
diff --git a/collectors/charts.d.plugin/charts.d.conf b/collectors/charts.d.plugin/charts.d.conf
deleted file mode 100644
index 4614f259e..000000000
--- a/collectors/charts.d.plugin/charts.d.conf
+++ /dev/null
@@ -1,47 +0,0 @@
-# This is the configuration for charts.d.plugin
-
-# Each of its collectors can read configuration either from this file
-# or a NAME.conf file (where NAME is the collector name).
-# The collector specific file has higher precedence.
-
-# This file is a shell script too.
-
-# -----------------------------------------------------------------------------
-
-# number of seconds to run without restart
-# after this time, charts.d.plugin will exit
-# netdata will restart it, but a small gap
-# will appear in the charts.d.plugin charts.
-#restart_timeout=$((3600 * 4))
-
-# when making iterations, charts.d can loop more frequently
-# to prevent plugins from missing iterations.
-# this is a percentage relative to update_every to align its
-# iterations.
-# The minimum is 10%, the maximum 100%.
-# So, if update_every is 1 second and time_divisor is 50,
-# charts.d will iterate every 500ms.
-# Charts will be called to collect data only if the time
-# passed since they last collected data is equal to or
-# above their update_every.
-#time_divisor=50
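Assuming the interval scales linearly as the comment describes, the arithmetic works out as follows (a sketch, not the plugin's actual loop code):

```bash
# inner-loop interval = update_every * time_divisor / 100
# update_every=1  time_divisor=50  -> 0.5s  (the 500ms example above)
# update_every=1  time_divisor=10  -> 0.1s  (the allowed minimum)
# update_every=2  time_divisor=100 -> 2.0s  (no oversampling)
```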
-
-# -----------------------------------------------------------------------------
-
-# the default enable/disable for all charts.d collectors
-# the default is "yes"
-# enable_all_charts="yes"
-
-# BY DEFAULT ENABLED MODULES
-# ap=yes
-# apcupsd=yes
-# libreswan=yes
-# opensips=yes
-
-# -----------------------------------------------------------------------------
-# THESE NEED TO BE SET TO "force" TO BE ENABLED
-
-# Nothing useful.
-# Just an example charts.d plugin you can use as a template.
-# example=force
-# sensors=force
diff --git a/collectors/charts.d.plugin/charts.d.dryrun-helper.sh b/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
deleted file mode 100755
index 91af2c542..000000000
--- a/collectors/charts.d.plugin/charts.d.dryrun-helper.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# shellcheck disable=SC2181
-
-# will stop the script for any error
-set -e
-
-me="$0"
-name="$1"
-chart="$2"
-conf="$3"
-
-can_diff=1
-
-tmp1="$(mktemp)"
-tmp2="$(mktemp)"
-
-myset() {
- set | grep -v "^_=" | grep -v "^PIPESTATUS=" | grep -v "^BASH_LINENO="
-}
-
-# save 2 'set'
-myset >"$tmp1"
-myset >"$tmp2"
-
-# make sure they don't differ
-diff "$tmp1" "$tmp2" >/dev/null 2>&1
-if [ $? -ne 0 ]; then
- # they differ, we cannot do the check
- echo >&2 "$me: cannot check with diff."
- can_diff=0
-fi
-
-# do it again, now including the script
-myset >"$tmp1"
-
-# include the plugin and its config
-if [ -f "$conf" ]; then
- # shellcheck source=/dev/null
- . "$conf"
- if [ $? -ne 0 ]; then
- echo >&2 "$me: cannot load config file $conf"
- rm "$tmp1" "$tmp2"
- exit 1
- fi
-fi
-
-# shellcheck source=/dev/null
-. "$chart"
-if [ $? -ne 0 ]; then
- echo >&2 "$me: cannot load chart file $chart"
- rm "$tmp1" "$tmp2"
- exit 1
-fi
-
-# remove all variables starting with the plugin name
-myset | grep -v "^$name" >"$tmp2"
-
-if [ $can_diff -eq 1 ]; then
- # check if they are different
- # make sure they don't differ
- diff "$tmp1" "$tmp2" >&2
- if [ $? -ne 0 ]; then
- # they differ
- rm "$tmp1" "$tmp2"
- exit 1
- fi
-fi
-
-rm "$tmp1" "$tmp2"
-exit 0
diff --git a/collectors/charts.d.plugin/charts.d.plugin.in b/collectors/charts.d.plugin/charts.d.plugin.in
deleted file mode 100755
index 4e64b7e23..000000000
--- a/collectors/charts.d.plugin/charts.d.plugin.in
+++ /dev/null
@@ -1,809 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# charts.d.plugin allows easy development of BASH plugins
-#
-# if you need to run parallel charts.d processes, link this file to a different name
-# in the same directory, with a .plugin suffix and netdata will start both of them,
-# each will have a different config file and modules configuration directory.
-#
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin:@sbindir_POST@"
-
-PROGRAM_FILE="$0"
-MODULE_NAME="main"
-
-# -----------------------------------------------------------------------------
-# logging
-
-PROGRAM_NAME="$(basename "${0}")"
-SHORT_PROGRAM_NAME="${PROGRAM_NAME/.plugin/}"
-
-# these should be the same with syslog() priorities
-NDLP_EMERG=0 # system is unusable
-NDLP_ALERT=1 # action must be taken immediately
-NDLP_CRIT=2 # critical conditions
-NDLP_ERR=3 # error conditions
-NDLP_WARN=4 # warning conditions
-NDLP_NOTICE=5 # normal but significant condition
-NDLP_INFO=6 # informational
-NDLP_DEBUG=7 # debug-level messages
-
-# the max (numerically) log level we will log
-LOG_LEVEL=$NDLP_INFO
-
-set_log_min_priority() {
- case "${NETDATA_LOG_LEVEL,,}" in
- "emerg" | "emergency")
- LOG_LEVEL=$NDLP_EMERG
- ;;
-
- "alert")
- LOG_LEVEL=$NDLP_ALERT
- ;;
-
- "crit" | "critical")
- LOG_LEVEL=$NDLP_CRIT
- ;;
-
- "err" | "error")
- LOG_LEVEL=$NDLP_ERR
- ;;
-
- "warn" | "warning")
- LOG_LEVEL=$NDLP_WARN
- ;;
-
- "notice")
- LOG_LEVEL=$NDLP_NOTICE
- ;;
-
- "info")
- LOG_LEVEL=$NDLP_INFO
- ;;
-
- "debug")
- LOG_LEVEL=$NDLP_DEBUG
- ;;
- esac
-}
-
-set_log_min_priority
-
-log() {
- local level="${1}"
- shift 1
-
- [[ -n "$level" && -n "$LOG_LEVEL" && "$level" -gt "$LOG_LEVEL" ]] && return
-
- systemd-cat-native --log-as-netdata --newline="--NEWLINE--" <<EOFLOG
-INVOCATION_ID=${NETDATA_INVOCATION_ID}
-SYSLOG_IDENTIFIER=${PROGRAM_NAME}
-PRIORITY=${level}
-THREAD_TAG=charts.d.plugin
-ND_LOG_SOURCE=collector
-MESSAGE=${MODULE_NAME}: ${*//\\n/--NEWLINE--}
-
-EOFLOG
- # AN EMPTY LINE IS NEEDED ABOVE
-}
-
-info() {
- log "$NDLP_INFO" "${@}"
-}
-
-warning() {
- log "$NDLP_WARN" "${@}"
-}
-
-error() {
- log "$NDLP_ERR" "${@}"
-}
-
-fatal() {
- log "$NDLP_ALERT" "${@}"
- echo "DISABLE"
- exit 1
-}
-
-debug() {
- [ "$debug" = "1" ] && log "$NDLP_DEBUG" "${@}"
-}
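Typical usage of these wrappers inside the plugin or a module (illustrative messages; `systemd-cat-native` ships with netdata):

```bash
MODULE_NAME="ap"                       # tags the journal entries with the module
info "found 2 interfaces in AP mode"   # emitted at NDLP_INFO
warning "iw returned no stations"      # emitted at NDLP_WARN
# fatal logs at NDLP_ALERT, prints DISABLE so netdata stops the plugin, then exits
```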
-
-# -----------------------------------------------------------------------------
-# check for BASH v4+ (required for associative arrays)
-
-if [ ${BASH_VERSINFO[0]} -lt 4 ]; then
- echo >&2 "BASH version 4 or later is required (this is ${BASH_VERSION})."
- exit 1
-fi
-
-# -----------------------------------------------------------------------------
-# create temp dir
-
-debug=0
-TMP_DIR=
-chartsd_cleanup() {
- trap '' EXIT QUIT HUP INT TERM
-
- if [ ! -z "$TMP_DIR" -a -d "$TMP_DIR" ]; then
- [ $debug -eq 1 ] && echo >&2 "$PROGRAM_NAME: cleaning up temporary directory $TMP_DIR ..."
- rm -rf "$TMP_DIR"
- fi
- echo "EXIT"
- exit 0
-}
-trap chartsd_cleanup EXIT QUIT HUP INT TERM
-
-if [ $UID = "0" ]; then
- TMP_DIR="$(mktemp -d /var/run/netdata-${PROGRAM_NAME}-XXXXXXXXXX)"
-else
- TMP_DIR="$(mktemp -d /tmp/.netdata-${PROGRAM_NAME}-XXXXXXXXXX)"
-fi
-
-logdate() {
- date "+%Y-%m-%d %H:%M:%S"
-}
-
-# -----------------------------------------------------------------------------
-# check a few commands
-
-require_cmd() {
- local x=$(which "${1}" 2>/dev/null || command -v "${1}" 2>/dev/null)
- if [ -z "${x}" -o ! -x "${x}" ]; then
- warning "command '${1}' is not found in ${PATH}."
- eval "${1^^}_CMD=\"\""
- return 1
- fi
-
- eval "${1^^}_CMD=\"${x}\""
- return 0
-}
-
-require_cmd date || exit 1
-require_cmd sed || exit 1
-require_cmd basename || exit 1
-require_cmd dirname || exit 1
-require_cmd cat || exit 1
-require_cmd grep || exit 1
-require_cmd egrep || exit 1
-require_cmd mktemp || exit 1
-require_cmd awk || exit 1
-require_cmd timeout || exit 1
-require_cmd curl || exit 1
-
-# -----------------------------------------------------------------------------
-
-[ $((BASH_VERSINFO[0])) -lt 4 ] && fatal "BASH version 4 or later is required, but found version: ${BASH_VERSION}. Please upgrade."
-
-info "started from '$PROGRAM_FILE' with options: $*"
-
-# -----------------------------------------------------------------------------
-# internal defaults
-# netdata exposes a few environment variables for us
-
-[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
-[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
-
-pluginsd="${NETDATA_PLUGINS_DIR}"
-stockconfd="${NETDATA_STOCK_CONFIG_DIR}/${SHORT_PROGRAM_NAME}"
-userconfd="${NETDATA_USER_CONFIG_DIR}/${SHORT_PROGRAM_NAME}"
-olduserconfd="${NETDATA_USER_CONFIG_DIR}"
-chartsd="$pluginsd/../charts.d"
-
-minimum_update_frequency="${NETDATA_UPDATE_EVERY-1}"
-update_every=${minimum_update_frequency} # this will be overwritten by the command line
-
-# work around for non BASH shells
-charts_create="_create"
-charts_update="_update"
-charts_check="_check"
-charts_underscore="_"
-
-# when making iterations, charts.d can loop more frequently
-# to prevent plugins from missing iterations.
-# this is a percentage relative to update_every to align its
-# iterations.
-# The minimum is 10%, the maximum 100%.
-# So, if update_every is 1 second and time_divisor is 50,
-# charts.d will iterate every 500ms.
-# Charts will be called to collect data only if the time
-# passed since they last collected data is equal to or
-# above their update_every.
-time_divisor=50
-
-# number of seconds to run without restart
-# after this time, charts.d.plugin will exit
-# netdata will restart it
-restart_timeout=$((3600 * 4))
-
-# check if the charts.d plugins are using global variables
-# they should not.
-# It does not currently support BASH v4 arrays, so it is
-# disabled
-dryrunner=0
-
-# check for timeout command
-check_for_timeout=1
-
-# the default enable/disable value for all charts
-enable_all_charts="yes"
-
-# -----------------------------------------------------------------------------
-# parse parameters
-
-check=0
-chart_only=
-while [ ! -z "$1" ]; do
- if [ "$1" = "check" ]; then
- check=1
- shift
- continue
- fi
-
- if [ "$1" = "debug" -o "$1" = "all" ]; then
- debug=1
- LOG_LEVEL=$NDLP_DEBUG
- shift
- continue
- fi
-
- if [ -f "$chartsd/$1.chart.sh" ]; then
- debug=1
- LOG_LEVEL=$NDLP_DEBUG
- chart_only="$(echo $1.chart.sh | sed "s/\.chart\.sh$//g")"
- shift
- continue
- fi
-
- if [ -f "$chartsd/$1" ]; then
- debug=1
- LOG_LEVEL=$NDLP_DEBUG
- chart_only="$(echo $1 | sed "s/\.chart\.sh$//g")"
- shift
- continue
- fi
-
- # number check
- n="$1"
- x=$((n))
- if [ "$x" = "$n" ]; then
- shift
- update_every=$x
- [ $update_every -lt $minimum_update_frequency ] && update_every=$minimum_update_frequency
- continue
- fi
-
- fatal "Cannot understand parameter $1. Aborting."
-done
-
-# -----------------------------------------------------------------------------
-# loop control
-
-# default sleep function
-LOOPSLEEPMS_HIGHRES=0
-now_ms=
-current_time_ms_default() {
- now_ms="$(date +'%s')000"
-}
-current_time_ms="current_time_ms_default"
-current_time_ms_accuracy=1
-mysleep="sleep"
-
-# if found and included, this file overwrites loopsleepms()
-# and current_time_ms() with a high resolution timer function
-# for precise looping.
-source "$pluginsd/loopsleepms.sh.inc"
-[ $? -ne 0 ] && error "Failed to load '$pluginsd/loopsleepms.sh.inc'."
-
-# -----------------------------------------------------------------------------
-# load my configuration
-
-for myconfig in "${NETDATA_STOCK_CONFIG_DIR}/${SHORT_PROGRAM_NAME}.conf" "${NETDATA_USER_CONFIG_DIR}/${SHORT_PROGRAM_NAME}.conf"; do
- if [ -f "$myconfig" ]; then
- source "$myconfig"
- if [ $? -ne 0 ]; then
- error "Config file '$myconfig' loaded with errors."
- else
- info "Configuration file '$myconfig' loaded."
- fi
- else
- warning "Configuration file '$myconfig' not found."
- fi
-done
-
-# make sure time_divisor is right
-time_divisor=$((time_divisor))
-[ $time_divisor -lt 10 ] && time_divisor=10
-[ $time_divisor -gt 100 ] && time_divisor=100
-
-# we check for the timeout command, after we load our
-# configuration, so that the user may overwrite the
-# timeout command we use, providing a function that
-# can emulate the timeout command we need:
-# > timeout SECONDS command ...
-if [ $check_for_timeout -eq 1 ]; then
- require_cmd timeout || exit 1
-fi
-
-# -----------------------------------------------------------------------------
-# internal checks
-
-# netdata passes the requested update frequency as the first argument
-update_every=$((update_every + 1 - 1)) # makes sure it is a number
-test $update_every -eq 0 && update_every=1 # if it is zero, make it 1
-
-# check the charts.d directory
-[ ! -d "$chartsd" ] && fatal "cannot find charts directory '$chartsd'"
-
-# -----------------------------------------------------------------------------
-# library functions
-
-fixid() {
- echo "$*" |
- tr -c "[A-Z][a-z][0-9]" "_" |
- sed -e "s|^_\+||g" -e "s|_\+$||g" -e "s|_\+|_|g" |
- tr "[A-Z]" "[a-z]"
-}
-
-isvarset() {
- [ -n "$1" ] && [ "$1" != "unknown" ] && [ "$1" != "none" ]
- return $?
-}
-
-getosid() {
- if isvarset "${NETDATA_CONTAINER_OS_ID}"; then
- echo "${NETDATA_CONTAINER_OS_ID}"
- else
- echo "${NETDATA_SYSTEM_OS_ID}"
- fi
-}
-
-run() {
- local ret pid="${BASHPID}" t
-
- if [ "z${1}" = "z-t" -a "${2}" != "0" ]; then
- t="${2}"
- shift 2
- timeout "${t}" "${@}" 2>"${TMP_DIR}/run.${pid}"
- ret=$?
- else
- "${@}" 2>"${TMP_DIR}/run.${pid}"
- ret=$?
- fi
-
- if [ ${ret} -ne 0 ]; then
- {
- printf "$(logdate): ${PROGRAM_NAME}: ${status}: ${MODULE_NAME}: command '"
- printf "%q " "${@}"
- printf "' failed with code ${ret}:\n --- BEGIN TRACE ---\n"
- cat "${TMP_DIR}/run.${pid}"
- printf " --- END TRACE ---\n"
- } >&2
- fi
- rm -f "${TMP_DIR}/run.${pid}"
-
- return ${ret}
-}
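Usage sketch for `run` (command and address are illustrative, and it assumes the plugin environment above, i.e. `TMP_DIR`, `logdate` and `MODULE_NAME`): with `-t SECONDS` it wraps the command in `timeout`, and stderr is buffered and replayed only when the command fails.

```bash
run -t 3 apcaccess status 127.0.0.1:3551 >/dev/null || echo "apcaccess failed"
```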
-
-# convert any floating point number
-# to integer, give a multiplier
-# the result is stored in ${FLOAT2INT_RESULT}
-# so that no fork is necessary
-# the multiplier must be a power of 10
-float2int() {
- local f m="$2" a b l v=($1)
- f=${v[0]}
-
- # the length of the multiplier - 1
- l=$((${#m} - 1))
-
- # check if the number is in scientific notation
- if [[ ${f} =~ ^[[:space:]]*(-)?[0-9.]+(e|E)(\+|-)[0-9]+ ]]; then
- # convert it to decimal
- # unfortunately, this fork cannot be avoided
- # if you know of a way to avoid it, please let me know
- f=$(printf "%0.${l}f" ${f})
- fi
-
- # split the floating point number
- # in integer (a) and decimal (b)
- a=${f/.*/}
- b=${f/*./}
-
- # if the integer part is missing
- # set it to zero
- [ -z "${a}" ] && a="0"
-
- # strip leading zeros from the integer part
- # base 10 conversion
- a=$((10#$a))
-
- # check the length of the decimal part
- # against the length of the multiplier
- if [ ${#b} -gt ${l} ]; then
- # too many digits - take the most significant
- b=${b:0:l}
-
- elif [ ${#b} -lt ${l} ]; then
- # too few digits - pad with zero on the right
- local z="00000000000000000000000" r=$((l - ${#b}))
- b="${b}${z:0:r}"
- fi
-
- # strip leading zeros from the decimal part
- # base 10 conversion
- b=$((10#$b))
-
- # store the result
- FLOAT2INT_RESULT=$(((a * m) + b))
-}
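Usage sketch for `float2int`: the multiplier must be a power of 10 and the result comes back in `FLOAT2INT_RESULT`, so callers avoid a subshell fork.

```bash
float2int "42.5" 100
echo "${FLOAT2INT_RESULT}"    # 4250 (42.5 scaled by 100)

float2int "3.14159" 1000
echo "${FLOAT2INT_RESULT}"    # 3141 (extra decimals truncated to the multiplier)
```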
-
-# -----------------------------------------------------------------------------
-# charts check functions
-
-all_charts() {
- cd "$chartsd"
- [ $? -ne 0 ] && error "cannot cd to $chartsd" && return 1
-
- ls *.chart.sh | sed "s/\.chart\.sh$//g"
-}
-
-declare -A charts_enable_keyword=(
- ['apache']="force"
- ['cpu_apps']="force"
- ['cpufreq']="force"
- ['example']="force"
- ['exim']="force"
- ['hddtemp']="force"
- ['load_average']="force"
- ['mem_apps']="force"
- ['mysql']="force"
- ['nginx']="force"
- ['phpfpm']="force"
- ['postfix']="force"
- ['sensors']="force"
- ['squid']="force"
- ['tomcat']="force"
-)
-
-declare -A obsolete_charts=(
- ['apache']="python.d.plugin module"
- ['cpu_apps']="apps.plugin"
- ['cpufreq']="proc plugin"
- ['exim']="python.d.plugin module"
- ['hddtemp']="python.d.plugin module"
- ['load_average']="proc plugin"
- ['mem_apps']="proc plugin"
- ['mysql']="python.d.plugin module"
- ['nginx']="python.d.plugin module"
- ['phpfpm']="python.d.plugin module"
- ['postfix']="python.d.plugin module"
- ['squid']="python.d.plugin module"
- ['tomcat']="python.d.plugin module"
-)
-
-all_enabled_charts() {
- local charts enabled required
-
- # find all enabled charts
- for chart in $(all_charts); do
- MODULE_NAME="${chart}"
-
- if [ -n "${obsolete_charts["$MODULE_NAME"]}" ]; then
- debug "is replaced by ${obsolete_charts["$MODULE_NAME"]}, skipping it."
- continue
- fi
-
- eval "enabled=\$$chart"
- if [ -z "${enabled}" ]; then
- enabled="${enable_all_charts}"
- fi
-
- required="${charts_enable_keyword[${chart}]}"
- [ -z "${required}" ] && required="yes"
-
- if [ ! "${enabled}" = "${required}" ]; then
- info "is disabled. Add a line with $chart=$required in '${NETDATA_USER_CONFIG_DIR}/${PROGRAM_NAME}.conf' to enable it (or remove the line that disables it)."
- else
- debug "is enabled for auto-detection."
- local charts="$charts $chart"
- fi
- done
- MODULE_NAME="main"
-
- local charts2=
- for chart in $charts; do
- MODULE_NAME="${chart}"
-
- # check the enabled charts
- local check="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_check()")"
- if [ -z "$check" ]; then
- error "module '$chart' does not seem to have a $chart$charts_check() function. Disabling it."
- continue
- fi
-
- local create="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_create()")"
- if [ -z "$create" ]; then
- error "module '$chart' does not seem to have a $chart$charts_create() function. Disabling it."
- continue
- fi
-
- local update="$(cat "$chartsd/$chart.chart.sh" | sed "s/^ \+//g" | grep "^$chart$charts_update()")"
- if [ -z "$update" ]; then
- error "module '$chart' does not seem to have a $chart$charts_update() function. Disabling it."
- continue
- fi
-
- # check its config
- #if [ -f "$userconfd/$chart.conf" ]
- #then
- # if [ ! -z "$( cat "$userconfd/$chart.conf" | sed "s/^ \+//g" | grep -v "^$" | grep -v "^#" | grep -v "^$chart$charts_underscore" )" ]
- # then
- # error "module's $chart config $userconfd/$chart.conf should only have lines starting with $chart$charts_underscore . Disabling it."
- # continue
- # fi
- #fi
-
- #if [ $dryrunner -eq 1 ]
- # then
- # "$pluginsd/charts.d.dryrun-helper.sh" "$chart" "$chartsd/$chart.chart.sh" "$userconfd/$chart.conf" >/dev/null
- # if [ $? -ne 0 ]
- # then
- # error "module's $chart did not pass the dry run check. This means it uses global variables not starting with $chart. Disabling it."
- # continue
- # fi
- #fi
-
- local charts2="$charts2 $chart"
- done
- MODULE_NAME="main"
-
- echo $charts2
- debug "enabled charts: $charts2"
-}
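The greps above define the contract a charts.d module must satisfy: functions named `NAME_check()`, `NAME_create()` and `NAME_update()` at the start of a line in `NAME.chart.sh`. A minimal hypothetical module that would pass them (`example2` is an assumed name; the plugin fills in `example2_update_every` before calling check):

```bash
# example2.chart.sh - hypothetical skeleton of a charts.d module
example2_update_every=
example2_priority=150000

example2_check() { return 0; }   # 0 enables the module

example2_create() {
  cat <<EOF
CHART example2.random '' "A random number" "value" example2 example2.random line $((example2_priority)) $example2_update_every
DIMENSION random '' absolute 1 1
EOF
  return 0
}

example2_update() {
  # $1 is the microseconds since the last call
  cat <<EOF
BEGIN example2.random $1
SET random = $RANDOM
END
EOF
  return 0
}
```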
-
-# -----------------------------------------------------------------------------
-# load the charts
-
-suffix_retries="_retries"
-suffix_update_every="_update_every"
-active_charts=
-for chart in $(all_enabled_charts); do
- MODULE_NAME="${chart}"
-
- debug "loading module: '$chartsd/$chart.chart.sh'"
-
- source "$chartsd/$chart.chart.sh"
- [ $? -ne 0 ] && warning "Module '$chartsd/$chart.chart.sh' loaded with errors."
-
- # first load the stock config
- if [ -f "$stockconfd/$chart.conf" ]; then
- debug "loading module configuration: '$stockconfd/$chart.conf'"
- source "$stockconfd/$chart.conf"
- [ $? -ne 0 ] && warning "Config file '$stockconfd/$chart.conf' loaded with errors."
- else
- debug "not found module configuration: '$stockconfd/$chart.conf'"
- fi
-
- # then load the user config (it overwrites the stock)
- if [ -f "$userconfd/$chart.conf" ]; then
- debug "loading module configuration: '$userconfd/$chart.conf'"
- source "$userconfd/$chart.conf"
- [ $? -ne 0 ] && warning "Config file '$userconfd/$chart.conf' loaded with errors."
- else
- debug "not found module configuration: '$userconfd/$chart.conf'"
-
- if [ -f "$olduserconfd/$chart.conf" ]; then
- # support for very old netdata that had the charts.d module configs in /etc/netdata
- info "loading module configuration from obsolete location: '$olduserconfd/$chart.conf'"
- source "$olduserconfd/$chart.conf"
- [ $? -ne 0 ] && warning "Config file '$olduserconfd/$chart.conf' loaded with errors."
- fi
- fi
-
- eval "dt=\$$chart$suffix_update_every"
- dt=$((dt + 1 - 1)) # make sure it is a number
- if [ $dt -lt $update_every ]; then
- eval "$chart$suffix_update_every=$update_every"
- fi
-
- $chart$charts_check
- if [ $? -eq 0 ]; then
- debug "module '$chart' activated"
- active_charts="$active_charts $chart"
- else
- error "module's '$chart' check() function reports failure."
- fi
-done
-MODULE_NAME="main"
-debug "activated modules: $active_charts"
-
-# -----------------------------------------------------------------------------
-# check overwrites
-
-# enable work time reporting
-debug_time=
-test $debug -eq 1 && debug_time=tellwork
-
-# if we only need a specific chart, remove all the others
-if [ ! -z "${chart_only}" ]; then
- debug "requested to run only for: '${chart_only}'"
- check_charts=
- for chart in $active_charts; do
- if [ "$chart" = "$chart_only" ]; then
- check_charts="$chart"
- break
- fi
- done
- active_charts="$check_charts"
-fi
-debug "activated charts: $active_charts"
-
-# stop if we just need a pre-check
-if [ $check -eq 1 ]; then
- info "CHECK RESULT"
- info "Will run the charts: $active_charts"
- exit 0
-fi
-
-# -----------------------------------------------------------------------------
-
-cd "${TMP_DIR}" || exit 1
-
-# -----------------------------------------------------------------------------
-# create charts
-
-run_charts=
-for chart in $active_charts; do
- MODULE_NAME="${chart}"
-
- debug "calling '$chart$charts_create()'..."
- $chart$charts_create
- if [ $? -eq 0 ]; then
- run_charts="$run_charts $chart"
- debug "'$chart' initialized."
- else
- error "module's '$chart' function '$chart$charts_create()' reports failure."
- fi
-done
-MODULE_NAME="main"
-debug "run_charts='$run_charts'"
-
-# -----------------------------------------------------------------------------
-# update dimensions
-
-[ -z "$run_charts" ] && fatal "No charts to collect data from."
-
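-# probe our stdout: if it is not a terminal and a write to it fails,
-# netdata has closed the pipe, so clean up and stop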
-keepalive() {
- if [ ! -t 1 ] && ! printf "\n"; then
- chartsd_cleanup
- fi
-}
-
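-# per-module scheduling state, keyed by module name: last run and next due
-# timestamps (in ms), effective update frequency, allowed retries, number of
-# runs so far and number of consecutive failures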
-declare -A charts_last_update=() charts_update_every=() charts_retries=() charts_next_update=() charts_run_counter=() charts_serial_failures=()
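-
-# the main data collection loop: run each module at its own frequency and
-# disable modules whose update() keeps failing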
-global_update() {
- local exit_at \
- c=0 dt ret last_ms exec_start_ms exec_end_ms \
- chart now_charts=() next_charts=($run_charts) \
- next_ms x seconds millis
-
- # return the current time in ms in $now_ms
- ${current_time_ms}
-
- exit_at=$((now_ms + (restart_timeout * 1000)))
-
- for chart in $run_charts; do
- eval "charts_update_every[$chart]=\$$chart$suffix_update_every"
- test -z "${charts_update_every[$chart]}" && charts_update_every[$chart]=$update_every
-
- eval "charts_retries[$chart]=\$$chart$suffix_retries"
- test -z "${charts_retries[$chart]}" && charts_retries[$chart]=10
-
- charts_last_update[$chart]=$((now_ms - (now_ms % (charts_update_every[$chart] * 1000))))
- charts_next_update[$chart]=$((charts_last_update[$chart] + (charts_update_every[$chart] * 1000)))
- charts_run_counter[$chart]=0
- charts_serial_failures[$chart]=0
-
- echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]} '' '' '$chart'"
- echo "DIMENSION run_time 'run time' absolute 1 1"
- done
-
- # the main loop
- while [ "${#next_charts[@]}" -gt 0 ]; do
- keepalive
-
- c=$((c + 1))
- now_charts=("${next_charts[@]}")
- next_charts=()
-
- # return the current time in ms in $now_ms
- ${current_time_ms}
-
- for chart in "${now_charts[@]}"; do
- MODULE_NAME="${chart}"
-
- if [ ${now_ms} -ge ${charts_next_update[$chart]} ]; then
- last_ms=${charts_last_update[$chart]}
- dt=$((now_ms - last_ms))
-
- charts_last_update[$chart]=${now_ms}
-
- while [ ${charts_next_update[$chart]} -lt ${now_ms} ]; do
- charts_next_update[$chart]=$((charts_next_update[$chart] + (charts_update_every[$chart] * 1000)))
- done
-
- # the first call should not give a duration
- # so that netdata calibrates to current time
- dt=$((dt * 1000))
- charts_run_counter[$chart]=$((charts_run_counter[$chart] + 1))
- if [ ${charts_run_counter[$chart]} -eq 1 ]; then
- dt=
- fi
-
- exec_start_ms=$now_ms
- $chart$charts_update $dt
- ret=$?
-
- # return the current time in ms in $now_ms
- ${current_time_ms}
- exec_end_ms=$now_ms
-
- echo "BEGIN netdata.plugin_chartsd_$chart $dt"
- echo "SET run_time = $((exec_end_ms - exec_start_ms))"
- echo "END"
-
- if [ $ret -eq 0 ]; then
- charts_serial_failures[$chart]=0
- next_charts+=($chart)
- else
- charts_serial_failures[$chart]=$((charts_serial_failures[$chart] + 1))
-
- if [ ${charts_serial_failures[$chart]} -gt ${charts_retries[$chart]} ]; then
- error "module's '$chart' update() function reported failure ${charts_serial_failures[$chart]} times. Disabling it."
- else
- error "module's '$chart' update() function reports failure. Will keep trying for a while."
- next_charts+=($chart)
- fi
- fi
- else
- next_charts+=($chart)
- fi
- done
- MODULE_NAME="main"
-
- # wait the time you are required to
- next_ms=$((now_ms + (update_every * 1000 * 100)))
- for x in "${charts_next_update[@]}"; do [ ${x} -lt ${next_ms} ] && next_ms=${x}; done
- next_ms=$((next_ms - now_ms))
-
- if [ ${LOOPSLEEPMS_HIGHRES} -eq 1 -a ${next_ms} -gt 0 ]; then
- next_ms=$((next_ms + current_time_ms_accuracy))
- seconds=$((next_ms / 1000))
- millis=$((next_ms % 1000))
- if [ ${millis} -lt 10 ]; then
- millis="00${millis}"
- elif [ ${millis} -lt 100 ]; then
- millis="0${millis}"
- fi
-
- debug "sleeping for ${seconds}.${millis} seconds."
- ${mysleep} ${seconds}.${millis}
- else
- debug "sleeping for ${update_every} seconds."
- ${mysleep} $update_every
- fi
-
- test ${now_ms} -ge ${exit_at} && exit 0
- done
-
- fatal "nothing left to do, exiting..."
-}
-
-global_update
diff --git a/collectors/charts.d.plugin/example/Makefile.inc b/collectors/charts.d.plugin/example/Makefile.inc
deleted file mode 100644
index e6838fbbe..000000000
--- a/collectors/charts.d.plugin/example/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += example/example.chart.sh
-dist_chartsconfig_DATA += example/example.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += example/README.md example/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/example/README.md b/collectors/charts.d.plugin/example/README.md
deleted file mode 100644
index c2860eb3d..000000000
--- a/collectors/charts.d.plugin/example/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-<!--
-title: "Example"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/example/README.md"
-sidebar_label: "example-charts.d.plugin"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/Mock Collectors"
--->
-
-# Example
-
-If you want to understand how the charts.d data collector works, check out the [charts.d example](https://raw.githubusercontent.com/netdata/netdata/master/collectors/charts.d.plugin/example/example.chart.sh).
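-
-As a minimal sketch, and using a hypothetical module named `mymodule`, a charts.d module is a bash snippet that defines three functions prefixed with the module name; the linked example adds configuration variables and data simulation on top of this shape:
-
-```sh
-# mymodule.chart.sh - minimal charts.d module sketch (hypothetical names)
-mymodule_update_every=   # seconds between _update() calls; empty inherits netdata's
-mymodule_priority=150000 # sort order of the charts on the dashboard
-
-# called once, to decide if the module should be enabled (return 0) or not (return 1)
-mymodule_check() { return 0; }
-
-# called once, to print the CHART/DIMENSION definitions
-mymodule_create() {
-  cat << EOF
-CHART mymodule.random '' "A random number" "number" random random line $((mymodule_priority)) $mymodule_update_every
-DIMENSION random '' absolute 1 1
-EOF
-  return 0
-}
-
-# called repeatedly; $1 is the microseconds since the last call
-mymodule_update() {
-  cat << EOF
-BEGIN mymodule.random $1
-SET random = $RANDOM
-END
-EOF
-  return 0
-}
-```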
-
-
diff --git a/collectors/charts.d.plugin/example/example.chart.sh b/collectors/charts.d.plugin/example/example.chart.sh
deleted file mode 100644
index 6bbbcf1d7..000000000
--- a/collectors/charts.d.plugin/example/example.chart.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-# if this chart is called X.chart.sh, then all functions and global variables
-# must start with X_
-
-# _update_every is a special variable - it holds the number of seconds
-# between the calls of the _update() function
-example_update_every=
-
-# the priority is used to sort the charts on the dashboard
-# 1 = the first chart
-example_priority=150000
-
-# to enable this chart, you have to set this to 12345
-# (just a demonstration for something that needs to be checked)
-example_magic_number=
-
-# global variables to store our collected data
-# remember: they need to start with the module name example_
-example_value1=
-example_value2=
-example_value3=
-example_value4=
-example_last=0
-example_count=0
-
-example_get() {
- # do all the work to collect / calculate the values
- # for each dimension
- #
- # Remember:
- # 1. KEEP IT SIMPLE AND SHORT
- # 2. AVOID FORKS (avoid piping commands)
- # 3. AVOID CALLING TOO MANY EXTERNAL PROGRAMS
- # 4. USE LOCAL VARIABLES (global variables may overlap with other modules)
-
- example_value1=$RANDOM
- example_value2=$RANDOM
- example_value3=$RANDOM
- example_value4=$((8192 + (RANDOM * 16383 / 32767)))
-
- if [ $example_count -gt 0 ]; then
- example_count=$((example_count - 1))
-
- [ $example_last -gt 16383 ] && example_value4=$((example_last + (RANDOM * ((32767 - example_last) / 2) / 32767)))
- [ $example_last -le 16383 ] && example_value4=$((example_last - (RANDOM * (example_last / 2) / 32767)))
- else
- example_count=$((1 + (RANDOM * 5 / 32767)))
-
- if [ $example_last -gt 16383 ] && [ $example_value4 -gt 16383 ]; then
- example_value4=$((example_value4 - 16383))
- fi
- if [ $example_last -le 16383 ] && [ $example_value4 -lt 16383 ]; then
- example_value4=$((example_value4 + 16383))
- fi
- fi
- example_last=$example_value4
-
- # this should return:
- # - 0 to send the data to netdata
- # - 1 to report a failure to collect the data
-
- return 0
-}
-
-# _check is called once, to find out if this chart should be enabled or not
-example_check() {
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- # check something
- [ "${example_magic_number}" != "12345" ] && error "manual configuration required: you have to set example_magic_number=$example_magic_number in example.conf to start example chart." && return 1
-
- # check that we can collect data
- example_get || return 1
-
- return 0
-}
-
-# _create is called once, to create the charts
-example_create() {
- # create the chart with 3 dimensions
- cat << EOF
-CHART example.random '' "Random Numbers Stacked Chart" "% of random numbers" random random stacked $((example_priority)) $example_update_every '' '' 'example'
-DIMENSION random1 '' percentage-of-absolute-row 1 1
-DIMENSION random2 '' percentage-of-absolute-row 1 1
-DIMENSION random3 '' percentage-of-absolute-row 1 1
-CHART example.random2 '' "A random number" "random number" random random area $((example_priority + 1)) $example_update_every '' '' 'example'
-DIMENSION random '' absolute 1 1
-EOF
-
- return 0
-}
-
-# _update is called continuously, to collect the values
-example_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- example_get || return 1
-
- # write the result of the work.
- cat << VALUESEOF
-BEGIN example.random $1
-SET random1 = $example_value1
-SET random2 = $example_value2
-SET random3 = $example_value3
-END
-BEGIN example.random2 $1
-SET random = $example_value4
-END
-VALUESEOF
-
- return 0
-}
diff --git a/collectors/charts.d.plugin/example/example.conf b/collectors/charts.d.plugin/example/example.conf
deleted file mode 100644
index 6232ca584..000000000
--- a/collectors/charts.d.plugin/example/example.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# to enable this chart, you have to set this to 12345
-# (just a demonstration for something that needs to be checked)
-#example_magic_number=12345
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#example_update_every=
-
-# the charts priority on the dashboard
-#example_priority=150000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#example_retries=10
diff --git a/collectors/charts.d.plugin/libreswan/Makefile.inc b/collectors/charts.d.plugin/libreswan/Makefile.inc
deleted file mode 100644
index af767d0dd..000000000
--- a/collectors/charts.d.plugin/libreswan/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += libreswan/libreswan.chart.sh
-dist_chartsconfig_DATA += libreswan/libreswan.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += libreswan/README.md libreswan/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md
deleted file mode 120000
index 1416d9597..000000000
--- a/collectors/charts.d.plugin/libreswan/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/libreswan.md \ No newline at end of file
diff --git a/collectors/charts.d.plugin/libreswan/integrations/libreswan.md b/collectors/charts.d.plugin/libreswan/integrations/libreswan.md
deleted file mode 100644
index bd1eec647..000000000
--- a/collectors/charts.d.plugin/libreswan/integrations/libreswan.md
+++ /dev/null
@@ -1,194 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/libreswan/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/libreswan/metadata.yaml"
-sidebar_label: "Libreswan"
-learn_status: "Published"
-learn_rel_path: "Data Collection/VPNs"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Libreswan
-
-
-<img src="https://netdata.cloud/img/libreswan.png" width="150"/>
-
-
-Plugin: charts.d.plugin
-Module: libreswan
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Libreswan performance for optimal IPsec VPN operations. Improve your VPN operations with Netdata's real-time metrics and built-in alerts.
-
-The collector uses the `ipsec` command to collect the information it needs.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per IPSEC tunnel
-
-Metrics related to IPSEC tunnels. Each tunnel provides its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| libreswan.net | in, out | kilobits/s |
-| libreswan.uptime | uptime | seconds |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Install charts.d plugin
-
-If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
-
-
-#### Permissions to execute `ipsec`
-
-The plugin executes 2 commands to collect all the information it needs:
-
-```sh
-ipsec whack --status
-ipsec whack --trafficstatus
-```
-
-The first command is used to extract the currently established tunnels, their IDs and their names.
-The second command is used to extract the current uptime and traffic.
-
-Most likely the `netdata` user will not be able to query libreswan directly, so the `ipsec` commands will be denied.
-The plugin therefore attempts to run `ipsec` as `sudo ipsec ...` to get access to the libreswan statistics.
-
-To allow the `netdata` user to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:
-
-```
-netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status
-netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
-```
-
-Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).
-
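-To verify the rule, you can check (as root, and assuming `/sbin/ipsec` is the correct path on your system) that the `netdata` user can now run the command without a password prompt:
-
-```sh
-# should print the ipsec status instead of asking for a password
-sudo -u netdata sudo -n /sbin/ipsec whack --status
-```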
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `charts.d/libreswan.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config charts.d/libreswan.conf
-```
-#### Options
-
-The config file is sourced by the charts.d plugin. It's a standard bash file.
-
-The following collapsed table contains all the options that can be configured for the libreswan collector.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| libreswan_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |
-| libreswan_priority | The charts priority on the dashboard | 90000 | no |
-| libreswan_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |
-| libreswan_sudo | Whether to run `ipsec` with `sudo` or not. | 1 | no |
-
-</details>
-
-#### Examples
-
-##### Run `ipsec` without sudo
-
-Run the `ipsec` utility without sudo
-
-```bash
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#libreswan_update_every=1
-
-# the charts priority on the dashboard
-#libreswan_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#libreswan_retries=10
-
-# set to 1, to run ipsec with sudo (the default)
-# set to 0, to run ipsec without sudo
-libreswan_sudo=0
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `libreswan` collector, run the `charts.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `charts.d.plugin` to debug the collector:
-
- ```bash
- ./charts.d.plugin debug 1 libreswan
- ```
-
-
diff --git a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
deleted file mode 100644
index d526f7a91..000000000
--- a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh
+++ /dev/null
@@ -1,187 +0,0 @@
-# shellcheck shell=bash disable=SC1117
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-# _update_every is a special variable - it holds the number of seconds
-# between the calls of the _update() function
-libreswan_update_every=1
-
-# the priority is used to sort the charts on the dashboard
-# 1 = the first chart
-libreswan_priority=90000
-
-# set to 1, to run ipsec with sudo
-libreswan_sudo=1
-
-# global variables to store our collected data
-
-# [TUNNELID] = TUNNELNAME
-# here we track the *latest* established tunnels
-# as detected by: ipsec whack --status
-declare -A libreswan_connected_tunnels=()
-
-# [TUNNELID] = VALUE
-# here we track values of all established tunnels (not only the latest)
-# as detected by: ipsec whack --trafficstatus
-declare -A libreswan_traffic_in=()
-declare -A libreswan_traffic_out=()
-declare -A libreswan_established_add_time=()
-
-# [TUNNELNAME] = CHARTID
-# here we remember CHARTIDs of all tunnels
-# we need this to avoid converting tunnel names to chart IDs on every iteration
-declare -A libreswan_tunnel_charts=()
-
-is_able_sudo_ipsec() {
- if ! sudo -n -l "${IPSEC_CMD}" whack --status > /dev/null 2>&1; then
- return 1
- fi
- if ! sudo -n -l "${IPSEC_CMD}" whack --trafficstatus > /dev/null 2>&1; then
- return 1
- fi
- return 0
-}
-
-# run the ipsec command
-libreswan_ipsec() {
- if [ ${libreswan_sudo} -ne 0 ]; then
- sudo -n "${IPSEC_CMD}" "${@}"
- return $?
- else
- "${IPSEC_CMD}" "${@}"
- return $?
- fi
-}
-
-# fetch latest values - fill the arrays
-libreswan_get() {
- # do all the work to collect / calculate the values
- # for each dimension
-
- # empty the variables
- libreswan_traffic_in=()
- libreswan_traffic_out=()
- libreswan_established_add_time=()
- libreswan_connected_tunnels=()
-
- # convert the ipsec command output to a shell script
- # and source it to get the values
- # shellcheck disable=SC1090
- source <(
- {
- libreswan_ipsec whack --status
- libreswan_ipsec whack --trafficstatus
- } | sed -n \
- -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\".*IPsec SA established.*newest IPSEC.*|libreswan_connected_tunnels[\"\1\"]=\"\2\"|p" \
- -e "s|[0-9]\+ #\([0-9]\+\): \"\(.*\)\",\{0,1\}.* add_time=\([0-9]\+\),.* inBytes=\([0-9]\+\),.* outBytes=\([0-9]\+\).*|libreswan_traffic_in[\"\1\"]=\"\4\"; libreswan_traffic_out[\"\1\"]=\"\5\"; libreswan_established_add_time[\"\1\"]=\"\3\";|p"
- ) || return 1
-
- # check we got some data
- [ ${#libreswan_connected_tunnels[@]} -eq 0 ] && return 1
-
- return 0
-}
-
-# _check is called once, to find out if this chart should be enabled or not
-libreswan_check() {
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- require_cmd ipsec || return 1
-
- # make sure it is libreswan
- # shellcheck disable=SC2143
- if [ -z "$(ipsec --version | grep -i libreswan)" ]; then
- error "ipsec command is not Libreswan. Disabling Libreswan plugin."
- return 1
- fi
-
- if [ ${libreswan_sudo} -ne 0 ] && ! is_able_sudo_ipsec; then
- error "not enough permissions to execute ipsec with sudo. Disabling Libreswan plugin."
- return 1
- fi
-
- # check that we can collect data
- libreswan_get || return 1
-
- return 0
-}
-
-# create the charts for an ipsec tunnel
-libreswan_create_one() {
- local n="${1}" name
-
- name="${libreswan_connected_tunnels[${n}]}"
-
- [ -n "${libreswan_tunnel_charts[${name}]}" ] && return 0
-
- libreswan_tunnel_charts[${name}]="$(fixid "${name}")"
-
- cat << EOF
-CHART libreswan.${libreswan_tunnel_charts[${name}]}_net '${name}_net' "LibreSWAN Tunnel ${name} Traffic" "kilobits/s" "${name}" libreswan.net area $((libreswan_priority)) $libreswan_update_every '' '' 'libreswan'
-DIMENSION in '' incremental 8 1000
-DIMENSION out '' incremental -8 1000
-CHART libreswan.${libreswan_tunnel_charts[${name}]}_uptime '${name}_uptime' "LibreSWAN Tunnel ${name} Uptime" "seconds" "${name}" libreswan.uptime line $((libreswan_priority + 1)) $libreswan_update_every '' '' 'libreswan'
-DIMENSION uptime '' absolute 1 1
-EOF
-
- return 0
-
-}
-
-# _create is called once, to create the charts
-libreswan_create() {
- local n
- for n in "${!libreswan_connected_tunnels[@]}"; do
- libreswan_create_one "${n}"
- done
- return 0
-}
-
-libreswan_now=$(date +%s)
-
-# send the values to netdata for an ipsec tunnel
-libreswan_update_one() {
- local n="${1}" microseconds="${2}" name id uptime
-
- name="${libreswan_connected_tunnels[${n}]}"
- id="${libreswan_tunnel_charts[${name}]}"
-
- [ -z "${id}" ] && libreswan_create_one "${name}"
-
- uptime=$((libreswan_now - libreswan_established_add_time[${n}]))
- [ ${uptime} -lt 0 ] && uptime=0
-
- # write the result of the work.
- cat << VALUESEOF
-BEGIN libreswan.${id}_net ${microseconds}
-SET in = ${libreswan_traffic_in[${n}]}
-SET out = ${libreswan_traffic_out[${n}]}
-END
-BEGIN libreswan.${id}_uptime ${microseconds}
-SET uptime = ${uptime}
-END
-VALUESEOF
-}
-
-# _update is called continuously, to collect the values
-libreswan_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- libreswan_get || return 1
- libreswan_now=$(date +%s)
-
- local n
- for n in "${!libreswan_connected_tunnels[@]}"; do
- libreswan_update_one "${n}" "${@}"
- done
-
- return 0
-}
diff --git a/collectors/charts.d.plugin/libreswan/libreswan.conf b/collectors/charts.d.plugin/libreswan/libreswan.conf
deleted file mode 100644
index 9b3ee77b7..000000000
--- a/collectors/charts.d.plugin/libreswan/libreswan.conf
+++ /dev/null
@@ -1,29 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#libreswan_update_every=1
-
-# the charts priority on the dashboard
-#libreswan_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#libreswan_retries=10
-
-# set to 1, to run ipsec with sudo (the default)
-# set to 0, to run ipsec without sudo
-#libreswan_sudo=1
-
-# TO ALLOW NETDATA RUN ipsec AS ROOT
-# CREATE THE FILE: /etc/sudoers.d/netdata
-# WITH THESE 2 LINES (uncommented of course):
-#
-# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status
-# netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
diff --git a/collectors/charts.d.plugin/libreswan/metadata.yaml b/collectors/charts.d.plugin/libreswan/metadata.yaml
deleted file mode 100644
index 77cb25450..000000000
--- a/collectors/charts.d.plugin/libreswan/metadata.yaml
+++ /dev/null
@@ -1,146 +0,0 @@
-plugin_name: charts.d.plugin
-modules:
- - meta:
- plugin_name: charts.d.plugin
- module_name: libreswan
- monitored_instance:
- name: Libreswan
- link: "https://libreswan.org/"
- categories:
- - data-collection.vpns
- icon_filename: "libreswan.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - vpn
- - libreswan
- - network
- - ipsec
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor Libreswan performance for optimal IPsec VPN operations. Improve your VPN operations with Netdata''s real-time metrics and built-in alerts."
- method_description: "The collector uses the `ipsec` command to collect the information it needs."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Install charts.d plugin"
- description: |
- If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
- - title: "Permissions to execute `ipsec`"
- description: |
- The plugin executes 2 commands to collect all the information it needs:
-
- ```sh
- ipsec whack --status
- ipsec whack --trafficstatus
- ```
-
- The first command is used to extract the currently established tunnels, their IDs and their names.
- The second command is used to extract the current uptime and traffic.
-
- Most likely the `netdata` user will not be able to query libreswan directly, so the `ipsec` commands will be denied.
- The plugin therefore attempts to run `ipsec` as `sudo ipsec ...` to get access to the libreswan statistics.
-
- To allow the `netdata` user to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:
-
- ```
- netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status
- netdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus
- ```
-
- Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).
- configuration:
- file:
- name: charts.d/libreswan.conf
- options:
- description: |
- The config file is sourced by the charts.d plugin. It's a standard bash file.
-
- The following collapsed table contains all the options that can be configured for the libreswan collector.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: libreswan_update_every
- description: The data collection frequency. If unset, will inherit the netdata update frequency.
- default_value: 1
- required: false
- - name: libreswan_priority
- description: The charts priority on the dashboard
- default_value: 90000
- required: false
- - name: libreswan_retries
- description: The number of retries to do in case of failure before disabling the collector.
- default_value: 10
- required: false
- - name: libreswan_sudo
- description: Whether to run `ipsec` with `sudo` or not.
- default_value: 1
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Run `ipsec` without sudo
- description: Run the `ipsec` utility without sudo
- config: |
- # the data collection frequency
- # if unset, will inherit the netdata update frequency
- #libreswan_update_every=1
-
- # the charts priority on the dashboard
- #libreswan_priority=90000
-
- # the number of retries to do in case of failure
- # before disabling the module
- #libreswan_retries=10
-
- # set to 1, to run ipsec with sudo (the default)
- # set to 0, to run ipsec without sudo
- libreswan_sudo=0
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: IPSEC tunnel
- description: "Metrics related to IPSEC tunnels. Each tunnel provides its own set of the following metrics."
- labels: []
- metrics:
- - name: libreswan.net
- description: LibreSWAN Tunnel ${name} Traffic
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: in
- - name: out
- - name: libreswan.uptime
- description: LibreSWAN Tunnel ${name} Uptime
- unit: "seconds"
- chart_type: line
- dimensions:
- - name: uptime
diff --git a/collectors/charts.d.plugin/loopsleepms.sh.inc b/collectors/charts.d.plugin/loopsleepms.sh.inc
deleted file mode 100644
index 5608b8d8f..000000000
--- a/collectors/charts.d.plugin/loopsleepms.sh.inc
+++ /dev/null
@@ -1,227 +0,0 @@
-# no need for shebang - this file is included from other scripts
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-LOOPSLEEP_DATE="$(which date 2>/dev/null || command -v date 2>/dev/null)"
-if [ -z "$LOOPSLEEP_DATE" ]; then
- echo >&2 "$0: ERROR: Cannot find the command 'date' in the system path."
- exit 1
-fi
-
-# -----------------------------------------------------------------------------
-# use the date command as a high resolution timer
-
-# macOS 'date' doesn't support '%N' precision
-# echo $(/bin/date +"%N") is "N"
-if [ "$($LOOPSLEEP_DATE +"%N")" = "N" ]; then
- LOOPSLEEP_DATE_FORMAT="%s * 1000"
-else
- LOOPSLEEP_DATE_FORMAT="%s * 1000 + 10#%-N / 1000000"
-fi
-
-now_ms=
-LOOPSLEEPMS_HIGHRES=1
-test "$($LOOPSLEEP_DATE +%N)" = "%N" && LOOPSLEEPMS_HIGHRES=0
-test -z "$($LOOPSLEEP_DATE +%N)" && LOOPSLEEPMS_HIGHRES=0
-current_time_ms_from_date() {
- if [ $LOOPSLEEPMS_HIGHRES -eq 0 ]; then
- now_ms="$($LOOPSLEEP_DATE +'%s')000"
- else
- now_ms="$(($($LOOPSLEEP_DATE +"$LOOPSLEEP_DATE_FORMAT")))"
- fi
-}
-
-# -----------------------------------------------------------------------------
-# use /proc/uptime as a high resolution timer
-
-current_time_ms_from_date
-current_time_ms_from_uptime_started="${now_ms}"
-current_time_ms_from_uptime_last="${now_ms}"
-current_time_ms_from_uptime_first=0
-current_time_ms_from_uptime() {
- local up rest arr=() n
-
- read up rest </proc/uptime
- if [ $? -ne 0 ]; then
- echo >&2 "$0: Cannot read /proc/uptime - falling back to current_time_ms_from_date()."
- current_time_ms="current_time_ms_from_date"
- current_time_ms_from_date
- current_time_ms_accuracy=1
- return
- fi
-
- arr=(${up//./ })
-
- if [ ${#arr[1]} -lt 1 ]; then
- n="${arr[0]}000"
- elif [ ${#arr[1]} -lt 2 ]; then
- n="${arr[0]}${arr[1]}00"
- elif [ ${#arr[1]} -lt 3 ]; then
- n="${arr[0]}${arr[1]}0"
- else
- n="${arr[0]}${arr[1]}"
- fi
-
- now_ms=$((current_time_ms_from_uptime_started - current_time_ms_from_uptime_first + n))
-
- if [ "${now_ms}" -lt "${current_time_ms_from_uptime_last}" ]; then
- echo >&2 "$0: Cannot use current_time_ms_from_uptime() - new time ${now_ms} is older than the last ${current_time_ms_from_uptime_last} - falling back to current_time_ms_from_date()."
- current_time_ms="current_time_ms_from_date"
- current_time_ms_from_date
- current_time_ms_accuracy=1
- fi
-
- current_time_ms_from_uptime_last="${now_ms}"
-}
-current_time_ms_from_uptime
-current_time_ms_from_uptime_first="$((now_ms - current_time_ms_from_uptime_started))"
-current_time_ms_from_uptime_last="${current_time_ms_from_uptime_first}"
-current_time_ms="current_time_ms_from_uptime"
-current_time_ms_accuracy=10
-if [ "${current_time_ms_from_uptime_first}" -eq 0 ]; then
- echo >&2 "$0: Invalid setup for current_time_ms_from_uptime() - falling back to current_time_ms_from_date()."
- current_time_ms="current_time_ms_from_date"
- current_time_ms_accuracy=1
-fi
-
-# -----------------------------------------------------------------------------
-# use read with timeout for sleep
-
-mysleep=""
-
-mysleep_fifo="${NETDATA_CACHE_DIR-/tmp}/.netdata_bash_sleep_timer_fifo"
-[ -f "${mysleep_fifo}" ] && rm "${mysleep_fifo}"
-[ ! -p "${mysleep_fifo}" ] && mkfifo "${mysleep_fifo}"
-[ -p "${mysleep_fifo}" ] && mysleep="mysleep_read"
-
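-# sleep by reading from a fifo with a timeout: "read -t" simply expires after
-# the requested interval, without forking an external sleep process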
-mysleep_read() {
- read -t "${1}" <>"${mysleep_fifo}"
- ret=$?
- if [ $ret -le 128 ]; then
- echo >&2 "$0: Cannot use read for sleeping (return code ${ret})."
- mysleep="sleep"
- ${mysleep} "${1}"
- fi
-}
-
-# -----------------------------------------------------------------------------
-# use bash loadable module for sleep
-
-mysleep_builtin() {
- builtin sleep "${1}"
- ret=$?
- if [ $ret -ne 0 ]; then
- echo >&2 "$0: Cannot use builtin sleep for sleeping (return code ${ret})."
- mysleep="sleep"
- ${mysleep} "${1}"
- fi
-}
-
-if [ -z "${mysleep}" -a "$((BASH_VERSINFO[0] + 0))" -ge 3 -a "${NETDATA_BASH_LOADABLES}" != "DISABLE" ]; then
- # enable modules only for bash version 3+
-
- for bash_modules_path in ${BASH_LOADABLES_PATH//:/ } "$(pkg-config bash --variable=loadablesdir 2>/dev/null)" "/usr/lib/bash" "/lib/bash" "/lib64/bash" "/usr/local/lib/bash" "/usr/local/lib64/bash"; do
- [ -z "${bash_modules_path}" -o ! -d "${bash_modules_path}" ] && continue
-
- # check for sleep
- for bash_module_sleep in "sleep" "sleep.so"; do
- if [ -f "${bash_modules_path}/${bash_module_sleep}" ]; then
- if enable -f "${bash_modules_path}/${bash_module_sleep}" sleep 2>/dev/null; then
- mysleep="mysleep_builtin"
- # echo >&2 "$0: Using bash loadable ${bash_modules_path}/${bash_module_sleep} for sleep"
- break
- fi
- fi
-
- done
-
- [ ! -z "${mysleep}" ] && break
- done
-fi
-
-# -----------------------------------------------------------------------------
-# fallback to external sleep
-
-[ -z "${mysleep}" ] && mysleep="sleep"
-
-# -----------------------------------------------------------------------------
-# this function is used to sleep a fraction of a second
-# it calculates the difference between every time is called
-# and tries to align the sleep time to give you exactly the
-# loop you need.
-
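-# typical usage, for a loop aligned to X-second boundaries:
-#
-#   while true; do
-#       do_the_work
-#       loopsleepms X
-#   done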
-LOOPSLEEPMS_LASTRUN=0
-LOOPSLEEPMS_NEXTRUN=0
-LOOPSLEEPMS_LASTSLEEP=0
-LOOPSLEEPMS_LASTWORK=0
-
-loopsleepms() {
- local tellwork=0 t="${1}" div s m now mstosleep
-
- if [ "${t}" = "tellwork" ]; then
- tellwork=1
- shift
- t="${1}"
- fi
-
- # $t = the time in seconds to wait
-
- # if high resolution is not supported
- # just sleep the time requested, in seconds
- if [ ${LOOPSLEEPMS_HIGHRES} -eq 0 ]; then
- sleep ${t}
- return
- fi
-
- # get the current time, in ms in ${now_ms}
- ${current_time_ms}
-
- # calculate ms since last run
- [ ${LOOPSLEEPMS_LASTRUN} -gt 0 ] &&
- LOOPSLEEPMS_LASTWORK=$((now_ms - LOOPSLEEPMS_LASTRUN - LOOPSLEEPMS_LASTSLEEP + current_time_ms_accuracy))
- # echo "# last loop's work took $LOOPSLEEPMS_LASTWORK ms"
-
- # remember this run
- LOOPSLEEPMS_LASTRUN=${now_ms}
-
- # calculate the next run
- LOOPSLEEPMS_NEXTRUN=$(((now_ms - (now_ms % (t * 1000))) + (t * 1000)))
-
- # calculate ms to sleep
- mstosleep=$((LOOPSLEEPMS_NEXTRUN - now_ms + current_time_ms_accuracy))
- # echo "# mstosleep is $mstosleep ms"
-
- # if we are too slow, sleep some time
- test ${mstosleep} -lt 200 && mstosleep=200
-
- s=$((mstosleep / 1000))
- m=$((mstosleep - (s * 1000)))
- [ "${m}" -lt 100 ] && m="0${m}"
- [ "${m}" -lt 10 ] && m="0${m}"
-
- test $tellwork -eq 1 && echo >&2 " >>> PERFORMANCE >>> WORK TOOK ${LOOPSLEEPMS_LASTWORK} ms ( $((LOOPSLEEPMS_LASTWORK * 100 / 1000)).$((LOOPSLEEPMS_LASTWORK % 10))% cpu ) >>> SLEEPING ${mstosleep} ms"
-
- # echo "# sleeping ${s}.${m}"
- # echo
- ${mysleep} ${s}.${m}
-
- # keep the values we need
- # for our next run
- LOOPSLEEPMS_LASTSLEEP=$mstosleep
-}
-
-# test it
-#while [ 1 ]
-#do
-# r=$(( (RANDOM * 2000 / 32767) ))
-# s=$((r / 1000))
-# m=$((r - (s * 1000)))
-# [ "${m}" -lt 100 ] && m="0${m}"
-# [ "${m}" -lt 10 ] && m="0${m}"
-# echo "${r} = ${s}.${m}"
-#
-# # the work
-# ${mysleep} ${s}.${m}
-#
-# # the alignment loop
-# loopsleepms tellwork 1
-#done
diff --git a/collectors/charts.d.plugin/opensips/Makefile.inc b/collectors/charts.d.plugin/opensips/Makefile.inc
deleted file mode 100644
index a7b5d3a92..000000000
--- a/collectors/charts.d.plugin/opensips/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += opensips/opensips.chart.sh
-dist_chartsconfig_DATA += opensips/opensips.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += opensips/README.md opensips/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/opensips/README.md b/collectors/charts.d.plugin/opensips/README.md
deleted file mode 120000
index bb85ba6d0..000000000
--- a/collectors/charts.d.plugin/opensips/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/opensips.md \ No newline at end of file
diff --git a/collectors/charts.d.plugin/opensips/integrations/opensips.md b/collectors/charts.d.plugin/opensips/integrations/opensips.md
deleted file mode 100644
index 8c88dba0b..000000000
--- a/collectors/charts.d.plugin/opensips/integrations/opensips.md
+++ /dev/null
@@ -1,192 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/opensips/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/opensips/metadata.yaml"
-sidebar_label: "OpenSIPS"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Telephony Servers"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# OpenSIPS
-
-
-<img src="https://netdata.cloud/img/opensips.png" width="150"/>
-
-
-Plugin: charts.d.plugin
-Module: opensips
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Examine OpenSIPS metrics for insights into SIP server operations. Study call rates, error rates, and response times for reliable voice over IP services.
-
-The collector uses the `opensipsctl` command line utility to gather OpenSIPS metrics.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-The collector will attempt to call `opensipsctl` with a default set of parameters, even without any configuration.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per OpenSIPS instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| opensips.dialogs_active | active, early | dialogs |
-| opensips.users | registered, location, contacts, expires | users |
-| opensips.registrar | accepted, rejected | registrations/s |
-| opensips.transactions | UAS, UAC | transactions/s |
-| opensips.core_rcv | requests, replies | queries/s |
-| opensips.core_fwd | requests, replies | queries/s |
-| opensips.core_drop | requests, replies | queries/s |
-| opensips.core_err | requests, replies | queries/s |
-| opensips.core_bad | bad_URIs_rcvd, unsupported_methods, bad_msg_hdr | queries/s |
-| opensips.tm_replies | received, relayed, local | replies/s |
-| opensips.transactions_status | 2xx, 3xx, 4xx, 5xx, 6xx | transactions/s |
-| opensips.transactions_inuse | inuse | transactions |
-| opensips.sl_replies | 1xx, 2xx, 3xx, 4xx, 5xx, 6xx, sent, error, ACKed | replies/s |
-| opensips.dialogs | processed, expired, failed | dialogs/s |
-| opensips.net_waiting | UDP, TCP | kilobytes |
-| opensips.uri_checks | positive, negative | checks / sec |
-| opensips.traces | requests, replies | traces / sec |
-| opensips.shmem | total, used, real_used, max_used, free | kilobytes |
-| opensips.shmem_fragments | fragments | fragments |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Install charts.d plugin
-
-If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
-
-
-#### Required software
-
-The collector requires the `opensipsctl` command line utility to be installed.
-
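-As a quick sanity check (assuming `opensipsctl` is in the `$PATH` and the FIFO management interface is enabled), you can manually run the query the collector issues by default:
-
-```sh
-opensipsctl fifo get_statistics all
-```
-
-If this prints `group:name = value` lines, the collector should work with the default `opensips_opts`.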
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `charts.d/opensips.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config charts.d/opensips.conf
-```
-#### Options
-
-The config file is sourced by the charts.d plugin. It's a standard bash file.
-
-The following collapsed table contains all the options that can be configured for the opensips collector.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| opensips_opts | Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server. | fifo get_statistics all | no |
-| opensips_cmd | If `opensipsctl` is not in $PATH, specify its full path here. | | no |
-| opensips_timeout | How long to wait for `opensipsctl` to respond. | 2 | no |
-| opensips_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 5 | no |
-| opensips_priority | The charts priority on the dashboard. | 80000 | no |
-| opensips_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |
-
-</details>
-
-#### Examples
-
-##### Custom `opensipsctl` command
-
-Set a custom path to the `opensipsctl` command
-
-```bash
-#opensips_opts="fifo get_statistics all"
-opensips_cmd=/opt/opensips/bin/opensipsctl
-#opensips_timeout=2
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#opensips_update_every=5
-
-# the charts priority on the dashboard
-#opensips_priority=80000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#opensips_retries=10
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `opensips` collector, run the `charts.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `charts.d.plugin` to debug the collector:
-
- ```bash
- ./charts.d.plugin debug 1 opensips
- ```
-
-
diff --git a/collectors/charts.d.plugin/opensips/metadata.yaml b/collectors/charts.d.plugin/opensips/metadata.yaml
deleted file mode 100644
index 356de5615..000000000
--- a/collectors/charts.d.plugin/opensips/metadata.yaml
+++ /dev/null
@@ -1,270 +0,0 @@
-plugin_name: charts.d.plugin
-modules:
- - meta:
- plugin_name: charts.d.plugin
- module_name: opensips
- monitored_instance:
- name: OpenSIPS
- link: "https://opensips.org/"
- categories:
- - data-collection.telephony-servers
- icon_filename: "opensips.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - opensips
- - sip
- - voice
- - video
- - stream
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Examine OpenSIPS metrics for insights into SIP server operations. Study call rates, error rates, and response times for reliable voice over IP services."
- method_description: "The collector uses the `opensipsctl` command line utility to gather OpenSIPS metrics."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "The collector will attempt to call `opensipsctl` along with a default number of parameters, even without any configuration."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Install charts.d plugin"
- description: |
- If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
- - title: "Required software"
- description: "The collector requires the `opensipsctl` to be installed."
- configuration:
- file:
- name: charts.d/opensips.conf
- options:
- description: |
- The config file is sourced by the charts.d plugin. It's a standard bash file.
-
- The following collapsed table contains all the options that can be configured for the opensips collector.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: opensips_opts
- description: Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server.
- default_value: "fifo get_statistics all"
- required: false
- - name: opensips_cmd
- description: If `opensipsctl` is not in $PATH, specify its full path here.
- default_value: ""
- required: false
- - name: opensips_timeout
- description: How long to wait for `opensipsctl` to respond.
- default_value: 2
- required: false
- - name: opensips_update_every
- description: The data collection frequency. If unset, will inherit the netdata update frequency.
- default_value: 5
- required: false
- - name: opensips_priority
- description: The charts priority on the dashboard.
- default_value: 80000
- required: false
- - name: opensips_retries
- description: The number of retries to do in case of failure before disabling the collector.
- default_value: 10
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Custom `opensipsctl` command
- description: Set a custom path to the `opensipsctl` command
- config: |
- #opensips_opts="fifo get_statistics all"
- opensips_cmd=/opt/opensips/bin/opensipsctl
- #opensips_timeout=2
-
- # the data collection frequency
- # if unset, will inherit the netdata update frequency
- #opensips_update_every=5
-
- # the charts priority on the dashboard
- #opensips_priority=80000
-
- # the number of retries to do in case of failure
- # before disabling the module
- #opensips_retries=10
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: opensips.dialogs_active
- description: OpenSIPS Active Dialogs
- unit: "dialogs"
- chart_type: area
- dimensions:
- - name: active
- - name: early
- - name: opensips.users
- description: OpenSIPS Users
- unit: "users"
- chart_type: line
- dimensions:
- - name: registered
- - name: location
- - name: contacts
- - name: expires
- - name: opensips.registrar
- description: OpenSIPS Registrar
- unit: "registrations/s"
- chart_type: line
- dimensions:
- - name: accepted
- - name: rejected
- - name: opensips.transactions
- description: OpenSIPS Transactions
- unit: "transactions/s"
- chart_type: line
- dimensions:
- - name: UAS
- - name: UAC
- - name: opensips.core_rcv
- description: OpenSIPS Core Receives
- unit: "queries/s"
- chart_type: line
- dimensions:
- - name: requests
- - name: replies
- - name: opensips.core_fwd
- description: OpenSIPS Core Forwards
- unit: "queries/s"
- chart_type: line
- dimensions:
- - name: requests
- - name: replies
- - name: opensips.core_drop
- description: OpenSIPS Core Drops
- unit: "queries/s"
- chart_type: line
- dimensions:
- - name: requests
- - name: replies
- - name: opensips.core_err
- description: OpenSIPS Core Errors
- unit: "queries/s"
- chart_type: line
- dimensions:
- - name: requests
- - name: replies
- - name: opensips.core_bad
- description: OpenSIPS Core Bad
- unit: "queries/s"
- chart_type: line
- dimensions:
- - name: bad_URIs_rcvd
- - name: unsupported_methods
- - name: bad_msg_hdr
- - name: opensips.tm_replies
- description: OpenSIPS TM Replies
- unit: "replies/s"
- chart_type: line
- dimensions:
- - name: received
- - name: relayed
- - name: local
- - name: opensips.transactions_status
- description: OpenSIPS Transactions Status
- unit: "transactions/s"
- chart_type: line
- dimensions:
- - name: 2xx
- - name: 3xx
- - name: 4xx
- - name: 5xx
- - name: 6xx
- - name: opensips.transactions_inuse
- description: OpenSIPS InUse Transactions
- unit: "transactions"
- chart_type: line
- dimensions:
- - name: inuse
- - name: opensips.sl_replies
- description: OpenSIPS SL Replies
- unit: "replies/s"
- chart_type: line
- dimensions:
- - name: 1xx
- - name: 2xx
- - name: 3xx
- - name: 4xx
- - name: 5xx
- - name: 6xx
- - name: sent
- - name: error
- - name: ACKed
- - name: opensips.dialogs
- description: OpenSIPS Dialogs
- unit: "dialogs/s"
- chart_type: line
- dimensions:
- - name: processed
- - name: expired
- - name: failed
- - name: opensips.net_waiting
- description: OpenSIPS Network Waiting
- unit: "kilobytes"
- chart_type: line
- dimensions:
- - name: UDP
- - name: TCP
- - name: opensips.uri_checks
- description: OpenSIPS URI Checks
- unit: "checks / sec"
- chart_type: line
- dimensions:
- - name: positive
- - name: negative
- - name: opensips.traces
- description: OpenSIPS Traces
- unit: "traces / sec"
- chart_type: line
- dimensions:
- - name: requests
- - name: replies
- - name: opensips.shmem
- description: OpenSIPS Shared Memory
- unit: "kilobytes"
- chart_type: line
- dimensions:
- - name: total
- - name: used
- - name: real_used
- - name: max_used
- - name: free
- - name: opensips.shmem_fragments
- description: OpenSIPS Shared Memory Fragmentation
- unit: "fragments"
- chart_type: line
- dimensions:
- - name: fragments
diff --git a/collectors/charts.d.plugin/opensips/opensips.chart.sh b/collectors/charts.d.plugin/opensips/opensips.chart.sh
deleted file mode 100644
index 02401fd59..000000000
--- a/collectors/charts.d.plugin/opensips/opensips.chart.sh
+++ /dev/null
@@ -1,325 +0,0 @@
-# shellcheck shell=bash disable=SC1117,SC2154,SC2086
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-opensips_opts="fifo get_statistics all"
-opensips_cmd=
-opensips_update_every=5
-opensips_timeout=2
-opensips_priority=80000
-
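-# fetch all statistics and turn each "group:name = value" line into a
-# sourceable "opensips_group_name=value" shell assignment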
-opensips_get_stats() {
- run -t $opensips_timeout "$opensips_cmd" $opensips_opts |
- grep "^\(core\|dialog\|net\|registrar\|shmem\|siptrace\|sl\|tm\|uri\|usrloc\):[a-zA-Z0-9_-]\+[[:space:]]*[=:]\+[[:space:]]*[0-9]\+[[:space:]]*$" |
- sed \
- -e "s|[[:space:]]*[=:]\+[[:space:]]*\([0-9]\+\)[[:space:]]*$|=\1|g" \
- -e "s|[[:space:]:-]\+|_|g" \
- -e "s|^|opensips_|g"
-
- local ret=$?
- [ $ret -ne 0 ] && echo "opensips_command_failed=1"
- return $ret
-}
-
-opensips_check() {
- # if the user did not provide an opensips_cmd
- # try to find it in the system
- if [ -z "$opensips_cmd" ]; then
- require_cmd opensipsctl || return 1
- opensips_cmd="$OPENSIPSCTL_CMD"
- fi
-
- # check once if the command works
- local x
- x="$(opensips_get_stats | grep "^opensips_core_")"
- # shellcheck disable=SC2181
- if [ ! $? -eq 0 ] || [ -z "$x" ]; then
- error "cannot get global status. Please set opensips_opts='options' whatever needed to get connected to opensips server, in $confd/opensips.conf"
- return 1
- fi
-
- return 0
-}
-
-opensips_create() {
- # create the charts
- cat << EOF
-CHART opensips.dialogs_active '' "OpenSIPS Active Dialogs" "dialogs" dialogs '' area $((opensips_priority + 1)) $opensips_update_every '' '' 'opensips'
-DIMENSION dialog_active_dialogs active absolute 1 1
-DIMENSION dialog_early_dialogs early absolute -1 1
-
-CHART opensips.users '' "OpenSIPS Users" "users" users '' line $((opensips_priority + 2)) $opensips_update_every '' '' 'opensips'
-DIMENSION usrloc_registered_users registered absolute 1 1
-DIMENSION usrloc_location_users location absolute 1 1
-DIMENSION usrloc_location_contacts contacts absolute 1 1
-DIMENSION usrloc_location_expires expires incremental -1 1
-
-CHART opensips.registrar '' "OpenSIPS Registrar" "registrations/s" registrar '' line $((opensips_priority + 3)) $opensips_update_every '' '' 'opensips'
-DIMENSION registrar_accepted_regs accepted incremental 1 1
-DIMENSION registrar_rejected_regs rejected incremental -1 1
-
-CHART opensips.transactions '' "OpenSIPS Transactions" "transactions/s" transactions '' line $((opensips_priority + 4)) $opensips_update_every '' '' 'opensips'
-DIMENSION tm_UAS_transactions UAS incremental 1 1
-DIMENSION tm_UAC_transactions UAC incremental -1 1
-
-CHART opensips.core_rcv '' "OpenSIPS Core Receives" "queries/s" core '' line $((opensips_priority + 5)) $opensips_update_every '' '' 'opensips'
-DIMENSION core_rcv_requests requests incremental 1 1
-DIMENSION core_rcv_replies replies incremental -1 1
-
-CHART opensips.core_fwd '' "OpenSIPS Core Forwards" "queries/s" core '' line $((opensips_priority + 6)) $opensips_update_every '' '' 'opensips'
-DIMENSION core_fwd_requests requests incremental 1 1
-DIMENSION core_fwd_replies replies incremental -1 1
-
-CHART opensips.core_drop '' "OpenSIPS Core Drops" "queries/s" core '' line $((opensips_priority + 7)) $opensips_update_every '' '' 'opensips'
-DIMENSION core_drop_requests requests incremental 1 1
-DIMENSION core_drop_replies replies incremental -1 1
-
-CHART opensips.core_err '' "OpenSIPS Core Errors" "queries/s" core '' line $((opensips_priority + 8)) $opensips_update_every '' '' 'opensips'
-DIMENSION core_err_requests requests incremental 1 1
-DIMENSION core_err_replies replies incremental -1 1
-
-CHART opensips.core_bad '' "OpenSIPS Core Bad" "queries/s" core '' line $((opensips_priority + 9)) $opensips_update_every '' '' 'opensips'
-DIMENSION core_bad_URIs_rcvd bad_URIs_rcvd incremental 1 1
-DIMENSION core_unsupported_methods unsupported_methods incremental 1 1
-DIMENSION core_bad_msg_hdr bad_msg_hdr incremental 1 1
-
-CHART opensips.tm_replies '' "OpenSIPS TM Replies" "replies/s" transactions '' line $((opensips_priority + 10)) $opensips_update_every '' '' 'opensips'
-DIMENSION tm_received_replies received incremental 1 1
-DIMENSION tm_relayed_replies relayed incremental 1 1
-DIMENSION tm_local_replies local incremental 1 1
-
-CHART opensips.transactions_status '' "OpenSIPS Transactions Status" "transactions/s" transactions '' line $((opensips_priority + 11)) $opensips_update_every '' '' 'opensips'
-DIMENSION tm_2xx_transactions 2xx incremental 1 1
-DIMENSION tm_3xx_transactions 3xx incremental 1 1
-DIMENSION tm_4xx_transactions 4xx incremental 1 1
-DIMENSION tm_5xx_transactions 5xx incremental 1 1
-DIMENSION tm_6xx_transactions 6xx incremental 1 1
-
-CHART opensips.transactions_inuse '' "OpenSIPS InUse Transactions" "transactions" transactions '' line $((opensips_priority + 12)) $opensips_update_every '' '' 'opensips'
-DIMENSION tm_inuse_transactions inuse absolute 1 1
-
-CHART opensips.sl_replies '' "OpenSIPS SL Replies" "replies/s" core '' line $((opensips_priority + 13)) $opensips_update_every '' '' 'opensips'
-DIMENSION sl_1xx_replies 1xx incremental 1 1
-DIMENSION sl_2xx_replies 2xx incremental 1 1
-DIMENSION sl_3xx_replies 3xx incremental 1 1
-DIMENSION sl_4xx_replies 4xx incremental 1 1
-DIMENSION sl_5xx_replies 5xx incremental 1 1
-DIMENSION sl_6xx_replies 6xx incremental 1 1
-DIMENSION sl_sent_replies sent incremental 1 1
-DIMENSION sl_sent_err_replies error incremental 1 1
-DIMENSION sl_received_ACKs ACKed incremental 1 1
-
-CHART opensips.dialogs '' "OpenSIPS Dialogs" "dialogs/s" dialogs '' line $((opensips_priority + 14)) $opensips_update_every '' '' 'opensips'
-DIMENSION dialog_processed_dialogs processed incremental 1 1
-DIMENSION dialog_expired_dialogs expired incremental 1 1
-DIMENSION dialog_failed_dialogs failed incremental -1 1
-
-CHART opensips.net_waiting '' "OpenSIPS Network Waiting" "kilobytes" net '' line $((opensips_priority + 15)) $opensips_update_every '' '' 'opensips'
-DIMENSION net_waiting_udp UDP absolute 1 1024
-DIMENSION net_waiting_tcp TCP absolute 1 1024
-
-CHART opensips.uri_checks '' "OpenSIPS URI Checks" "checks / sec" uri '' line $((opensips_priority + 16)) $opensips_update_every '' '' 'opensips'
-DIMENSION uri_positive_checks positive incremental 1 1
-DIMENSION uri_negative_checks negative incremental -1 1
-
-CHART opensips.traces '' "OpenSIPS Traces" "traces / sec" traces '' line $((opensips_priority + 17)) $opensips_update_every '' '' 'opensips'
-DIMENSION siptrace_traced_requests requests incremental 1 1
-DIMENSION siptrace_traced_replies replies incremental -1 1
-
-CHART opensips.shmem '' "OpenSIPS Shared Memory" "kilobytes" mem '' line $((opensips_priority + 18)) $opensips_update_every '' '' 'opensips'
-DIMENSION shmem_total_size total absolute 1 1024
-DIMENSION shmem_used_size used absolute 1 1024
-DIMENSION shmem_real_used_size real_used absolute 1 1024
-DIMENSION shmem_max_used_size max_used absolute 1 1024
-DIMENSION shmem_free_size free absolute 1 1024
-
-CHART opensips.shmem_fragments '' "OpenSIPS Shared Memory Fragmentation" "fragments" mem '' line $((opensips_priority + 19)) $opensips_update_every '' '' 'opensips'
-DIMENSION shmem_fragments fragments absolute 1 1
-EOF
-
- return 0
-}
-
-opensips_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
-
-	# 1. get the statistics from opensips (via opensips_get_stats)
-	# 2. grep only the lines that look like:   module:counter = number
-	# 3. sed to normalize each line to:        opensips_module_counter=number
-	# 4. execute the result as a script with eval, declaring each counter as a local variable
-	# be very careful with eval:
-	#    prepare the script and, at the end, grep only the lines that are useful, so that
-	#    even if something goes wrong, no other code can be executed
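-	# for example (illustrative values), a raw statistics line such as:
-	#      core:rcv_requests = 1234
-	# is rewritten by opensips_get_stats() to:
-	#      opensips_core_rcv_requests=1234
-	# and, after the eval below, is available as $opensips_core_rcv_requests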
-
- unset \
- opensips_dialog_active_dialogs \
- opensips_dialog_early_dialogs \
- opensips_usrloc_registered_users \
- opensips_usrloc_location_users \
- opensips_usrloc_location_contacts \
- opensips_usrloc_location_expires \
- opensips_registrar_accepted_regs \
- opensips_registrar_rejected_regs \
- opensips_tm_UAS_transactions \
- opensips_tm_UAC_transactions \
- opensips_core_rcv_requests \
- opensips_core_rcv_replies \
- opensips_core_fwd_requests \
- opensips_core_fwd_replies \
- opensips_core_drop_requests \
- opensips_core_drop_replies \
- opensips_core_err_requests \
- opensips_core_err_replies \
- opensips_core_bad_URIs_rcvd \
- opensips_core_unsupported_methods \
- opensips_core_bad_msg_hdr \
- opensips_tm_received_replies \
- opensips_tm_relayed_replies \
- opensips_tm_local_replies \
- opensips_tm_2xx_transactions \
- opensips_tm_3xx_transactions \
- opensips_tm_4xx_transactions \
- opensips_tm_5xx_transactions \
- opensips_tm_6xx_transactions \
- opensips_tm_inuse_transactions \
- opensips_sl_1xx_replies \
- opensips_sl_2xx_replies \
- opensips_sl_3xx_replies \
- opensips_sl_4xx_replies \
- opensips_sl_5xx_replies \
- opensips_sl_6xx_replies \
- opensips_sl_sent_replies \
- opensips_sl_sent_err_replies \
- opensips_sl_received_ACKs \
- opensips_dialog_processed_dialogs \
- opensips_dialog_expired_dialogs \
- opensips_dialog_failed_dialogs \
- opensips_net_waiting_udp \
- opensips_net_waiting_tcp \
- opensips_uri_positive_checks \
- opensips_uri_negative_checks \
- opensips_siptrace_traced_requests \
- opensips_siptrace_traced_replies \
- opensips_shmem_total_size \
- opensips_shmem_used_size \
- opensips_shmem_real_used_size \
- opensips_shmem_max_used_size \
- opensips_shmem_free_size \
- opensips_shmem_fragments
-
- opensips_command_failed=0
- eval "local $(opensips_get_stats)"
- # shellcheck disable=SC2181
- [ $? -ne 0 ] && return 1
-
- [ $opensips_command_failed -eq 1 ] && error "failed to get values, disabling." && return 1
-
- # write the result of the work.
- cat << VALUESEOF
-BEGIN opensips.dialogs_active $1
-SET dialog_active_dialogs = $opensips_dialog_active_dialogs
-SET dialog_early_dialogs = $opensips_dialog_early_dialogs
-END
-BEGIN opensips.users $1
-SET usrloc_registered_users = $opensips_usrloc_registered_users
-SET usrloc_location_users = $opensips_usrloc_location_users
-SET usrloc_location_contacts = $opensips_usrloc_location_contacts
-SET usrloc_location_expires = $opensips_usrloc_location_expires
-END
-BEGIN opensips.registrar $1
-SET registrar_accepted_regs = $opensips_registrar_accepted_regs
-SET registrar_rejected_regs = $opensips_registrar_rejected_regs
-END
-BEGIN opensips.transactions $1
-SET tm_UAS_transactions = $opensips_tm_UAS_transactions
-SET tm_UAC_transactions = $opensips_tm_UAC_transactions
-END
-BEGIN opensips.core_rcv $1
-SET core_rcv_requests = $opensips_core_rcv_requests
-SET core_rcv_replies = $opensips_core_rcv_replies
-END
-BEGIN opensips.core_fwd $1
-SET core_fwd_requests = $opensips_core_fwd_requests
-SET core_fwd_replies = $opensips_core_fwd_replies
-END
-BEGIN opensips.core_drop $1
-SET core_drop_requests = $opensips_core_drop_requests
-SET core_drop_replies = $opensips_core_drop_replies
-END
-BEGIN opensips.core_err $1
-SET core_err_requests = $opensips_core_err_requests
-SET core_err_replies = $opensips_core_err_replies
-END
-BEGIN opensips.core_bad $1
-SET core_bad_URIs_rcvd = $opensips_core_bad_URIs_rcvd
-SET core_unsupported_methods = $opensips_core_unsupported_methods
-SET core_bad_msg_hdr = $opensips_core_bad_msg_hdr
-END
-BEGIN opensips.tm_replies $1
-SET tm_received_replies = $opensips_tm_received_replies
-SET tm_relayed_replies = $opensips_tm_relayed_replies
-SET tm_local_replies = $opensips_tm_local_replies
-END
-BEGIN opensips.transactions_status $1
-SET tm_2xx_transactions = $opensips_tm_2xx_transactions
-SET tm_3xx_transactions = $opensips_tm_3xx_transactions
-SET tm_4xx_transactions = $opensips_tm_4xx_transactions
-SET tm_5xx_transactions = $opensips_tm_5xx_transactions
-SET tm_6xx_transactions = $opensips_tm_6xx_transactions
-END
-BEGIN opensips.transactions_inuse $1
-SET tm_inuse_transactions = $opensips_tm_inuse_transactions
-END
-BEGIN opensips.sl_replies $1
-SET sl_1xx_replies = $opensips_sl_1xx_replies
-SET sl_2xx_replies = $opensips_sl_2xx_replies
-SET sl_3xx_replies = $opensips_sl_3xx_replies
-SET sl_4xx_replies = $opensips_sl_4xx_replies
-SET sl_5xx_replies = $opensips_sl_5xx_replies
-SET sl_6xx_replies = $opensips_sl_6xx_replies
-SET sl_sent_replies = $opensips_sl_sent_replies
-SET sl_sent_err_replies = $opensips_sl_sent_err_replies
-SET sl_received_ACKs = $opensips_sl_received_ACKs
-END
-BEGIN opensips.dialogs $1
-SET dialog_processed_dialogs = $opensips_dialog_processed_dialogs
-SET dialog_expired_dialogs = $opensips_dialog_expired_dialogs
-SET dialog_failed_dialogs = $opensips_dialog_failed_dialogs
-END
-BEGIN opensips.net_waiting $1
-SET net_waiting_udp = $opensips_net_waiting_udp
-SET net_waiting_tcp = $opensips_net_waiting_tcp
-END
-BEGIN opensips.uri_checks $1
-SET uri_positive_checks = $opensips_uri_positive_checks
-SET uri_negative_checks = $opensips_uri_negative_checks
-END
-BEGIN opensips.traces $1
-SET siptrace_traced_requests = $opensips_siptrace_traced_requests
-SET siptrace_traced_replies = $opensips_siptrace_traced_replies
-END
-BEGIN opensips.shmem $1
-SET shmem_total_size = $opensips_shmem_total_size
-SET shmem_used_size = $opensips_shmem_used_size
-SET shmem_real_used_size = $opensips_shmem_real_used_size
-SET shmem_max_used_size = $opensips_shmem_max_used_size
-SET shmem_free_size = $opensips_shmem_free_size
-END
-BEGIN opensips.shmem_fragments $1
-SET shmem_fragments = $opensips_shmem_fragments
-END
-VALUESEOF
-
- return 0
-}
diff --git a/collectors/charts.d.plugin/opensips/opensips.conf b/collectors/charts.d.plugin/opensips/opensips.conf
deleted file mode 100644
index e25111dce..000000000
--- a/collectors/charts.d.plugin/opensips/opensips.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-#opensips_opts="fifo get_statistics all"
-#opensips_cmd=
-#opensips_timeout=2
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#opensips_update_every=5
-
-# the charts priority on the dashboard
-#opensips_priority=80000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#opensips_retries=10
diff --git a/collectors/charts.d.plugin/sensors/Makefile.inc b/collectors/charts.d.plugin/sensors/Makefile.inc
deleted file mode 100644
index f466a1b62..000000000
--- a/collectors/charts.d.plugin/sensors/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_charts_DATA += sensors/sensors.chart.sh
-dist_chartsconfig_DATA += sensors/sensors.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += sensors/README.md sensors/Makefile.inc
-
diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md
deleted file mode 120000
index 7e5a416c4..000000000
--- a/collectors/charts.d.plugin/sensors/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/linux_sensors_sysfs.md \ No newline at end of file
diff --git a/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md b/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md
deleted file mode 100644
index 130352f61..000000000
--- a/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md
+++ /dev/null
@@ -1,201 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/sensors/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/charts.d.plugin/sensors/metadata.yaml"
-sidebar_label: "Linux Sensors (sysfs)"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Hardware Devices and Sensors"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Linux Sensors (sysfs)
-
-
-<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
-
-
-Plugin: charts.d.plugin
-Module: sensors
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Use this collector when `lm-sensors` doesn't work on your device (e.g. for RPi temperatures).
-For all other cases, use the [Python collector](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/sensors), which supports multiple jobs, is more efficient, and performs calculations on top of the kernel-provided values.
-
-
-It will provide charts for all configured system sensors, by reading sensors directly from the kernel.
-The values graphed are the raw hardware values of the sensors.
-
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, the collector will try to read entries under `/sys/devices`.
-
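-As a rough illustration (the path below is hypothetical and varies by hardware), the values read are plain integers, e.g. millidegrees Celsius for temperatures:
-
-```bash
-cat /sys/devices/platform/coretemp.0/hwmon/hwmon2/temp1_input
-# 45000  -> charted as 45.0 Celsius
-```
-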
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per sensor chip
-
-Metrics related to sensor chips. Each chip provides its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| sensors.temp | {filename} | Celsius |
-| sensors.volt | {filename} | Volts |
-| sensors.curr | {filename} | Ampere |
-| sensors.power | {filename} | Watt |
-| sensors.fans | {filename} | Rotations / Minute |
-| sensors.energy | {filename} | Joule |
-| sensors.humidity | {filename} | Percent |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Install charts.d plugin
-
-If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
-
-
-#### Enable the sensors collector
-
-The `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config charts.d.conf
-```
-
-Change the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `charts.d/sensors.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config charts.d/sensors.conf
-```
-#### Options
-
-The config file is sourced by the charts.d plugin. It's a standard bash file.
-
-The following collapsed table contains all the options that can be configured for the sensors collector.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| sensors_sys_dir | The directory where the kernel exposes sensor data. | /sys/devices | no |
-| sensors_sys_depth | How deep in the tree to check for sensor data. | 10 | no |
-| sensors_source_update | If set to 1, the script will overwrite internal script functions with code generated ones. | 1 | no |
-| sensors_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |
-| sensors_priority | The charts priority on the dashboard. | 90000 | no |
-| sensors_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |
-
-</details>
-
-#### Examples
-
-##### Set sensors path depth
-
-Set a different sensors path depth
-
-```yaml
-# the directory the kernel keeps sensor data
-#sensors_sys_dir="/sys/devices"
-
-# how deep in the tree to check for sensor data
-sensors_sys_depth=5
-
-# if set to 1, the script will overwrite internal
-# script functions with code generated ones
-# leave to 1, is faster
-#sensors_source_update=1
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#sensors_update_every=
-
-# the charts priority on the dashboard
-#sensors_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#sensors_retries=10
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `sensors` collector, run the `charts.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `charts.d.plugin` to debug the collector:
-
- ```bash
- ./charts.d.plugin debug 1 sensors
- ```
-
-
diff --git a/collectors/charts.d.plugin/sensors/metadata.yaml b/collectors/charts.d.plugin/sensors/metadata.yaml
deleted file mode 100644
index 47f6f4042..000000000
--- a/collectors/charts.d.plugin/sensors/metadata.yaml
+++ /dev/null
@@ -1,182 +0,0 @@
-plugin_name: charts.d.plugin
-modules:
- - meta:
- plugin_name: charts.d.plugin
- module_name: sensors
- monitored_instance:
- name: Linux Sensors (sysfs)
- link: "https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface"
- categories:
- - data-collection.hardware-devices-and-sensors
- icon_filename: "microchip.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - sensors
- - sysfs
- - hwmon
- - rpi
- - raspberry pi
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- Use this collector when `lm-sensors` doesn't work on your device (e.g. for RPi temperatures).
-        For all other cases, use the [Python collector](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/sensors), which supports multiple jobs, is more efficient, and performs calculations on top of the kernel-provided values.
- method_description: |
- It will provide charts for all configured system sensors, by reading sensors directly from the kernel.
- The values graphed are the raw hardware values of the sensors.
- supported_platforms:
- include: [Linux]
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
-          description: "By default, the collector will try to read entries under `/sys/devices`."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Install charts.d plugin"
- description: |
- If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
- - title: "Enable the sensors collector"
- description: |
- The `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file.
-
- ```bash
- cd /etc/netdata # Replace this path with your Netdata config directory, if different
- sudo ./edit-config charts.d.conf
- ```
-
- Change the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
- configuration:
- file:
- name: charts.d/sensors.conf
- options:
- description: |
- The config file is sourced by the charts.d plugin. It's a standard bash file.
-
- The following collapsed table contains all the options that can be configured for the sensors collector.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: sensors_sys_dir
-              description: The directory where the kernel exposes sensor data.
- default_value: "/sys/devices"
- required: false
- - name: sensors_sys_depth
- description: How deep in the tree to check for sensor data.
- default_value: 10
- required: false
- - name: sensors_source_update
- description: If set to 1, the script will overwrite internal script functions with code generated ones.
- default_value: 1
- required: false
- - name: sensors_update_every
- description: The data collection frequency. If unset, will inherit the netdata update frequency.
- default_value: 1
- required: false
- - name: sensors_priority
- description: The charts priority on the dashboard.
- default_value: 90000
- required: false
- - name: sensors_retries
- description: The number of retries to do in case of failure before disabling the collector.
- default_value: 10
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Set sensors path depth
- description: Set a different sensors path depth
- config: |
- # the directory the kernel keeps sensor data
- #sensors_sys_dir="/sys/devices"
-
- # how deep in the tree to check for sensor data
- sensors_sys_depth=5
-
- # if set to 1, the script will overwrite internal
- # script functions with code generated ones
- # leave to 1, is faster
- #sensors_source_update=1
-
- # the data collection frequency
- # if unset, will inherit the netdata update frequency
- #sensors_update_every=
-
- # the charts priority on the dashboard
- #sensors_priority=90000
-
- # the number of retries to do in case of failure
- # before disabling the module
- #sensors_retries=10
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: sensor chip
- description: "Metrics related to sensor chips. Each chip provides its own set of the following metrics."
- labels: []
- metrics:
- - name: sensors.temp
- description: Temperature
- unit: "Celsius"
- chart_type: line
- dimensions:
- - name: "{filename}"
- - name: sensors.volt
- description: Voltage
- unit: "Volts"
- chart_type: line
- dimensions:
- - name: "{filename}"
- - name: sensors.curr
- description: Current
- unit: "Ampere"
- chart_type: line
- dimensions:
- - name: "{filename}"
- - name: sensors.power
- description: Power
- unit: "Watt"
- chart_type: line
- dimensions:
- - name: "{filename}"
- - name: sensors.fans
- description: Fans Speed
- unit: "Rotations / Minute"
- chart_type: line
- dimensions:
- - name: "{filename}"
- - name: sensors.energy
- description: Energy
- unit: "Joule"
- chart_type: area
- dimensions:
- - name: "{filename}"
- - name: sensors.humidity
- description: Humidity
- unit: "Percent"
- chart_type: line
- dimensions:
- - name: "{filename}"
diff --git a/collectors/charts.d.plugin/sensors/sensors.chart.sh b/collectors/charts.d.plugin/sensors/sensors.chart.sh
deleted file mode 100644
index 9576e2ab2..000000000
--- a/collectors/charts.d.plugin/sensors/sensors.chart.sh
+++ /dev/null
@@ -1,250 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-# sensors docs
-# https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface
-
-# if this chart is called X.chart.sh, then all functions and global variables
-# must start with X_
-
-# the directory the kernel keeps sensor data
-sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
-
-# how deep in the tree to check for sensor data
-sensors_sys_depth=10
-
-# if set to 1, the script will overwrite internal
-# script functions with code generated ones
-# leave to 1, is faster
-sensors_source_update=1
-
-# how frequently to collect sensor data
-# the default is to collect it at every iteration of charts.d
-sensors_update_every=
-
-sensors_priority=90000
-
-declare -A sensors_excluded=()
-
-sensors_find_all_files() {
- find "$1" -maxdepth $sensors_sys_depth -name \*_input -o -name temp 2>/dev/null
-}
-
-sensors_find_all_dirs() {
- # shellcheck disable=SC2162
- sensors_find_all_files "$1" | while read; do
- dirname "$REPLY"
- done | sort -u
-}
-
-# _check is called once, to find out if this chart should be enabled or not
-sensors_check() {
-
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- [ -z "$(sensors_find_all_files "$sensors_sys_dir")" ] && error "no sensors found in '$sensors_sys_dir'." && return 1
- return 0
-}
-
-sensors_check_files() {
- # we only need sensors that report a non-zero value
-	# and also drop sensors that have been explicitly excluded
-
- local f v excluded
- for f in "$@"; do
- [ ! -f "$f" ] && continue
- for ex in "${sensors_excluded[@]}"; do
- [[ $f =~ .*$ex$ ]] && excluded='1' && break
- done
-
- [ "$excluded" != "1" ] && v="$(cat "$f")" || v=0
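-		# arithmetic expansion forces $v to a number (empty or non-numeric content becomes 0)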
- v=$((v + 1 - 1))
- [ $v -ne 0 ] && echo "$f" && continue
- excluded=
-
- error "$f gives zero values"
- done
-}
-
-sensors_check_temp_type() {
- # valid temp types are 1 to 6
- # disabled sensors have the value 0
-
- local f t v
- for f in "$@"; do
- # shellcheck disable=SC2001
- t=$(echo "$f" | sed "s|_input$|_type|g")
- [ "$f" = "$t" ] && echo "$f" && continue
- [ ! -f "$t" ] && echo "$f" && continue
-
- v="$(cat "$t")"
- v=$((v + 1 - 1))
- [ $v -ne 0 ] && echo "$f" && continue
-
- error "$f is disabled"
- done
-}
-
-# _create is called once, to create the charts
-sensors_create() {
- local path dir name x file lfile labelname device subsystem id type mode files multiplier divisor
-
- # we create a script with the source of the
- # sensors_update() function
- # - the highest speed we can achieve -
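-	#
-	# the generated script looks roughly like this (illustrative):
-	#      sensors_update() {
-	#          echo "BEGIN 'sensors.temp_<id>_<name>' $1"
-	#          echo "SET <fid> = "$(< <path>/temp1_input )
-	#          echo END
-	#      }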
- [ $sensors_source_update -eq 1 ] && echo >"$TMP_DIR/sensors.sh" "sensors_update() {"
-
- for path in $(sensors_find_all_dirs "$sensors_sys_dir" | sort -u); do
- dir=$(basename "$path")
- device=
- subsystem=
- id=
- type=
- name=
-
- [ -h "$path/device" ] && device=$(readlink -f "$path/device")
- [ ! -z "$device" ] && device=$(basename "$device")
- [ -z "$device" ] && device="$dir"
-
- [ -h "$path/subsystem" ] && subsystem=$(readlink -f "$path/subsystem")
- [ ! -z "$subsystem" ] && subsystem=$(basename "$subsystem")
- [ -z "$subsystem" ] && subsystem="$dir"
-
- [ -f "$path/name" ] && name=$(cat "$path/name")
- [ -z "$name" ] && name="$dir"
-
- [ -f "$path/type" ] && type=$(cat "$path/type")
- [ -z "$type" ] && type="$dir"
-
- id="$(fixid "$device.$subsystem.$dir")"
-
- debug "path='$path', dir='$dir', device='$device', subsystem='$subsystem', id='$id', name='$name'"
-
- for mode in temperature voltage fans power current energy humidity; do
- files=
- multiplier=1
- divisor=1
- algorithm="absolute"
-
- case $mode in
- temperature)
- files="$(
- ls "$path"/temp*_input 2>/dev/null
- ls "$path/temp" 2>/dev/null
- )"
- files="$(sensors_check_files "$files")"
- files="$(sensors_check_temp_type "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.temp_${id}_${name}' '' 'Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.temp_${id}_${name}' \$1\""
- divisor=1000
- ;;
-
- voltage)
- files="$(ls "$path"/in*_input 2>/dev/null)"
- files="$(sensors_check_files "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.volt_${id}_${name}' '' 'Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.volt_${id}_${name}' \$1\""
- divisor=1000
- ;;
-
- current)
- files="$(ls "$path"/curr*_input 2>/dev/null)"
- files="$(sensors_check_files "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.curr_${id}_${name}' '' 'Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.curr_${id}_${name}' \$1\""
- divisor=1000
- ;;
-
- power)
- files="$(ls "$path"/power*_input 2>/dev/null)"
- files="$(sensors_check_files "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.power_${id}_${name}' '' 'Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.power_${id}_${name}' \$1\""
- divisor=1000000
- ;;
-
- fans)
- files="$(ls "$path"/fan*_input 2>/dev/null)"
- files="$(sensors_check_files "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.fan_${id}_${name}' '' 'Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.fan_${id}_${name}' \$1\""
- ;;
-
- energy)
- files="$(ls "$path"/energy*_input 2>/dev/null)"
- files="$(sensors_check_files "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.energy_${id}_${name}' '' 'Energy' 'Joule' 'energy' 'sensors.energy' area $((sensors_priority + 6)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.energy_${id}_${name}' \$1\""
- algorithm="incremental"
- divisor=1000000
- ;;
-
- humidity)
- files="$(ls "$path"/humidity*_input 2>/dev/null)"
- files="$(sensors_check_files "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.humidity_${id}_${name}' '' 'Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.humidity_${id}_${name}' \$1\""
- divisor=1000
- ;;
-
- *)
- continue
- ;;
- esac
-
- for x in $files; do
- file="$x"
- fid="$(fixid "$file")"
- lfile="$(basename "$file" | sed "s|_input$|_label|g")"
- labelname="$(basename "$file" | sed "s|_input$||g")"
-
- if [ ! "$path/$lfile" = "$file" ] && [ -f "$path/$lfile" ]; then
- labelname="$(cat "$path/$lfile")"
- fi
-
- echo "DIMENSION $fid '$labelname' $algorithm $multiplier $divisor"
- echo >>"$TMP_DIR/sensors.sh" "echo \"SET $fid = \"\$(< $file )"
- done
-
- echo >>"$TMP_DIR/sensors.sh" "echo END"
- done
- done
-
- [ $sensors_source_update -eq 1 ] && echo >>"$TMP_DIR/sensors.sh" "}"
-
- # ok, load the function sensors_update() we created
- # shellcheck source=/dev/null
- [ $sensors_source_update -eq 1 ] && . "$TMP_DIR/sensors.sh"
-
- return 0
-}
-
-# _update is called continuously, to collect the values
-sensors_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- # shellcheck source=/dev/null
- [ $sensors_source_update -eq 0 ] && . "$TMP_DIR/sensors.sh" "$1"
-
- return 0
-}
diff --git a/collectors/charts.d.plugin/sensors/sensors.conf b/collectors/charts.d.plugin/sensors/sensors.conf
deleted file mode 100644
index bcb28807d..000000000
--- a/collectors/charts.d.plugin/sensors/sensors.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# the directory the kernel keeps sensor data
-#sensors_sys_dir="/sys/devices"
-
-# how deep in the tree to check for sensor data
-#sensors_sys_depth=10
-
-# if set to 1, the script will overwrite internal
-# script functions with code generated ones
-# leave to 1, is faster
-#sensors_source_update=1
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#sensors_update_every=
-
-# the charts priority on the dashboard
-#sensors_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#sensors_retries=10
-
diff --git a/collectors/checks.plugin/README.md b/collectors/checks.plugin/README.md
deleted file mode 100644
index 801f27752..000000000
--- a/collectors/checks.plugin/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-<!--
-title: "checks.plugin"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/checks.plugin/README.md"
-sidebar_label: "checks.plugin"
-learn_status: "Unpublished"
--->
-
-# checks.plugin
-
-A debugging plugin, disabled by default.
-
-
diff --git a/collectors/cups.plugin/Makefile.am b/collectors/cups.plugin/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/collectors/cups.plugin/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/cups.plugin/README.md b/collectors/cups.plugin/README.md
deleted file mode 120000
index e32570639..000000000
--- a/collectors/cups.plugin/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/cups.md \ No newline at end of file
diff --git a/collectors/cups.plugin/cups_plugin.c b/collectors/cups.plugin/cups_plugin.c
deleted file mode 100644
index 8efd32e31..000000000
--- a/collectors/cups.plugin/cups_plugin.c
+++ /dev/null
@@ -1,437 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-/*
- * netdata cups.plugin
- * (C) Copyright 2017-2018 Simon Nagl <simon.nagl@gmx.de>
- * Released under GPL v3+
- */
-
-#include "libnetdata/libnetdata.h"
-#include "libnetdata/required_dummies.h"
-
-#include <cups/cups.h>
-#include <limits.h>
-
-// Variables
-
-static int debug = 0;
-
-static int netdata_update_every = 1;
-static uint32_t netdata_priority = 100004;
-
-http_t *http; // connection to the cups daemon
-
-/*
- * Used to aggregate job metrics for a destination (and all destinations).
- */
-struct job_metrics {
- uint32_t id;
-
- bool is_collected; // flag if this was collected in the current cycle
-
- int num_pending;
- int num_processing;
- int num_held;
-
-    int size_pending;    // in kilobytes
-    int size_processing; // in kilobytes
-    int size_held;       // in kilobytes
-};
-DICTIONARY *dict_dest_job_metrics = NULL;
-struct job_metrics global_job_metrics;
-
-int num_dest_total;
-int num_dest_accepting_jobs;
-int num_dest_shared;
-
-int num_dest_idle;
-int num_dest_printing;
-int num_dest_stopped;
-
-void print_help() {
- fprintf(stderr,
- "\n"
- "netdata cups.plugin %s\n"
- "\n"
- "Copyright (C) 2017-2018 Simon Nagl <simon.nagl@gmx.de>\n"
- "Released under GNU General Public License v3+.\n"
- "All rights reserved.\n"
- "\n"
- "This program is a data collector plugin for netdata.\n"
- "\n"
- "SYNOPSIS: cups.plugin [-d][-h][-v] COLLECTION_FREQUENCY\n"
- "\n"
- "Options:"
- "\n"
- " COLLECTION_FREQUENCY data collection frequency in seconds\n"
- "\n"
- " -d enable verbose output\n"
- " default: disabled\n"
- "\n"
- " -v print version and exit\n"
- "\n"
- " -h print this message and exit\n"
- "\n",
- VERSION);
-}
-
-void parse_command_line(int argc, char **argv) {
- int i;
- int freq = 0;
- int update_every_found = 0;
- for (i = 1; i < argc; i++) {
- if (isdigit(*argv[i]) && !update_every_found) {
- int n = str2i(argv[i]);
- if (n > 0 && n < 86400) {
- freq = n;
- continue;
- }
- } else if (strcmp("-v", argv[i]) == 0) {
- printf("cups.plugin %s\n", VERSION);
- exit(0);
- } else if (strcmp("-d", argv[i]) == 0) {
- debug = 1;
- continue;
- } else if (strcmp("-h", argv[i]) == 0) {
- print_help();
- exit(0);
- }
-
- print_help();
- exit(1);
- }
-
- if (freq >= netdata_update_every) {
- netdata_update_every = freq;
- } else if (freq) {
- netdata_log_error("update frequency %d seconds is too small for CUPS. Using %d.", freq, netdata_update_every);
- }
-}
-
-/*
- * 'cupsGetIntegerOption()' - Get an integer option value.
- *
- * INT_MIN is returned when the option does not exist, is not an integer, or
- * exceeds the range of values for the "int" type.
- *
- * @since CUPS 2.2.4/macOS 10.13@
- */
-
-int /* O - Option value or @code INT_MIN@ */
-getIntegerOption(
- const char *name, /* I - Name of option */
- int num_options, /* I - Number of options */
- cups_option_t *options) /* I - Options */
-{
- const char *value = cupsGetOption(name, num_options, options);
- /* String value of option */
- char *ptr; /* Pointer into string value */
- long intvalue; /* Integer value */
-
-
- if (!value || !*value)
- return (INT_MIN);
-
- intvalue = strtol(value, &ptr, 10);
- if (intvalue < INT_MIN || intvalue > INT_MAX || *ptr)
- return (INT_MIN);
-
- return ((int)intvalue);
-}
-
-static int reset_job_metrics(const DICTIONARY_ITEM *item __maybe_unused, void *entry, void *data __maybe_unused) {
- struct job_metrics *jm = (struct job_metrics *)entry;
-
- jm->is_collected = false;
- jm->num_held = 0;
- jm->num_pending = 0;
- jm->num_processing = 0;
- jm->size_held = 0;
- jm->size_pending = 0;
- jm->size_processing = 0;
-
- return 0;
-}
-
-void send_job_charts_definitions_to_netdata(const char *name, uint32_t job_id, bool obsolete) {
- printf("CHART cups.job_num_%s '' 'Active jobs of %s' jobs '%s' cups.destination_job_num stacked %u %i %s\n",
- name, name, name, netdata_priority + job_id, netdata_update_every, obsolete?"obsolete":"");
- printf("DIMENSION pending '' absolute 1 1\n");
- printf("DIMENSION held '' absolute 1 1\n");
- printf("DIMENSION processing '' absolute 1 1\n");
-
- printf("CHART cups.job_size_%s '' 'Active jobs size of %s' KB '%s' cups.destination_job_size stacked %u %i %s\n",
- name, name, name, netdata_priority + 1 + job_id, netdata_update_every, obsolete?"obsolete":"");
- printf("DIMENSION pending '' absolute 1 1\n");
- printf("DIMENSION held '' absolute 1 1\n");
- printf("DIMENSION processing '' absolute 1 1\n");
-}
-
-struct job_metrics *get_job_metrics(char *dest) {
- struct job_metrics *jm = dictionary_get(dict_dest_job_metrics, dest);
-
- if (unlikely(!jm)) {
- static uint32_t job_id = 0;
- struct job_metrics new_job_metrics = { .id = ++job_id };
- jm = dictionary_set(dict_dest_job_metrics, dest, &new_job_metrics, sizeof(struct job_metrics));
- send_job_charts_definitions_to_netdata(dest, jm->id, false);
-    }
-
- return jm;
-}
-
-int send_job_metrics_to_netdata(const DICTIONARY_ITEM *item, void *entry, void *data __maybe_unused) {
- const char *name = dictionary_acquired_item_name(item);
-
- struct job_metrics *jm = (struct job_metrics *)entry;
-
- if (jm->is_collected) {
- printf(
- "BEGIN cups.job_num_%s\n"
- "SET pending = %d\n"
- "SET held = %d\n"
- "SET processing = %d\n"
- "END\n",
- name, jm->num_pending, jm->num_held, jm->num_processing);
- printf(
- "BEGIN cups.job_size_%s\n"
- "SET pending = %d\n"
- "SET held = %d\n"
- "SET processing = %d\n"
- "END\n",
- name, jm->size_pending, jm->size_held, jm->size_processing);
- }
- else {
- // mark it obsolete
- send_job_charts_definitions_to_netdata(name, jm->id, true);
-
- // delete it
- dictionary_del(dict_dest_job_metrics, name);
- }
-
- return 0;
-}
-
-void reset_metrics() {
- num_dest_total = 0;
- num_dest_accepting_jobs = 0;
- num_dest_shared = 0;
-
- num_dest_idle = 0;
- num_dest_printing = 0;
- num_dest_stopped = 0;
-
- reset_job_metrics(NULL, &global_job_metrics, NULL);
- dictionary_walkthrough_write(dict_dest_job_metrics, reset_job_metrics, NULL);
-}
-
-int main(int argc, char **argv) {
- clocks_init();
- nd_log_initialize_for_external_plugins("cups.plugin");
-
- parse_command_line(argc, argv);
-
- errno = 0;
-
- dict_dest_job_metrics = dictionary_create(DICT_OPTION_SINGLE_THREADED);
-
- // ------------------------------------------------------------------------
- // the main loop
-
- if (debug)
- fprintf(stderr, "starting data collection\n");
-
- time_t started_t = now_monotonic_sec();
- size_t iteration = 0;
- usec_t step = netdata_update_every * USEC_PER_SEC;
-
- heartbeat_t hb;
- heartbeat_init(&hb);
- for (iteration = 0; 1; iteration++) {
- heartbeat_next(&hb, step);
-
- if (unlikely(netdata_exit))
- break;
-
- reset_metrics();
-
- cups_dest_t *dests;
- num_dest_total = cupsGetDests2(http, &dests);
-
- if(unlikely(num_dest_total == 0)) {
- // reconnect to cups to check if the server is down.
- httpClose(http);
- http = httpConnect2(cupsServer(), ippPort(), NULL, AF_UNSPEC, cupsEncryption(), 0, netdata_update_every * 1000, NULL);
- if(http == NULL) {
- netdata_log_error("cups daemon is not running. Exiting!");
- exit(1);
- }
- }
-
- cups_dest_t *curr_dest = dests;
- int counter = 0;
- while (counter < num_dest_total) {
- if (counter != 0) {
- curr_dest++;
- }
- counter++;
-
- const char *printer_uri_supported = cupsGetOption("printer-uri-supported", curr_dest->num_options, curr_dest->options);
- if (!printer_uri_supported) {
- if(debug)
-					fprintf(stderr, "destination %s discovered, but not yet set up as a local printer\n", curr_dest->name);
- continue;
- }
-
- const char *printer_is_accepting_jobs = cupsGetOption("printer-is-accepting-jobs", curr_dest->num_options, curr_dest->options);
- if (printer_is_accepting_jobs && !strcmp(printer_is_accepting_jobs, "true")) {
- num_dest_accepting_jobs++;
- }
-
- const char *printer_is_shared = cupsGetOption("printer-is-shared", curr_dest->num_options, curr_dest->options);
- if (printer_is_shared && !strcmp(printer_is_shared, "true")) {
- num_dest_shared++;
- }
-
- int printer_state = getIntegerOption("printer-state", curr_dest->num_options, curr_dest->options);
- switch (printer_state) {
-				case 3: // IPP printer-state: idle
-					num_dest_idle++;
-					break;
-				case 4: // IPP printer-state: processing
-					num_dest_printing++;
-					break;
-				case 5: // IPP printer-state: stopped
-					num_dest_stopped++;
-					break;
- case INT_MIN:
- if(debug)
-						fprintf(stderr, "printer state is missing for destination %s\n", curr_dest->name);
- break;
- default:
- netdata_log_error("Unknown printer state (%d) found.", printer_state);
- break;
- }
-
- /*
- * flag job metrics to print values.
-			 * This is needed to also report destinations with zero active jobs.
- */
- struct job_metrics *jm = get_job_metrics(curr_dest->name);
- jm->is_collected = true;
- }
- cupsFreeDests(num_dest_total, dests);
-
- if (unlikely(netdata_exit))
- break;
-
- cups_job_t *jobs, *curr_job;
- int num_jobs = cupsGetJobs2(http, &jobs, NULL, 0, CUPS_WHICHJOBS_ACTIVE);
- int i;
- for (i = num_jobs, curr_job = jobs; i > 0; i--, curr_job++) {
- struct job_metrics *jm = get_job_metrics(curr_job->dest);
- jm->is_collected = true;
-
- switch (curr_job->state) {
- case IPP_JOB_PENDING:
- jm->num_pending++;
- jm->size_pending += curr_job->size;
- global_job_metrics.num_pending++;
- global_job_metrics.size_pending += curr_job->size;
- break;
- case IPP_JOB_HELD:
- jm->num_held++;
- jm->size_held += curr_job->size;
- global_job_metrics.num_held++;
- global_job_metrics.size_held += curr_job->size;
- break;
- case IPP_JOB_PROCESSING:
- jm->num_processing++;
- jm->size_processing += curr_job->size;
- global_job_metrics.num_processing++;
- global_job_metrics.size_processing += curr_job->size;
- break;
- default:
- netdata_log_error("Unsupported job state (%u) found.", curr_job->state);
- break;
- }
- }
- cupsFreeJobs(num_jobs, jobs);
-
- dictionary_walkthrough_write(dict_dest_job_metrics, send_job_metrics_to_netdata, NULL);
- dictionary_garbage_collect(dict_dest_job_metrics);
-
- static int cups_printer_by_option_created = 0;
- if (unlikely(!cups_printer_by_option_created))
- {
- cups_printer_by_option_created = 1;
- printf("CHART cups.dest_state '' 'Destinations by state' dests overview cups.dests_state stacked 100000 %i\n", netdata_update_every);
- printf("DIMENSION idle '' absolute 1 1\n");
- printf("DIMENSION printing '' absolute 1 1\n");
- printf("DIMENSION stopped '' absolute 1 1\n");
-
- printf("CHART cups.dest_option '' 'Destinations by option' dests overview cups.dests_option line 100001 %i\n", netdata_update_every);
- printf("DIMENSION total '' absolute 1 1\n");
- printf("DIMENSION acceptingjobs '' absolute 1 1\n");
- printf("DIMENSION shared '' absolute 1 1\n");
-
- printf("CHART cups.job_num '' 'Active jobs' jobs overview cups.job_num stacked 100002 %i\n", netdata_update_every);
- printf("DIMENSION pending '' absolute 1 1\n");
- printf("DIMENSION held '' absolute 1 1\n");
- printf("DIMENSION processing '' absolute 1 1\n");
-
- printf("CHART cups.job_size '' 'Active jobs size' KB overview cups.job_size stacked 100003 %i\n", netdata_update_every);
- printf("DIMENSION pending '' absolute 1 1\n");
- printf("DIMENSION held '' absolute 1 1\n");
- printf("DIMENSION processing '' absolute 1 1\n");
- }
-
- printf(
- "BEGIN cups.dest_state\n"
- "SET idle = %d\n"
- "SET printing = %d\n"
- "SET stopped = %d\n"
- "END\n",
- num_dest_idle, num_dest_printing, num_dest_stopped);
- printf(
- "BEGIN cups.dest_option\n"
- "SET total = %d\n"
- "SET acceptingjobs = %d\n"
- "SET shared = %d\n"
- "END\n",
- num_dest_total, num_dest_accepting_jobs, num_dest_shared);
- printf(
- "BEGIN cups.job_num\n"
- "SET pending = %d\n"
- "SET held = %d\n"
- "SET processing = %d\n"
- "END\n",
- global_job_metrics.num_pending, global_job_metrics.num_held, global_job_metrics.num_processing);
- printf(
- "BEGIN cups.job_size\n"
- "SET pending = %d\n"
- "SET held = %d\n"
- "SET processing = %d\n"
- "END\n",
- global_job_metrics.size_pending, global_job_metrics.size_held, global_job_metrics.size_processing);
-
- fflush(stdout);
-
- if (unlikely(netdata_exit))
- break;
-
-		// restart check: exit after 14400 seconds, so the agent respawns a fresh plugin process
-		if (now_monotonic_sec() - started_t > 14400)
-			break;
-
- fprintf(stdout, "\n");
- fflush(stdout);
- if (ferror(stdout) && errno == EPIPE) {
- netdata_log_error("error writing to stdout: EPIPE. Exiting...");
- return 1;
- }
- }
-
- httpClose(http);
- netdata_log_info("CUPS process exiting");
-}
diff --git a/collectors/cups.plugin/integrations/cups.md b/collectors/cups.plugin/integrations/cups.md
deleted file mode 100644
index a8ea5b15f..000000000
--- a/collectors/cups.plugin/integrations/cups.md
+++ /dev/null
@@ -1,141 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/cups.plugin/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/cups.plugin/metadata.yaml"
-sidebar_label: "CUPS"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Hardware Devices and Sensors"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# CUPS
-
-
-<img src="https://netdata.cloud/img/cups.png" width="150"/>
-
-
-Plugin: cups.plugin
-Module: cups.plugin
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor CUPS performance to keep the printing system operating at its best. Track job statuses, queue lengths, and error rates to ensure smooth printing.
-
-The plugin uses the CUPS shared library to connect to and monitor the server.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs access to the CUPS server. Netdata sets the required permissions during installation so the plugin can reach the server through the CUPS library.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin detects when the CUPS server is running and tries to connect to it.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per CUPS instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cups.dests_state | idle, printing, stopped | dests |
-| cups.dests_option | total, acceptingjobs, shared | dests |
-| cups.job_num | pending, held, processing | jobs |
-| cups.job_size | pending, held, processing | KB |
-
-### Per destination
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cups.destination_job_num | pending, held, processing | jobs |
-| cups.destination_job_size | pending, held, processing | KB |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Minimum setup
-
-The CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`.
-
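-You can quickly verify that the scheduler is reachable with `lpstat`, which ships with CUPS:
-
-```bash
-lpstat -r
-# expected output: "scheduler is running"
-```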
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:cups]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 1 | no |
-| command options | Additional parameters for the collector | | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/cups.plugin/metadata.yaml b/collectors/cups.plugin/metadata.yaml
deleted file mode 100644
index 9ec2f4118..000000000
--- a/collectors/cups.plugin/metadata.yaml
+++ /dev/null
@@ -1,131 +0,0 @@
-plugin_name: cups.plugin
-modules:
- - meta:
- plugin_name: cups.plugin
- module_name: cups.plugin
- monitored_instance:
- name: CUPS
- link: "https://www.cups.org/"
- categories:
- - data-collection.hardware-devices-and-sensors
- icon_filename: "cups.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
-      metrics_description: "Monitor CUPS performance to keep the printing system operating at its best. Track job statuses, queue lengths, and error rates to ensure smooth printing."
-      method_description: "The plugin uses the CUPS shared library to connect to and monitor the server."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
-        description: "The plugin needs access to the CUPS server. Netdata sets the required permissions during installation so the plugin can reach the server through the CUPS library."
- default_behavior:
- auto_detection:
-          description: "The plugin detects when the CUPS server is running and tries to connect to it."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Minimum setup
- description: "The CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`."
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:cups]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 1
- required: false
- - name: command options
- description: Additional parameters for the collector
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: cups.dests_state
- description: Destinations by state
- unit: "dests"
- chart_type: stacked
- dimensions:
- - name: idle
- - name: printing
- - name: stopped
- - name: cups.dests_option
- description: Destinations by option
- unit: "dests"
- chart_type: line
- dimensions:
- - name: total
- - name: acceptingjobs
- - name: shared
- - name: cups.job_num
- description: Active jobs
- unit: "jobs"
- chart_type: stacked
- dimensions:
- - name: pending
- - name: held
- - name: processing
- - name: cups.job_size
- description: Active jobs size
- unit: "KB"
- chart_type: stacked
- dimensions:
- - name: pending
- - name: held
- - name: processing
- - name: destination
- description: ""
- labels: []
- metrics:
- - name: cups.destination_job_num
- description: Active jobs of {destination}
- unit: "jobs"
- chart_type: stacked
- dimensions:
- - name: pending
- - name: held
- - name: processing
- - name: cups.destination_job_size
- description: Active jobs size of {destination}
- unit: "KB"
- chart_type: stacked
- dimensions:
- - name: pending
- - name: held
- - name: processing
diff --git a/collectors/debugfs.plugin/Makefile.am b/collectors/debugfs.plugin/Makefile.am
deleted file mode 100644
index 02fe3a314..000000000
--- a/collectors/debugfs.plugin/Makefile.am
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
diff --git a/collectors/debugfs.plugin/README.md b/collectors/debugfs.plugin/README.md
deleted file mode 100644
index a2dc9c0f6..000000000
--- a/collectors/debugfs.plugin/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# OS provided metrics (debugfs.plugin)
-
-`debugfs.plugin` gathers metrics from the `/sys/kernel/debug` folder on Linux
-systems. [Debugfs](https://docs.kernel.org/filesystems/debugfs.html) exists as an easy way for kernel developers to
-make information available to user space.
-
-This plugin
-is [external](https://github.com/netdata/netdata/tree/master/collectors#collector-architecture-and-terminology);
-the netdata daemon spawns it as a long-running, independent process.
-
-In detail, it collects metrics from:
-
-- `/sys/kernel/debug/extfrag` (Memory fragmentation index for each order and zone).
-- `/sys/kernel/debug/zswap` ([Zswap](https://www.kernel.org/doc/Documentation/vm/zswap.txt) performance statistics).
-
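-For example, the zswap statistics are plain files that can be read directly (listing is illustrative; the exact files depend on the kernel version):
-
-```bash
-sudo ls /sys/kernel/debug/zswap/
-# duplicate_entry  pool_limit_hit  pool_total_size  stored_pages  written_back_pages ...
-```
-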
-## Prerequisites
-
-### Permissions
-
-> No user action required.
-
-The debugfs root directory is accessible only to the root user by default. Netdata
-uses [Linux Capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) to give the plugin access
-to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read
-permission checks and directory read and execute permission checks. If file capabilities are not usable, the plugin is instead installed with the SUID bit set, so that it runs as root.
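-
-To confirm which mechanism is in effect, `getcap` reports the file capabilities (the plugin path may differ on your system):
-
-```bash
-getcap /usr/libexec/netdata/plugins.d/debugfs.plugin
-# e.g. /usr/libexec/netdata/plugins.d/debugfs.plugin cap_dac_read_search=ep
-```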
-
-## Metrics
-
-| Metric | Scope | Dimensions | Units | Labels |
-|-------------------------------------|:---------:|:---------------------------------------------------------------------------------------:|:------------:|:---------:|
-| mem.fragmentation_index_dma | numa node | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index | numa_node |
-| mem.fragmentation_index_dma32 | numa node | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index | numa_node |
-| mem.fragmentation_index_normal | numa node | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index | numa_node |
-| system.zswap_pool_compression_ratio | | compression_ratio | ratio | |
-| system.zswap_pool_compressed_size | | compressed_size | bytes | |
-| system.zswap_pool_raw_size | | uncompressed_size | bytes | |
-| system.zswap_rejections | | compress_poor, kmemcache_fail, alloc_fail, reclaim_fail | rejections/s | |
-| system.zswap_pool_limit_hit | | limit | events/s | |
-| system.zswap_written_back_raw_bytes | | written_back | bytes/s | |
-| system.zswap_same_filled_raw_size | | same_filled | bytes | |
-| system.zswap_duplicate_entry | | entries | entries/s | |
-
-## Troubleshooting
-
-To troubleshoot issues with the collector, run the `debugfs.plugin` in the terminal. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `debugfs.plugin` to debug the collector:
-
- ```bash
- ./debugfs.plugin
- ```
diff --git a/collectors/debugfs.plugin/debugfs_extfrag.c b/collectors/debugfs.plugin/debugfs_extfrag.c
deleted file mode 100644
index 75da4deca..000000000
--- a/collectors/debugfs.plugin/debugfs_extfrag.c
+++ /dev/null
@@ -1,123 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "debugfs_plugin.h"
-
-#define NETDATA_ORDER_FRAGMENTATION 11
-
-static char *orders[NETDATA_ORDER_FRAGMENTATION] = { "order0", "order1", "order2", "order3", "order4",
- "order5", "order6", "order7", "order8", "order9",
- "order10"
-};
-
-static struct netdata_extrafrag {
- char *node_zone;
- uint32_t hash;
-
- char *id;
-
- collected_number orders[NETDATA_ORDER_FRAGMENTATION];
-
- struct netdata_extrafrag *next;
-} *netdata_extrafrags_root = NULL;
-
-static struct netdata_extrafrag *find_or_create_extrafrag(const char *name)
-{
- struct netdata_extrafrag *extrafrag;
- uint32_t hash = simple_hash(name);
-
- // search the list from beginning to end
- for (extrafrag = netdata_extrafrags_root ; extrafrag ; extrafrag = extrafrag->next) {
- if (unlikely(hash == extrafrag->hash && !strcmp(name, extrafrag->node_zone))) {
- return extrafrag;
- }
- }
-
- extrafrag = callocz(1, sizeof(struct netdata_extrafrag));
- extrafrag->node_zone = strdupz(name);
- extrafrag->hash = hash;
-
- if (netdata_extrafrags_root) {
- struct netdata_extrafrag *last_node;
- for (last_node = netdata_extrafrags_root; last_node->next ; last_node = last_node->next);
-
- last_node->next = extrafrag;
- } else
- netdata_extrafrags_root = extrafrag;
-
-
- return extrafrag;
-}
-
-static void extfrag_send_chart(char *chart_id, collected_number *values)
-{
- int i;
- fprintf(stdout, "BEGIN mem.fragmentation_index_%s\n", chart_id);
- for (i = 0; i < NETDATA_ORDER_FRAGMENTATION; i++) {
- fprintf(stdout, "SET %s = %lld\n", orders[i], values[i]);
- }
- fprintf(stdout, "END\n");
- fflush(stdout);
-}
-
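-// Each line of extfrag_index describes one (node, zone) pair, e.g. (format assumed
-// from the parsing below; word 1 is the node id, word 3 the zone name, and words
-// 4-14 the per-order indexes, which are scaled by 1000 before being sent):
-//   Node 0, zone Normal -1.000 -1.000 -1.000 0.931 0.966 0.983 0.992 0.996 0.998 0.999 1.000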
-int do_debugfs_extfrag(int update_every, const char *name) {
- static procfile *ff = NULL;
- static int chart_order = NETDATA_CHART_PRIO_MEM_FRAGMENTATION;
-
- if (unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename,
- FILENAME_MAX,
- "%s%s",
- netdata_configured_host_prefix,
- "/sys/kernel/debug/extfrag/extfrag_index");
-
- ff = procfile_open(filename, " \t,", PROCFILE_FLAG_DEFAULT);
- if (unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
- if (unlikely(!ff)) return 1;
-
- size_t l, i, j, lines = procfile_lines(ff);
- for (l = 0; l < lines; l++) {
- char chart_id[64];
- char zone_lowercase[32];
- if (unlikely(procfile_linewords(ff, l) < 15)) continue;
- char *zone = procfile_lineword(ff, l, 3);
- strncpyz(zone_lowercase, zone, 31);
- debugfs2lower(zone_lowercase);
-
- char *id = procfile_lineword(ff, l, 1);
- snprintfz(chart_id, 63, "node_%s_%s", id, zone_lowercase);
- debugfs2lower(chart_id);
-
- struct netdata_extrafrag *extrafrag = find_or_create_extrafrag(chart_id);
- collected_number *line_orders = extrafrag->orders;
- for (i = 4, j = 0 ; i < 15; i++, j++) {
- NETDATA_DOUBLE value = str2ndd(procfile_lineword(ff, l, i), NULL);
- line_orders[j] = (collected_number) (value * 1000.0);
- }
-
- if (unlikely(!extrafrag->id)) {
- extrafrag->id = extrafrag->node_zone;
- fprintf(
- stdout,
- "CHART mem.fragmentation_index_%s '' 'Memory fragmentation index for each order' 'index' 'fragmentation' 'mem.fragmentation_index_%s' 'line' %d %d '' 'debugfs.plugin' '%s'\n",
- extrafrag->node_zone,
- zone_lowercase,
- chart_order++, // FIXME: the same zones must have the same order
- update_every,
- name);
- for (i = 0; i < NETDATA_ORDER_FRAGMENTATION; i++) {
- fprintf(stdout, "DIMENSION '%s' '%s' absolute 1 1000 ''\n", orders[i], orders[i]);
- }
- fprintf(stdout,
- "CLABEL 'numa_node' 'node%s' 1\n"
- "CLABEL_COMMIT\n",
- id);
- }
- extfrag_send_chart(chart_id, line_orders);
- }
-
- return 0;
-}
diff --git a/collectors/debugfs.plugin/debugfs_plugin.c b/collectors/debugfs.plugin/debugfs_plugin.c
deleted file mode 100644
index 13012ec40..000000000
--- a/collectors/debugfs.plugin/debugfs_plugin.c
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "debugfs_plugin.h"
-#include "libnetdata/required_dummies.h"
-
-static char *user_config_dir = CONFIG_DIR;
-static char *stock_config_dir = LIBCONFIG_DIR;
-
-static int update_every = 1;
-
-static struct debugfs_module {
- const char *name;
-
- int enabled;
-
- int (*func)(int update_every, const char *name);
-} debugfs_modules[] = {
- // Memory Fragmentation
- { .name = "/sys/kernel/debug/extfrag", .enabled = CONFIG_BOOLEAN_YES,
- .func = do_debugfs_extfrag},
- { .name = "/sys/kernel/debug/zswap", .enabled = CONFIG_BOOLEAN_YES,
- .func = do_debugfs_zswap},
- // Linux powercap metrics is here because it needs privilege to read each RAPL zone
- { .name = "/sys/devices/virtual/powercap", .enabled = CONFIG_BOOLEAN_YES,
- .func = do_sys_devices_virtual_powercap},
-
- // The terminator
- { .name = NULL, .enabled = CONFIG_BOOLEAN_NO, .func = NULL}
-};
-
-#ifdef HAVE_CAPABILITY
-static int debugfs_check_capabilities()
-{
- cap_t caps = cap_get_proc();
- if (!caps) {
- netdata_log_error("Cannot get current capabilities.");
- return 0;
- }
-
- int ret = 1;
- cap_flag_value_t cfv = CAP_CLEAR;
- if (cap_get_flag(caps, CAP_DAC_READ_SEARCH, CAP_EFFECTIVE, &cfv) == -1) {
- netdata_log_error("Cannot find if CAP_DAC_READ_SEARCH is effective.");
- ret = 0;
- } else {
- if (cfv != CAP_SET) {
- netdata_log_error("debugfs.plugin should run with CAP_DAC_READ_SEARCH.");
- ret = 0;
- }
- }
- cap_free(caps);
-
- return ret;
-}
-#else
-static int debugfs_check_capabilities()
-{
- return 0;
-}
-#endif
-
-// TODO: This function is used by 3 different collectors; we should make it global (next PR)
-static int debugfs_am_i_running_as_root()
-{
- uid_t uid = getuid(), euid = geteuid();
-
- if (uid == 0 || euid == 0) {
- return 1;
- }
-
- return 0;
-}
-
-void debugfs2lower(char *name)
-{
- while (*name) {
- *name = tolower(*name);
- name++;
- }
-}
-
-// Considering our goal to reduce binary size, I preferred to copy the function instead of forcing a link with unnecessary libs
-const char *debugfs_rrdset_type_name(RRDSET_TYPE chart_type) {
- switch(chart_type) {
- case RRDSET_TYPE_LINE:
- default:
- return RRDSET_TYPE_LINE_NAME;
-
- case RRDSET_TYPE_AREA:
- return RRDSET_TYPE_AREA_NAME;
-
- case RRDSET_TYPE_STACKED:
- return RRDSET_TYPE_STACKED_NAME;
- }
-}
-
-const char *debugfs_rrd_algorithm_name(RRD_ALGORITHM algorithm) {
- switch(algorithm) {
- case RRD_ALGORITHM_ABSOLUTE:
- default:
- return RRD_ALGORITHM_ABSOLUTE_NAME;
-
- case RRD_ALGORITHM_INCREMENTAL:
- return RRD_ALGORITHM_INCREMENTAL_NAME;
-
- case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL:
- return RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME;
-
- case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL:
- return RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME;
- }
-}
-
-int debugfs_check_sys_permission() {
- int ret = 0;
-
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s/sys/kernel/debug/extfrag/extfrag_index", netdata_configured_host_prefix);
-
- procfile *ff = procfile_open(filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(!ff) goto dcsp_cleanup;
-
- ff = procfile_readall(ff);
- if(!ff) goto dcsp_cleanup;
-
- ret = 1;
-
-dcsp_cleanup:
- if (!ret)
- perror("Cannot open /sys/kernel/debug/extfrag/extfrag_index file");
- procfile_close(ff);
- return ret;
-}
-
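-// Command line handling: a bare positive integer sets the update frequency in
-// seconds; "test-permissions" (or "-t") only verifies debugfs access, prints
-// "OK" on success, and exits.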
-static void debugfs_parse_args(int argc, char **argv)
-{
- int i, freq = 0;
- for(i = 1; i < argc; i++) {
- if(!freq) {
- int n = (int)str2l(argv[i]);
- if(n > 0) {
- freq = n;
- continue;
- }
- }
-
- if(strcmp("test-permissions", argv[i]) == 0 || strcmp("-t", argv[i]) == 0) {
- if(!debugfs_check_sys_permission()) {
- exit(2);
- }
- printf("OK\n");
- exit(0);
- }
- }
-
- if(freq > 0) update_every = freq;
-}
-
-int main(int argc, char **argv)
-{
- clocks_init();
- nd_log_initialize_for_external_plugins("debugfs.plugin");
-
- netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
- if (verify_netdata_host_prefix(true) == -1)
- exit(1);
-
- user_config_dir = getenv("NETDATA_USER_CONFIG_DIR");
- if (user_config_dir == NULL) {
- user_config_dir = CONFIG_DIR;
- }
-
- stock_config_dir = getenv("NETDATA_STOCK_CONFIG_DIR");
- if (stock_config_dir == NULL) {
- // netdata_log_info("NETDATA_CONFIG_DIR is not passed from netdata");
- stock_config_dir = LIBCONFIG_DIR;
- }
-
- // FIXME: should first check if /sys/kernel/debug is mounted
-
- // FIXME: remove debugfs_check_sys_permission() after https://github.com/netdata/netdata/issues/15048 is fixed
- if (!debugfs_check_capabilities() && !debugfs_am_i_running_as_root() && !debugfs_check_sys_permission()) {
- uid_t uid = getuid(), euid = geteuid();
-#ifdef HAVE_CAPABILITY
- netdata_log_error(
- "debugfs.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. "
- "Without these, debugfs.plugin cannot access /sys/kernel/debug. "
- "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; "
- "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; ",
- uid,
- euid,
- argv[0],
- argv[0],
- argv[0]);
-#else
- netdata_log_error(
- "debugfs.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. "
- "Without these, debugfs.plugin cannot access /sys/kernel/debug."
- "Your system does not support capabilities. "
- "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; ",
- uid,
- euid,
- argv[0],
- argv[0]);
-#endif
- exit(1);
- }
-
- // if (!debugfs_check_sys_permission()) {
- // exit(2);
- // }
-
- debugfs_parse_args(argc, argv);
-
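- // Collect for at most 86400 iterations (one day at the default 1-second rate),
- // then send EXIT and stop; external plugins are expected to be respawned by the
- // netdata daemon (an assumption based on the EXIT handshake below).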
- size_t iteration;
- usec_t step = update_every * USEC_PER_SEC;
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- for (iteration = 0; iteration < 86400; iteration++) {
- heartbeat_next(&hb, step);
- int enabled = 0;
-
- for (int i = 0; debugfs_modules[i].name; i++) {
- struct debugfs_module *pm = &debugfs_modules[i];
- if (unlikely(!pm->enabled))
- continue;
-
- pm->enabled = !pm->func(update_every, pm->name);
- if (likely(pm->enabled))
- enabled++;
- }
- if (!enabled) {
- netdata_log_info("all modules are disabled, exiting...");
- return 1;
- }
-
- fprintf(stdout, "\n");
- fflush(stdout);
- if (ferror(stdout) && errno == EPIPE) {
- netdata_log_error("error writing to stdout: EPIPE. Exiting...");
- return 1;
- }
- }
-
- fprintf(stdout, "EXIT\n");
- fflush(stdout);
- return 0;
-}
diff --git a/collectors/debugfs.plugin/debugfs_plugin.h b/collectors/debugfs.plugin/debugfs_plugin.h
deleted file mode 100644
index 903e4a19e..000000000
--- a/collectors/debugfs.plugin/debugfs_plugin.h
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_DEBUGFS_PLUGIN_H
-#define NETDATA_DEBUGFS_PLUGIN_H 1
-
-#include "libnetdata/libnetdata.h"
-#include "collectors/all.h"
-#include "database/rrd.h"
-
-int do_debugfs_extfrag(int update_every, const char *name);
-int do_debugfs_zswap(int update_every, const char *name);
-int do_sys_devices_virtual_powercap(int update_every, const char *name);
-void debugfs2lower(char *name);
-const char *debugfs_rrdset_type_name(RRDSET_TYPE chart_type);
-const char *debugfs_rrd_algorithm_name(RRD_ALGORITHM algorithm);
-
-#endif // NETDATA_DEBUGFS_PLUGIN_H
diff --git a/collectors/debugfs.plugin/debugfs_zswap.c b/collectors/debugfs.plugin/debugfs_zswap.c
deleted file mode 100644
index 502a04f1f..000000000
--- a/collectors/debugfs.plugin/debugfs_zswap.c
+++ /dev/null
@@ -1,437 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "debugfs_plugin.h"
-
-static long system_page_size = 4096;
-
-static collected_number pages_to_bytes(collected_number value)
-{
- return value * system_page_size;
-}
-
-struct netdata_zswap_metric {
- const char *filename;
-
- const char *chart_id;
- const char *title;
- const char *units;
- RRDSET_TYPE charttype;
- int prio;
- const char *dimension;
- RRD_ALGORITHM algorithm;
- int divisor;
-
- int enabled;
- int chart_created;
-
- collected_number value;
- collected_number (*convertv)(collected_number v);
-};
-
-static struct netdata_zswap_metric zswap_calculated_metrics[] = {
- {.filename = "",
- .chart_id = "pool_compression_ratio",
- .dimension = "compression_ratio",
- .units = "ratio",
- .title = "Zswap compression ratio",
- .algorithm = RRD_ALGORITHM_ABSOLUTE,
- .charttype = RRDSET_TYPE_LINE,
- .enabled = CONFIG_BOOLEAN_YES,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = NETDATA_CHART_PRIO_MEM_ZSWAP_COMPRESS_RATIO,
- .divisor = 100,
- .convertv = NULL,
- .value = -1},
-};
-
-enum netdata_zswap_calculated {
- NETDATA_ZSWAP_COMPRESSION_RATIO_CHART,
-};
-
-enum netdata_zwap_independent {
- NETDATA_ZSWAP_POOL_TOTAL_SIZE,
- NETDATA_ZSWAP_STORED_PAGES,
- NETDATA_ZSWAP_POOL_LIMIT_HIT,
- NETDATA_ZSWAP_WRITTEN_BACK_PAGES,
- NETDATA_ZSWAP_SAME_FILLED_PAGES,
- NETDATA_ZSWAP_DUPLICATE_ENTRY,
-
- // Terminator
- NETDATA_ZSWAP_SITE_END
-};
-
-static struct netdata_zswap_metric zswap_independent_metrics[] = {
- // https://elixir.bootlin.com/linux/latest/source/mm/zswap.c
- {.filename = "/sys/kernel/debug/zswap/pool_total_size",
- .chart_id = "pool_compressed_size",
- .dimension = "compressed_size",
- .units = "bytes",
- .title = "Zswap compressed bytes currently stored",
- .algorithm = RRD_ALGORITHM_ABSOLUTE,
- .charttype = RRDSET_TYPE_AREA,
- .enabled = CONFIG_BOOLEAN_YES,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = NETDATA_CHART_PRIO_MEM_ZSWAP_POOL_TOT_SIZE,
- .divisor = 1,
- .convertv = NULL,
- .value = -1},
- {.filename = "/sys/kernel/debug/zswap/stored_pages",
- .chart_id = "pool_raw_size",
- .dimension = "uncompressed_size",
- .units = "bytes",
- .title = "Zswap uncompressed bytes currently stored",
- .algorithm = RRD_ALGORITHM_ABSOLUTE,
- .charttype = RRDSET_TYPE_AREA,
- .enabled = CONFIG_BOOLEAN_YES,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = NETDATA_CHART_PRIO_MEM_ZSWAP_STORED_PAGE,
- .divisor = 1,
- .convertv = pages_to_bytes,
- .value = -1},
- {.filename = "/sys/kernel/debug/zswap/pool_limit_hit",
- .chart_id = "pool_limit_hit",
- .dimension = "limit",
- .units = "events/s",
- .title = "Zswap pool limit was reached",
- .algorithm = RRD_ALGORITHM_INCREMENTAL,
- .charttype = RRDSET_TYPE_LINE,
- .enabled = CONFIG_BOOLEAN_YES,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = NETDATA_CHART_PRIO_MEM_ZSWAP_POOL_LIM_HIT,
- .divisor = 1,
- .convertv = NULL,
- .value = -1},
- {.filename = "/sys/kernel/debug/zswap/written_back_pages",
- .chart_id = "written_back_raw_bytes",
- .dimension = "written_back",
- .units = "bytes/s",
- .title = "Zswap uncomressed bytes written back when pool limit was reached",
- .algorithm = RRD_ALGORITHM_INCREMENTAL,
- .charttype = RRDSET_TYPE_AREA,
- .enabled = CONFIG_BOOLEAN_YES,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = NETDATA_CHART_PRIO_MEM_ZSWAP_WRT_BACK_PAGES,
- .divisor = 1,
- .convertv = pages_to_bytes,
- .value = -1},
- {.filename = "/sys/kernel/debug/zswap/same_filled_pages",
- .chart_id = "same_filled_raw_size",
- .dimension = "same_filled",
- .units = "bytes",
- .title = "Zswap same-value filled uncompressed bytes currently stored",
- .algorithm = RRD_ALGORITHM_ABSOLUTE,
- .charttype = RRDSET_TYPE_AREA,
- .enabled = CONFIG_BOOLEAN_YES,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = NETDATA_CHART_PRIO_MEM_ZSWAP_SAME_FILL_PAGE,
- .divisor = 1,
- .convertv = pages_to_bytes,
- .value = -1},
- {.filename = "/sys/kernel/debug/zswap/duplicate_entry",
- .chart_id = "duplicate_entry",
- .dimension = "duplicate",
- .units = "entries/s",
- .title = "Zswap duplicate store was encountered",
- .algorithm = RRD_ALGORITHM_INCREMENTAL,
- .charttype = RRDSET_TYPE_LINE,
- .enabled = CONFIG_BOOLEAN_YES,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = NETDATA_CHART_PRIO_MEM_ZSWAP_DUPP_ENTRY,
- .divisor = 1,
- .convertv = NULL,
- .value = -1},
-
- // The terminator
- {.filename = NULL,
- .chart_id = NULL,
- .dimension = NULL,
- .units = NULL,
- .title = NULL,
- .algorithm = RRD_ALGORITHM_ABSOLUTE,
- .charttype = RRDSET_TYPE_LINE,
- .enabled = CONFIG_BOOLEAN_NO,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = -1,
- .value = -1}};
-
-enum netdata_zswap_rejected {
- NETDATA_ZSWAP_REJECTED_CHART,
- NETDATA_ZSWAP_REJECTED_COMPRESS_POOR,
- NETDATA_ZSWAP_REJECTED_KMEM_FAIL,
- NETDATA_ZSWAP_REJECTED_RALLOC_FAIL,
- NETDATA_ZSWAP_REJECTED_RRECLAIM_FAIL,
-
- // Terminator
- NETDATA_ZSWAP_REJECTED_END
-};
-
-static struct netdata_zswap_metric zswap_rejected_metrics[] = {
- {.filename = "/sys/kernel/debug/zswap/",
- .chart_id = "rejections",
- .dimension = NULL,
- .units = "rejections/s",
- .title = "Zswap rejections",
- .algorithm = RRD_ALGORITHM_INCREMENTAL,
- .charttype = RRDSET_TYPE_STACKED,
- .enabled = CONFIG_BOOLEAN_YES,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = NETDATA_CHART_PRIO_MEM_ZSWAP_REJECTS,
- .divisor = 1,
- .convertv = NULL,
- .value = -1},
- {.filename = "/sys/kernel/debug/zswap/reject_compress_poor",
- .chart_id = "reject_compress_poor",
- .dimension = "compress_poor",
- .units = NULL,
- .title = NULL,
- .algorithm = RRD_ALGORITHM_INCREMENTAL,
- .charttype = RRDSET_TYPE_STACKED,
- .enabled = CONFIG_BOOLEAN_YES,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = NETDATA_CHART_PRIO_MEM_ZSWAP_REJECTS,
- .divisor = 1,
- .convertv = NULL,
- .value = -1},
- {.filename = "/sys/kernel/debug/zswap/reject_kmemcache_fail",
- .chart_id = "reject_kmemcache_fail",
- .dimension = "kmemcache_fail",
- .units = NULL,
- .title = NULL,
- .algorithm = RRD_ALGORITHM_INCREMENTAL,
- .charttype = RRDSET_TYPE_STACKED,
- .enabled = CONFIG_BOOLEAN_YES,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = NETDATA_CHART_PRIO_MEM_ZSWAP_REJECTS,
- .divisor = 1,
- .convertv = NULL,
- .value = -1},
- {.filename = "/sys/kernel/debug/zswap/reject_alloc_fail",
- .chart_id = "reject_alloc_fail",
- .dimension = "alloc_fail",
- .units = NULL,
- .title = NULL,
- .algorithm = RRD_ALGORITHM_INCREMENTAL,
- .charttype = RRDSET_TYPE_STACKED,
- .enabled = CONFIG_BOOLEAN_YES,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = NETDATA_CHART_PRIO_MEM_ZSWAP_REJECTS,
- .divisor = 1,
- .convertv = NULL,
- .value = -1},
- {.filename = "/sys/kernel/debug/zswap/reject_reclaim_fail",
- .chart_id = "reject_reclaim_fail",
- .dimension = "reclaim_fail",
- .units = NULL,
- .title = NULL,
- .algorithm = RRD_ALGORITHM_INCREMENTAL,
- .charttype = RRDSET_TYPE_STACKED,
- .enabled = CONFIG_BOOLEAN_YES,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = NETDATA_CHART_PRIO_MEM_ZSWAP_REJECTS,
- .divisor = 1,
- .convertv = NULL,
- .value = -1},
-
- // The terminator
- {.filename = NULL,
- .chart_id = NULL,
- .dimension = NULL,
- .units = NULL,
- .title = NULL,
- .algorithm = RRD_ALGORITHM_ABSOLUTE,
- .charttype = RRDSET_TYPE_STACKED,
- .enabled = CONFIG_BOOLEAN_NO,
- .chart_created = CONFIG_BOOLEAN_NO,
- .prio = -1,
- .value = -1}};
-
-int zswap_collect_data(struct netdata_zswap_metric *metric)
-{
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, metric->filename);
-
- if (read_single_number_file(filename, (unsigned long long *)&metric->value)) {
- netdata_log_error("Cannot read file %s", filename);
- return 1;
- }
-
- if (metric->convertv)
- metric->value = metric->convertv(metric->value);
-
- return 0;
-}
-
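-// Emits one CHART line of the netdata external plugin text protocol, e.g.
-// (illustrative values):
-//   CHART mem.zswap_pool_limit_hit '' 'Zswap pool limit was reached' 'events/s' 'zswap' '' 'line' <prio> <update_every> '' 'debugfs.plugin' '<name>'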
-static void
-zswap_send_chart(struct netdata_zswap_metric *metric, int update_every, const char *name, const char *option)
-{
- fprintf(
- stdout,
- "CHART mem.zswap_%s '' '%s' '%s' 'zswap' '' '%s' %d %d '%s' 'debugfs.plugin' '%s'\n",
- metric->chart_id,
- metric->title,
- metric->units,
- debugfs_rrdset_type_name(metric->charttype),
- metric->prio,
- update_every,
- (!option) ? "" : option,
- name);
-}
-
-static void zswap_send_dimension(struct netdata_zswap_metric *metric)
-{
- int div = metric->divisor > 0 ? metric->divisor : 1;
- fprintf(
- stdout,
- "DIMENSION '%s' '%s' %s 1 %d ''\n",
- metric->dimension,
- metric->dimension,
- debugfs_rrd_algorithm_name(metric->algorithm),
- div);
-}
-
-static void zswap_send_begin(struct netdata_zswap_metric *metric)
-{
- fprintf(stdout, "BEGIN mem.zswap_%s\n", metric->chart_id);
-}
-
-static void zswap_send_set(struct netdata_zswap_metric *metric)
-{
- fprintf(stdout, "SET %s = %lld\n", metric->dimension, metric->value);
-}
-
-static void zswap_send_end_and_flush()
-{
- fprintf(stdout, "END\n");
- fflush(stdout);
-}
-
-static void zswap_independent_chart(struct netdata_zswap_metric *metric, int update_every, const char *name)
-{
- if (unlikely(!metric->chart_created)) {
- metric->chart_created = CONFIG_BOOLEAN_YES;
-
- zswap_send_chart(metric, update_every, name, NULL);
- zswap_send_dimension(metric);
- }
-
- zswap_send_begin(metric);
- zswap_send_set(metric);
- zswap_send_end_and_flush();
-}
-
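-// The rejections chart aggregates the individual reject_* counters: entry
-// NETDATA_ZSWAP_REJECTED_CHART holds only the chart metadata, while the
-// following entries provide one dimension each.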
-void zswap_reject_chart(int update_every, const char *name)
-{
- struct netdata_zswap_metric *metric = &zswap_rejected_metrics[NETDATA_ZSWAP_REJECTED_CHART];
-
- if (unlikely(!metric->chart_created)) {
- metric->chart_created = CONFIG_BOOLEAN_YES;
-
- zswap_send_chart(metric, update_every, name, NULL);
- for (int i = NETDATA_ZSWAP_REJECTED_COMPRESS_POOR; zswap_rejected_metrics[i].filename; i++) {
- metric = &zswap_rejected_metrics[i];
- if (likely(metric->enabled))
- zswap_send_dimension(metric);
- }
- }
-
- metric = &zswap_rejected_metrics[NETDATA_ZSWAP_REJECTED_CHART];
- zswap_send_begin(metric);
- for (int i = NETDATA_ZSWAP_REJECTED_COMPRESS_POOR; zswap_rejected_metrics[i].filename; i++) {
- metric = &zswap_rejected_metrics[i];
- if (likely(metric->enabled))
- zswap_send_set(metric);
- }
- zswap_send_end_and_flush();
-}
-
-static void zswap_obsolete_charts(int update_every, const char *name)
-{
- struct netdata_zswap_metric *metric = NULL;
-
- for (int i = 0; zswap_independent_metrics[i].filename; i++) {
- metric = &zswap_independent_metrics[i];
- if (likely(metric->chart_created))
- zswap_send_chart(metric, update_every, name, "obsolete");
- }
-
- metric = &zswap_rejected_metrics[NETDATA_ZSWAP_REJECTED_CHART];
- if (likely(metric->chart_created))
- zswap_send_chart(metric, update_every, name, "obsolete");
-
- metric = &zswap_calculated_metrics[NETDATA_ZSWAP_COMPRESSION_RATIO_CHART];
- if (likely(metric->chart_created))
- zswap_send_chart(metric, update_every, name, "obsolete");
-}
-
-#define ZSWAP_STATE_SIZE 1 // Y or N
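-// Returns 0 when zswap is enabled and 1 when it is disabled or the state file
-// cannot be read (note the inverted convention relied upon by the caller).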
-static int debugfs_is_zswap_enabled()
-{
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "/sys/module/zswap/parameters/enabled"); // host prefix is not needed here
- char state[ZSWAP_STATE_SIZE + 1];
-
- int ret = read_file(filename, state, ZSWAP_STATE_SIZE);
-
- if (unlikely(!ret && !strcmp(state, "Y"))) {
- return 0;
- }
- return 1;
-}
-
-int do_debugfs_zswap(int update_every, const char *name)
-{
- static int check_if_enabled = 1;
-
- if (likely(check_if_enabled && debugfs_is_zswap_enabled())) {
- netdata_log_info("Zswap is disabled");
- return 1;
- }
-
- check_if_enabled = 0;
-
- system_page_size = sysconf(_SC_PAGESIZE);
- struct netdata_zswap_metric *metric = NULL;
- int enabled = 0;
-
- for (int i = 0; zswap_independent_metrics[i].filename; i++) {
- metric = &zswap_independent_metrics[i];
- if (unlikely(!metric->enabled))
- continue;
- if (unlikely(!(metric->enabled = !zswap_collect_data(metric))))
- continue;
- zswap_independent_chart(metric, update_every, name);
- enabled++;
- }
-
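- // The compression ratio is uncompressed/compressed size; it is multiplied by
- // 100 here and divided back by the dimension divisor (100), so the chart keeps
- // two decimal places of precision.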
- struct netdata_zswap_metric *metric_size = &zswap_independent_metrics[NETDATA_ZSWAP_POOL_TOTAL_SIZE];
- struct netdata_zswap_metric *metric_raw_size = &zswap_independent_metrics[NETDATA_ZSWAP_STORED_PAGES];
- if (metric_size->enabled && metric_raw_size->enabled) {
- metric = &zswap_calculated_metrics[NETDATA_ZSWAP_COMPRESSION_RATIO_CHART];
- metric->value = 0;
- if (metric_size->value > 0)
- metric->value =
- (collected_number)((NETDATA_DOUBLE)metric_raw_size->value / (NETDATA_DOUBLE)metric_size->value * 100);
- zswap_independent_chart(metric, update_every, name);
- }
-
- int enabled_rejected = 0;
- for (int i = NETDATA_ZSWAP_REJECTED_COMPRESS_POOR; zswap_rejected_metrics[i].filename; i++) {
- metric = &zswap_rejected_metrics[i];
- if (unlikely(!metric->enabled))
- continue;
- if (unlikely(!(metric->enabled = !zswap_collect_data(metric))))
- continue;
- enabled++;
- enabled_rejected++;
- }
-
- if (likely(enabled_rejected > 0))
- zswap_reject_chart(update_every, name);
-
- if (unlikely(!enabled)) {
- zswap_obsolete_charts(update_every, name);
- return 1;
- }
-
- return 0;
-}
diff --git a/collectors/debugfs.plugin/integrations/linux_zswap.md b/collectors/debugfs.plugin/integrations/linux_zswap.md
deleted file mode 100644
index 44478454b..000000000
--- a/collectors/debugfs.plugin/integrations/linux_zswap.md
+++ /dev/null
@@ -1,138 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/debugfs.plugin/integrations/linux_zswap.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/debugfs.plugin/metadata.yaml"
-sidebar_label: "Linux ZSwap"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Memory"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Linux ZSwap
-
-
-<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
-
-
-Plugin: debugfs.plugin
-Module: /sys/kernel/debug/zswap
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collects zswap performance metrics on Linux systems.
-
-
-Parses data from a `debugfs` file.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-This integration requires read access to files under `/sys/kernel/debug/zswap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-Assuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether or not the system is using zswap.
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-Monitor the performance statistics of zswap.
-
-### Per Linux ZSwap instance
-
-Global zswap performance metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.zswap_pool_compression_ratio | compression_ratio | ratio |
-| system.zswap_pool_compressed_size | compressed_size | bytes |
-| system.zswap_pool_raw_size | uncompressed_size | bytes |
-| system.zswap_rejections | compress_poor, kmemcache_fail, alloc_fail, reclaim_fail | rejections/s |
-| system.zswap_pool_limit_hit | limit | events/s |
-| system.zswap_written_back_raw_bytes | written_back | bytes/s |
-| system.zswap_same_filled_raw_size | same_filled | bytes |
-| system.zswap_duplicate_entry | duplicate | entries/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### filesystem
-
-The debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to fstab(5) so the filesystem is mounted automatically before Netdata starts.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:debugfs]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 1 | no |
-| command options | Additional parameters for the collector | | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/debugfs.plugin/integrations/power_capping.md b/collectors/debugfs.plugin/integrations/power_capping.md
deleted file mode 100644
index d4b7eb890..000000000
--- a/collectors/debugfs.plugin/integrations/power_capping.md
+++ /dev/null
@@ -1,132 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/debugfs.plugin/integrations/power_capping.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/debugfs.plugin/metadata.yaml"
-sidebar_label: "Power Capping"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Kernel"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Power Capping
-
-
-<img src="https://netdata.cloud/img/powersupply.svg" width="150"/>
-
-
-Plugin: debugfs.plugin
-Module: intel_rapl
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collects power capping performance metrics on Linux systems.
-
-
-Parses data from a `debugfs` file.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-This integration requires read access to files under `/sys/devices/virtual/powercap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-Assuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether the system exposes Intel RAPL power capping zones.
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-Monitor the power consumption of Intel RAPL zones.
-
-### Per Power Capping instance
-
-Global Intel RAPL zones.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cpu.powercap_intel_rapl_zone | Power | Watts |
-| cpu.powercap_intel_rapl_subzones | dram, core, uncore | Watts |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### filesystem
-
-The debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to fstab(5) so the filesystem is mounted automatically before Netdata starts.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:debugfs]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 1 | no |
-| command options | Additional parameters for the collector | | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md b/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md
deleted file mode 100644
index ef287bc30..000000000
--- a/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md
+++ /dev/null
@@ -1,136 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/debugfs.plugin/metadata.yaml"
-sidebar_label: "System Memory Fragmentation"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Memory"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# System Memory Fragmentation
-
-
-<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
-
-
-Plugin: debugfs.plugin
-Module: /sys/kernel/debug/extfrag
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collects memory fragmentation statistics from the Linux kernel.
-
-Parses data from a `debugfs` file.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-This integration requires read access to files under `/sys/kernel/debug/extfrag`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-Assuming that debugfs is mounted and the required permissions are available, this integration will automatically run by default.
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-Monitor the overall memory fragmentation of the system.
-
-### Per node
-
-Memory fragmentation statistics for each NUMA node in the system.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| numa_node | The NUMA node the metrics are associated with. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.fragmentation_index_dma | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |
-| mem.fragmentation_index_dma32 | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |
-| mem.fragmentation_index_normal | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### filesystem
-
-The debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to fstab(5) so the filesystem is mounted automatically before Netdata starts.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:debugfs]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 1 | no |
-| command options | Additional parameters for the collector | | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/debugfs.plugin/metadata.yaml b/collectors/debugfs.plugin/metadata.yaml
deleted file mode 100644
index d3bf0a0d8..000000000
--- a/collectors/debugfs.plugin/metadata.yaml
+++ /dev/null
@@ -1,395 +0,0 @@
-plugin_name: debugfs.plugin
-modules:
- - meta:
- plugin_name: debugfs.plugin
- module_name: /sys/kernel/debug/extfrag
- monitored_instance:
- name: System Memory Fragmentation
- link: 'https://www.kernel.org/doc/html/next/admin-guide/sysctl/vm.html'
- categories:
- - data-collection.linux-systems.memory-metrics
- icon_filename: 'microchip.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - extfrag
- - extfrag_threshold
- - memory fragmentation
- most_popular: false
- overview:
- data_collection:
- metrics_description: 'Collects memory fragmentation statistics from the Linux kernel.'
- method_description: 'Parses data from a `debugfs` file.'
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: false
- additional_permissions:
- description: >
- This integration requires read access to files under `/sys/kernel/debug/extfrag`, which are accessible
- only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to
- debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing
- file read permission checks and directory read and execute permission checks. If file capabilities are not
- usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.
- default_behavior:
- auto_detection:
- description: >
- Assuming that debugfs is mounted and the required permissions are available, this integration will
- automatically run by default.
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'filesystem'
- description: >
- The debugfs filesystem must be mounted on your host for the plugin to collect data.
- You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually.
- It is also recommended to add an entry to fstab(5) so the filesystem is mounted
- automatically before Netdata starts.
- configuration:
- file:
- name: 'netdata.conf'
- section_name: '[plugin:debugfs]'
- description: 'This is the main netdata configuration file.'
- options:
- description: ''
- folding:
- title: 'Config options'
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 1
- required: false
- - name: command options
- description: Additional parameters for the collector
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: "Monitor the overall memory fragmentation of the system."
- availability: []
- scopes:
- - name: node
- description: "Memory fragmentation statistics for each NUMA node in the system."
- labels:
- - name: numa_node
- description: The NUMA node the metrics are associated with.
- metrics:
- - name: mem.fragmentation_index_dma
- description: Memory fragmentation index for each order
- unit: "index"
- chart_type: line
- dimensions:
- - name: order0
- - name: order1
- - name: order2
- - name: order3
- - name: order4
- - name: order5
- - name: order6
- - name: order7
- - name: order8
- - name: order9
- - name: order10
- - name: mem.fragmentation_index_dma32
- description: Memory fragmentation index for each order
- unit: "index"
- chart_type: line
- dimensions:
- - name: order0
- - name: order1
- - name: order2
- - name: order3
- - name: order4
- - name: order5
- - name: order6
- - name: order7
- - name: order8
- - name: order9
- - name: order10
- - name: mem.fragmentation_index_normal
- description: Memory fragmentation index for each order
- unit: "index"
- chart_type: line
- dimensions:
- - name: order0
- - name: order1
- - name: order2
- - name: order3
- - name: order4
- - name: order5
- - name: order6
- - name: order7
- - name: order8
- - name: order9
- - name: order10
- - meta:
- plugin_name: debugfs.plugin
- module_name: /sys/kernel/debug/zswap
- monitored_instance:
- name: Linux ZSwap
- link: 'https://www.kernel.org/doc/html/latest/admin-guide/mm/zswap.html'
- categories:
- - data-collection.linux-systems.memory-metrics
- icon_filename: 'microchip.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - swap
- - zswap
- - frontswap
- - swap cache
- most_popular: false
- overview:
- data_collection:
- metrics_description: >
- Collects zswap performance metrics on Linux systems.
- method_description: 'Parses data from a `debugfs` file.'
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: false
- additional_permissions:
- description: >
- This integration requires read access to files under `/sys/kernel/debug/zswap`, which are accessible
- only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to
- debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing
- file read permission checks and directory read and execute permission checks. If file capabilities are not
- usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.
- default_behavior:
- auto_detection:
- description: >
- Assuming that debugfs is mounted and the required permissions are available, this integration will
- automatically detect whether or not the system is using zswap.
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'filesystem'
- description: >
- The debugfs filesystem must be mounted on your host for the plugin to collect data.
- You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually.
- It is also recommended to add an entry to fstab(5) so the filesystem is mounted
- automatically before Netdata starts.
- configuration:
- file:
- name: 'netdata.conf'
- section_name: '[plugin:debugfs]'
- description: 'This is the main netdata configuration file.'
- options:
- description: ''
- folding:
- title: 'Config options'
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 1
- required: false
- - name: command options
- description: Additional parameters for the collector
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: "Monitor the performance statistics of zswap."
- availability: []
- scopes:
- - name: global
- description: "Global zswap performance metrics."
- labels: []
- metrics:
- - name: system.zswap_pool_compression_ratio
- description: Zswap compression ratio
- unit: "ratio"
- chart_type: line
- dimensions:
- - name: compression_ratio
- - name: system.zswap_pool_compressed_size
- description: Zswap compressed bytes currently stored
- unit: "bytes"
- chart_type: area
- dimensions:
- - name: compressed_size
- - name: system.zswap_pool_raw_size
- description: Zswap uncompressed bytes currently stored
- unit: "bytes"
- chart_type: area
- dimensions:
- - name: uncompressed_size
- - name: system.zswap_rejections
- description: Zswap rejections
- unit: "rejections/s"
- chart_type: stacked
- dimensions:
- - name: compress_poor
- - name: kmemcache_fail
- - name: alloc_fail
- - name: reclaim_fail
- - name: system.zswap_pool_limit_hit
- description: Zswap pool limit was reached
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: limit
- - name: system.zswap_written_back_raw_bytes
- description: Zswap uncompressed bytes written back when pool limit was reached
- unit: "bytes/s"
- chart_type: area
- dimensions:
- - name: written_back
- - name: system.zswap_same_filled_raw_size
- description: Zswap same-value filled uncompressed bytes currently stored
- unit: "bytes"
- chart_type: area
- dimensions:
- - name: same_filled
- - name: system.zswap_duplicate_entry
- description: Zswap duplicate store was encountered
- unit: "entries/s"
- chart_type: line
- dimensions:
- - name: duplicate
- - meta:
- plugin_name: debugfs.plugin
- module_name: intel_rapl
- monitored_instance:
- name: Power Capping
- link: 'https://www.kernel.org/doc/html/next/power/powercap/powercap.html'
- categories:
- - data-collection.linux-systems.kernel-metrics
- icon_filename: 'powersupply.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - power capping
- - energy
- most_popular: false
- overview:
- data_collection:
- metrics_description: >
- Collects power capping performance metrics on Linux systems.
- method_description: 'Parses data from a `debugfs` file.'
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: false
- additional_permissions:
- description: >
- This integration requires read access to files under `/sys/devices/virtual/powercap`, which are accessible
- only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to
- debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing
- file read permission checks and directory read and execute permission checks. If file capabilities are not
- usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.
- default_behavior:
- auto_detection:
- description: >
- Assuming that debugfs is mounted and the required permissions are available, this integration will
- automatically detect whether the system exposes Intel RAPL power capping zones.
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'filesystem'
- description: >
- The debugfs filesystem must be mounted on your host for the plugin to collect data.
- You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually.
- It is also recommended to add an entry to fstab(5) so the filesystem is mounted
- automatically before Netdata starts.
- configuration:
- file:
- name: 'netdata.conf'
- section_name: '[plugin:debugfs]'
- description: 'This is the main netdata configuration file.'
- options:
- description: ''
- folding:
- title: 'Config options'
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 1
- required: false
- - name: command options
- description: Additional parameters for the collector
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: "Monitor the Intel RAPL zones Consumption."
- availability: []
- scopes:
- - name: global
- description: "Global Intel RAPL zones."
- labels: []
- metrics:
- - name: cpu.powercap_intel_rapl_zone
- description: Intel RAPL Zone Power Consumption
- unit: "Watts"
- chart_type: line
- dimensions:
- - name: Power
- - name: cpu.powercap_intel_rapl_subzones
- description: Intel RAPL Subzones Power Consumption
- unit: "Watts"
- chart_type: line
- dimensions:
- - name: dram
- - name: core
- - name: uncore
diff --git a/collectors/debugfs.plugin/sys_devices_virtual_powercap.c b/collectors/debugfs.plugin/sys_devices_virtual_powercap.c
deleted file mode 100644
index ee261c27f..000000000
--- a/collectors/debugfs.plugin/sys_devices_virtual_powercap.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "debugfs_plugin.h"
-
-struct zone_t {
- char *zone_chart_id;
- char *subzone_chart_id;
- char *name;
- char *path;
-
- unsigned long long max_energy_range_uj;
- unsigned long long energy_uj;
-
- struct zone_t *subzones;
-
- struct zone_t *prev, *next;
-};
-
-static struct zone_t *rapl_zones = NULL;
-
-static bool get_measurement(const char *path, unsigned long long *energy_uj) {
- return read_single_number_file(path, energy_uj) == 0;
-}
-
-static struct zone_t *get_rapl_zone(const char *control_type __maybe_unused, struct zone_t *parent __maybe_unused, const char *dirname) {
- char temp[FILENAME_MAX + 1];
- snprintfz(temp, FILENAME_MAX, "%s/%s", dirname, "name");
-
- char name[FILENAME_MAX + 1] = "";
- if (read_file(temp, name, sizeof(name) - 1) != 0)
- return NULL;
-
- char *trimmed = trim(name);
- if (unlikely(trimmed == NULL || trimmed[0] == 0))
- return NULL;
-
- snprintfz(temp, FILENAME_MAX, "%s/%s", dirname, "max_energy_range_uj");
- unsigned long long max_energy_range_uj = 0;
- if (unlikely(read_single_number_file(temp, &max_energy_range_uj) != 0)) {
- collector_error("Cannot read %s", temp);
- return NULL;
- }
-
- snprintfz(temp, FILENAME_MAX, "%s/%s", dirname, "energy_uj");
- unsigned long long energy_uj;
- if (unlikely(!get_measurement(temp, &energy_uj))) {
- collector_info("%s: Cannot read %s", trimmed, temp);
- return NULL;
- }
-
- struct zone_t *zone = callocz(1, sizeof(*zone));
-
- zone->name = strdupz(trimmed);
- zone->path = strdupz(temp);
-
- zone->max_energy_range_uj = max_energy_range_uj;
- zone->energy_uj = energy_uj;
-
- collector_info("Found zone: \"%s\"", zone->name);
-
- return zone;
-}
-
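-// Scans a control type directory for "intel-rapl:*" zone directories, recursing
-// one extra level so that subzones are attached to their parent zone.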
-static struct zone_t *look_for_rapl_zones(const char *control_type, struct zone_t *parent, const char *path, int depth) {
- if(depth > 2)
- return NULL;
-
- struct zone_t *base = NULL;
-
- DIR *dir = opendir(path);
- if (unlikely(dir == NULL))
- return NULL;
-
- struct dirent *de = NULL;
- while ((de = readdir(dir))) {
- if (de->d_type != DT_DIR || de->d_name[0] == '.')
- continue;
-
- if(strncmp(de->d_name, "intel-rapl:", 11) != 0)
- continue;
-
- char zone_path[FILENAME_MAX + 1];
- snprintfz(zone_path, FILENAME_MAX, "%s/%s", path, de->d_name);
-
- struct zone_t *zone = get_rapl_zone(control_type, parent, zone_path);
- if(zone) {
- DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(base, zone, prev, next);
-
- if(!parent)
- zone->subzones = look_for_rapl_zones(control_type, zone, zone_path, depth + 1);
- }
- }
-
- closedir(dir);
- return base;
-}
-
-static struct zone_t *get_main_rapl_zones(void) {
- struct zone_t *base = NULL;
-
- char dirname[FILENAME_MAX + 1];
- snprintfz(dirname, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/virtual/powercap");
-
- DIR *dir = opendir(dirname);
- if (unlikely(dir == NULL))
- return 0;
-
- struct dirent *de = NULL;
- while ((de = readdir(dir))) {
- if (de->d_type != DT_DIR || de->d_name[0] == '.')
- continue;
-
- if(strncmp(de->d_name, "intel-rapl", 10) != 0)
- continue;
-
- char control_type_path[FILENAME_MAX + 1];
- snprintfz(control_type_path, FILENAME_MAX, "%s/%s", dirname, de->d_name);
-
- collector_info("Looking at control type \"%s\"", de->d_name);
- struct zone_t *zone = look_for_rapl_zones(de->d_name, NULL, control_type_path, 0);
- if(zone)
- DOUBLE_LINKED_LIST_APPEND_LIST_UNSAFE(base, zone, prev, next);
- }
- closedir(dir);
-
- return base;
-}
-
-int do_sys_devices_virtual_powercap(int update_every, const char *name __maybe_unused) {
-
- if (unlikely(!rapl_zones)) {
- rapl_zones = get_main_rapl_zones();
- if (unlikely(!rapl_zones)) {
- collector_info("Failed to find powercap zones.");
- return 1;
- }
- }
-
- for(struct zone_t *zone = rapl_zones; zone ; zone = zone->next) {
- if(!zone->zone_chart_id) {
- char id[1000 + 1];
- snprintf(id, 1000, "cpu.powercap_intel_rapl_zone_%s", zone->name);
- zone->zone_chart_id = strdupz(id);
-
- fprintf(stdout,
- "CHART '%s' '' 'Intel RAPL Zone Power Consumption' 'Watts' 'powercap' '%s' '%s' %d %d '' 'debugfs.plugin' 'intel_rapl'\n",
- zone->zone_chart_id,
- "cpu.powercap_intel_rapl_zone",
- debugfs_rrdset_type_name(RRDSET_TYPE_LINE),
- NETDATA_CHART_PRIO_POWERCAP,
- update_every);
-
- fprintf(stdout,
- "CLABEL 'zone' '%s' 1\n"
- "CLABEL_COMMIT\n",
- zone->name);
-
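- // energy_uj is a monotonically increasing counter in microjoules; with the
- // incremental algorithm and a divisor of 1000000, netdata renders the rate
- // in joules per second, i.e. Watts.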
- fprintf(stdout,
- "DIMENSION 'power' '' %s 1 1000000 ''\n",
- debugfs_rrd_algorithm_name(RRD_ALGORITHM_INCREMENTAL));
-
- // for the sub-zones
- snprintf(id, 1000, "cpu.powercap_intel_rapl_subzones_%s", zone->name);
- zone->subzone_chart_id = strdupz(id);
- fprintf(stdout,
- "CHART '%s' '' 'Intel RAPL Subzones Power Consumption' 'Watts' 'powercap' '%s' '%s' %d %d '' 'debugfs.plugin' 'intel_rapl'\n",
- zone->subzone_chart_id,
- "cpu.powercap_intel_rapl_subzones",
- debugfs_rrdset_type_name(RRDSET_TYPE_LINE),
- NETDATA_CHART_PRIO_POWERCAP + 1,
- update_every);
-
- fprintf(stdout,
- "CLABEL 'zone' '%s' 1\n"
- "CLABEL_COMMIT\n",
- zone->name);
-
- for(struct zone_t *subzone = zone->subzones; subzone ; subzone = subzone->next) {
- fprintf(stdout,
- "DIMENSION '%s' '' %s 1 1000000 ''\n",
- subzone->name,
- debugfs_rrd_algorithm_name(RRD_ALGORITHM_INCREMENTAL));
- }
- }
-
- if(get_measurement(zone->path, &zone->energy_uj)) {
- fprintf(stdout,
- "BEGIN '%s'\n"
- "SET power = %llu\n"
- "END\n"
- , zone->zone_chart_id
- , zone->energy_uj);
- }
-
- if(zone->subzones) {
- fprintf(stdout,
- "BEGIN '%s'\n",
- zone->subzone_chart_id);
-
- for (struct zone_t *subzone = zone->subzones; subzone; subzone = subzone->next) {
- if(get_measurement(subzone->path, &subzone->energy_uj)) {
- fprintf(stdout,
- "SET '%s' = %llu\n",
- subzone->name,
- subzone->energy_uj);
- }
- }
-
- fprintf(stdout, "END\n");
- }
-
- }
-
- fflush(stdout);
-
- return 0;
-}
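-
-/*
- * The fprintf() calls above emit the Netdata external plugin protocol on
- * stdout. For a hypothetical zone named "package-0", one iteration would
- * look roughly like this:
- *
- * CHART 'cpu.powercap_intel_rapl_zone_package-0' '' 'Intel RAPL Zone Power Consumption' 'Watts' 'powercap' 'cpu.powercap_intel_rapl_zone' 'line' ...
- * CLABEL 'zone' 'package-0' 1
- * CLABEL_COMMIT
- * DIMENSION 'power' '' incremental 1 1000000 ''
- * BEGIN 'cpu.powercap_intel_rapl_zone_package-0'
- * SET power = 1234567890
- * END
- *
- * The 1000000 divisor converts the cumulative microjoule counter (energy_uj)
- * to joules, and the incremental algorithm turns it into a rate, i.e. watts.
- */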
diff --git a/collectors/diskspace.plugin/Makefile.am b/collectors/diskspace.plugin/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/collectors/diskspace.plugin/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md
deleted file mode 120000
index c9f4e1c5e..000000000
--- a/collectors/diskspace.plugin/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/disk_space.md \ No newline at end of file
diff --git a/collectors/diskspace.plugin/integrations/disk_space.md b/collectors/diskspace.plugin/integrations/disk_space.md
deleted file mode 100644
index 1c937ed7f..000000000
--- a/collectors/diskspace.plugin/integrations/disk_space.md
+++ /dev/null
@@ -1,140 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/diskspace.plugin/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/diskspace.plugin/metadata.yaml"
-sidebar_label: "Disk space"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Disk space
-
-
-<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
-
-
-Plugin: diskspace.plugin
-Module: diskspace.plugin
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin reads data from the `/proc/self/mountinfo` and `/proc/diskstats` files.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per mount point
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| mount_point | Path used to mount a filesystem |
-| filesystem | The filesystem used to format a partition. |
-| mount_root | Root directory where mount points are present. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| disk.space | avail, used, reserved_for_root | GiB |
-| disk.inodes | avail, used, reserved_for_root | inodes |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |
-| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-You can also specify per-mount-point settings in a `[plugin:proc:diskspace:mountpoint]` section.
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 1 | no |
-| remove charts of unmounted disks | Remove the chart when a device is unmounted on the host. | yes | no |
-| check for new mount points every | How often to parse the proc files, in seconds. | 15 | no |
-| exclude space metrics on paths | Do not show metrics (charts) for the listed paths. This option accepts a Netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |
-| exclude space metrics on filesystems | Do not show metrics (charts) for the listed filesystems. This option accepts a Netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |
-| exclude inode metrics on filesystems | Do not show inode metrics (charts) for the listed filesystems. This option accepts a Netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |
-| space usage for all disks | Defines whether the plugin shows space usage metrics. When set to `auto`, the plugin shows the metrics unless the filesystem or path was excluded by one of the options above. | auto | no |
-| inodes usage for all disks | Defines whether the plugin shows inode usage metrics. When set to `auto`, the plugin shows the metrics unless the filesystem or path was excluded by one of the options above. | auto | no |
-
-</details>
-
-#### Examples
-
-A minimal illustrative snippet (hypothetical values, using the options documented above):
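-
-```ini
-[plugin:proc:diskspace]
- update every = 5
- remove charts of unmounted disks = yes
- exclude space metrics on paths = /proc/* /sys/* /run/user/*
-```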
-
-
diff --git a/collectors/diskspace.plugin/metadata.yaml b/collectors/diskspace.plugin/metadata.yaml
deleted file mode 100644
index cb45edf34..000000000
--- a/collectors/diskspace.plugin/metadata.yaml
+++ /dev/null
@@ -1,139 +0,0 @@
-plugin_name: diskspace.plugin
-modules:
- - meta:
- plugin_name: diskspace.plugin
- module_name: diskspace.plugin
- monitored_instance:
- name: Disk space
- link: ""
- categories:
- - data-collection.linux-systems
- icon_filename: "hard-drive.svg"
- related_resources:
- integrations:
- list:
- - plugin_name: ebpf.plugin
- module_name: disk
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - disk
- - I/O
- - space
- - inode
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor Disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "The plugin reads data from `/proc/self/mountinfo` and `/proc/diskstats file`."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:proc:diskspace]"
- description: "This is netdata main configuration file"
- options:
- description: "You can also specify per mount point `[plugin:proc:diskspace:mountpoint]`"
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 1
- required: false
- - name: remove charts of unmounted disks
- description: Remove the chart when a device is unmounted on the host.
- default_value: yes
- required: false
- - name: check for new mount points every
- description: How often to parse the proc files, in seconds.
- default_value: 15
- required: false
- - name: exclude space metrics on paths
- description: Do not show metrics (charts) for the listed paths. This option accepts a Netdata simple pattern.
- default_value: /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*
- required: false
- - name: exclude space metrics on filesystems
- description: Do not show metrics (charts) for the listed filesystems. This option accepts a Netdata simple pattern.
- default_value: "*gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs"
- required: false
- - name: exclude inode metrics on filesystems
- description: Do not show inode metrics (charts) for the listed filesystems. This option accepts a Netdata simple pattern.
- default_value: msdosfs msdos vfat overlayfs aufs* *unionfs
- required: false
- - name: space usage for all disks
- description: Defines whether the plugin shows space usage metrics. When set to `auto`, the plugin shows the metrics unless the filesystem or path was excluded by one of the options above.
- default_value: auto
- required: false
- - name: inodes usage for all disks
- description: Defines whether the plugin shows inode usage metrics. When set to `auto`, the plugin shows the metrics unless the filesystem or path was excluded by one of the options above.
- default_value: auto
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: disk_space_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf
- metric: disk.space
- info: disk ${label:mount_point} space utilization
- os: "linux freebsd"
- - name: disk_inode_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf
- metric: disk.inodes
- info: disk ${label:mount_point} inode utilization
- os: "linux freebsd"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: mount point
- description: ""
- labels:
- - name: mount_point
- description: Path used to mount a filesystem
- - name: filesystem
- description: The filesystem used to format a partition.
- - name: mount_root
- description: Root directory where mount points are present.
- metrics:
- - name: disk.space
- description: Disk Space Usage
- unit: "GiB"
- chart_type: stacked
- dimensions:
- - name: avail
- - name: used
- - name: reserved_for_root
- - name: disk.inodes
- description: Disk Files (inodes) Usage
- unit: "inodes"
- chart_type: stacked
- dimensions:
- - name: avail
- - name: used
- - name: reserved_for_root
diff --git a/collectors/diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c
deleted file mode 100644
index 94257810c..000000000
--- a/collectors/diskspace.plugin/plugin_diskspace.c
+++ /dev/null
@@ -1,945 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "../proc.plugin/plugin_proc.h"
-
-#define PLUGIN_DISKSPACE_NAME "diskspace.plugin"
-
-#define DEFAULT_EXCLUDED_PATHS "/proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*"
-#define DEFAULT_EXCLUDED_FILESYSTEMS "*gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs"
-#define DEFAULT_EXCLUDED_FILESYSTEMS_INODES "msdosfs msdos vfat overlayfs aufs* *unionfs"
-#define CONFIG_SECTION_DISKSPACE "plugin:proc:diskspace"
-
-#define RRDFUNCTIONS_DISKSPACE_HELP "View mount point statistics"
-
-#define MAX_STAT_USEC 10000LU
-#define SLOW_UPDATE_EVERY 5
-
-static netdata_thread_t *diskspace_slow_thread = NULL;
-
-static struct mountinfo *disk_mountinfo_root = NULL;
-static int check_for_new_mountpoints_every = 15;
-static int cleanup_mount_points = 1;
-
-static inline void mountinfo_reload(int force) {
- static time_t last_loaded = 0;
- time_t now = now_realtime_sec();
-
- if(force || now - last_loaded >= check_for_new_mountpoints_every) {
- // mountinfo_free_all() can be called with NULL disk_mountinfo_root
- mountinfo_free_all(disk_mountinfo_root);
-
- // re-read mountinfo in case something changed
- disk_mountinfo_root = mountinfo_read(0);
-
- last_loaded = now;
- }
-}
-
-// Data to be stored in DICTIONARY dict_mountpoints used by do_disk_space_stats().
-// This DICTIONARY is used to lookup the settings of the mount point on each iteration.
-struct mount_point_metadata {
- int do_space;
- int do_inodes;
- int shown_error;
- int updated;
- int slow;
-
- bool function_ready;
-
- STRING *filesystem;
- STRING *mountroot;
-
- RRDLABELS *chart_labels;
-
- size_t collected; // the number of times this has been collected
-
- RRDSET *st_space;
- RRDDIM *rd_space_used;
- RRDDIM *rd_space_avail;
- RRDDIM *rd_space_reserved;
-
- RRDSET *st_inodes;
- RRDDIM *rd_inodes_used;
- RRDDIM *rd_inodes_avail;
- RRDDIM *rd_inodes_reserved;
-};
-
-static DICTIONARY *dict_mountpoints = NULL;
-
-#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete___safe_from_collector_thread(st); (st) = NULL; } } while(st)
-
-int mount_point_cleanup(const char *name, void *entry, int slow) {
- (void)name;
-
- struct mount_point_metadata *mp = (struct mount_point_metadata *)entry;
- if(!mp) return 0;
-
- if (slow != mp->slow)
- return 0;
-
- if(likely(mp->updated)) {
- mp->updated = 0;
- return 0;
- }
-
- if(likely(cleanup_mount_points && mp->collected)) {
- mp->function_ready = false;
- mp->collected = 0;
- mp->updated = 0;
- mp->shown_error = 0;
-
- string_freez(mp->filesystem);
- string_freez(mp->mountroot);
-
- rrdset_obsolete_and_pointer_null(mp->st_space);
- rrdset_obsolete_and_pointer_null(mp->st_inodes);
-
- mp->rd_space_avail = NULL;
- mp->rd_space_used = NULL;
- mp->rd_space_reserved = NULL;
-
- mp->rd_inodes_avail = NULL;
- mp->rd_inodes_used = NULL;
- mp->rd_inodes_reserved = NULL;
- }
-
- return 0;
-}
-
-int mount_point_cleanup_cb(const DICTIONARY_ITEM *item, void *entry, void *data __maybe_unused) {
- const char *name = dictionary_acquired_item_name(item);
-
- return mount_point_cleanup(name, (struct mount_point_metadata *)entry, 0);
-}
-
-// a copy of basic mountinfo fields
-struct basic_mountinfo {
- char *persistent_id;
- char *root;
- char *mount_point;
- char *filesystem;
-
- struct basic_mountinfo *next;
-};
-
-static struct basic_mountinfo *slow_mountinfo_tmp_root = NULL;
-static netdata_mutex_t slow_mountinfo_mutex;
-
-static struct basic_mountinfo *basic_mountinfo_create_and_copy(struct mountinfo* mi)
-{
- struct basic_mountinfo *bmi = callocz(1, sizeof(struct basic_mountinfo));
-
- if (mi) {
- bmi->persistent_id = strdupz(mi->persistent_id);
- bmi->root = strdupz(mi->root);
- bmi->mount_point = strdupz(mi->mount_point);
- bmi->filesystem = strdupz(mi->filesystem);
- }
-
- return bmi;
-}
-
-static void add_basic_mountinfo(struct basic_mountinfo **root, struct mountinfo *mi)
-{
- if (!root)
- return;
-
- struct basic_mountinfo *bmi = basic_mountinfo_create_and_copy(mi);
-
- bmi->next = *root;
- *root = bmi;
-};
-
-static void free_basic_mountinfo(struct basic_mountinfo *bmi)
-{
- if (bmi) {
- freez(bmi->persistent_id);
- freez(bmi->root);
- freez(bmi->mount_point);
- freez(bmi->filesystem);
-
- freez(bmi);
- }
-};
-
-static void free_basic_mountinfo_list(struct basic_mountinfo *root)
-{
- struct basic_mountinfo *bmi = root, *next;
-
- while (bmi) {
- next = bmi->next;
- free_basic_mountinfo(bmi);
- bmi = next;
- }
-}
-
-static void calculate_values_and_show_charts(
- struct basic_mountinfo *mi,
- struct mount_point_metadata *m,
- struct statvfs *buff_statvfs,
- int update_every)
-{
- const char *family = mi->mount_point;
- const char *disk = mi->persistent_id;
-
- // logic found at get_fs_usage() in coreutils
- unsigned long bsize = (buff_statvfs->f_frsize) ? buff_statvfs->f_frsize : buff_statvfs->f_bsize;
-
- fsblkcnt_t bavail = buff_statvfs->f_bavail;
- fsblkcnt_t btotal = buff_statvfs->f_blocks;
- fsblkcnt_t bavail_root = buff_statvfs->f_bfree;
- fsblkcnt_t breserved_root = bavail_root - bavail;
- fsblkcnt_t bused = likely(btotal >= bavail_root) ? btotal - bavail_root : bavail_root - btotal;
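- // Worked example (hypothetical numbers): f_blocks = 1000, f_bfree = 100
- // and f_bavail = 60 give btotal = 1000, bavail = 60, breserved_root = 40
- // and bused = 900, so btotal == bavail + breserved_root + bused holds.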
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(btotal != bavail + breserved_root + bused))
- collector_error("DISKSPACE: disk block statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)btotal, (unsigned long long)bavail, (unsigned long long)breserved_root, (unsigned long long)bused);
-#endif
-
- // --------------------------------------------------------------------------
-
- fsfilcnt_t favail = buff_statvfs->f_favail;
- fsfilcnt_t ftotal = buff_statvfs->f_files;
- fsfilcnt_t favail_root = buff_statvfs->f_ffree;
- fsfilcnt_t freserved_root = favail_root - favail;
- fsfilcnt_t fused = ftotal - favail_root;
-
- if(m->do_inodes == CONFIG_BOOLEAN_AUTO && favail == (fsfilcnt_t)-1) {
- // this file system does not support inodes reporting
- // eg. cephfs
- m->do_inodes = CONFIG_BOOLEAN_NO;
- }
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(ftotal != favail + freserved_root + fused))
- collector_error("DISKSPACE: disk inode statistics for '%s' (disk '%s') do not sum up: total = %llu, available = %llu, reserved = %llu, used = %llu", mi->mount_point, disk, (unsigned long long)ftotal, (unsigned long long)favail, (unsigned long long)freserved_root, (unsigned long long)fused);
-#endif
-
- int rendered = 0;
-
- if(m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO &&
- (bavail || breserved_root || bused ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if(unlikely(!m->st_space) || m->st_space->update_every != update_every) {
- m->do_space = CONFIG_BOOLEAN_YES;
- m->st_space = rrdset_find_active_bytype_localhost("disk_space", disk);
- if(unlikely(!m->st_space || m->st_space->update_every != update_every)) {
- char title[4096 + 1];
- snprintfz(title, sizeof(title) - 1, "Disk Space Usage");
- m->st_space = rrdset_create_localhost(
- "disk_space"
- , disk
- , NULL
- , family
- , "disk.space"
- , title
- , "GiB"
- , PLUGIN_DISKSPACE_NAME
- , NULL
- , NETDATA_CHART_PRIO_DISKSPACE_SPACE
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- rrdset_update_rrdlabels(m->st_space, m->chart_labels);
-
- m->rd_space_avail = rrddim_add(m->st_space, "avail", NULL, (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- m->rd_space_used = rrddim_add(m->st_space, "used", NULL, (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- m->rd_space_reserved = rrddim_add(m->st_space, "reserved_for_root", "reserved for root", (collected_number)bsize, 1024 * 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(m->st_space, m->rd_space_avail, (collected_number)bavail);
- rrddim_set_by_pointer(m->st_space, m->rd_space_used, (collected_number)bused);
- rrddim_set_by_pointer(m->st_space, m->rd_space_reserved, (collected_number)breserved_root);
- rrdset_done(m->st_space);
-
- rendered++;
- }
-
- if(m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO &&
- (favail || freserved_root || fused ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if(unlikely(!m->st_inodes) || m->st_inodes->update_every != update_every) {
- m->do_inodes = CONFIG_BOOLEAN_YES;
- m->st_inodes = rrdset_find_active_bytype_localhost("disk_inodes", disk);
- if(unlikely(!m->st_inodes) || m->st_inodes->update_every != update_every) {
- char title[4096 + 1];
- snprintfz(title, sizeof(title) - 1, "Disk Files (inodes) Usage");
- m->st_inodes = rrdset_create_localhost(
- "disk_inodes"
- , disk
- , NULL
- , family
- , "disk.inodes"
- , title
- , "inodes"
- , PLUGIN_DISKSPACE_NAME
- , NULL
- , NETDATA_CHART_PRIO_DISKSPACE_INODES
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- rrdset_update_rrdlabels(m->st_inodes, m->chart_labels);
-
- m->rd_inodes_avail = rrddim_add(m->st_inodes, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- m->rd_inodes_used = rrddim_add(m->st_inodes, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- m->rd_inodes_reserved = rrddim_add(m->st_inodes, "reserved_for_root", "reserved for root", 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_avail, (collected_number)favail);
- rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_used, (collected_number)fused);
- rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_reserved, (collected_number)freserved_root);
- rrdset_done(m->st_inodes);
-
- rendered++;
- }
-
- m->function_ready = rendered > 0;
-
- if(likely(rendered))
- m->collected++;
-}
-
-static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) {
- const char *disk = mi->persistent_id;
-
- static SIMPLE_PATTERN *excluded_mountpoints = NULL;
- static SIMPLE_PATTERN *excluded_filesystems = NULL;
- static SIMPLE_PATTERN *excluded_filesystems_inodes = NULL;
-
- usec_t slow_timeout = MAX_STAT_USEC * update_every;
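- // e.g. with MAX_STAT_USEC = 10000 (10ms) and update_every = 1, a mount
- // point whose stat()/statvfs() takes longer than 10ms is flagged "slow"
- // and handed over to the dedicated slow worker thread.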
-
- int do_space, do_inodes;
-
- if(unlikely(!dict_mountpoints)) {
- SIMPLE_PREFIX_MODE mode = SIMPLE_PATTERN_EXACT;
-
- if(config_move("plugin:proc:/proc/diskstats", "exclude space metrics on paths", CONFIG_SECTION_DISKSPACE, "exclude space metrics on paths") != -1) {
- // old configuration, enable backwards compatibility
- mode = SIMPLE_PATTERN_PREFIX;
- }
-
- excluded_mountpoints = simple_pattern_create(
- config_get(CONFIG_SECTION_DISKSPACE, "exclude space metrics on paths", DEFAULT_EXCLUDED_PATHS),
- NULL,
- mode,
- true);
-
- excluded_filesystems = simple_pattern_create(
- config_get(CONFIG_SECTION_DISKSPACE, "exclude space metrics on filesystems", DEFAULT_EXCLUDED_FILESYSTEMS),
- NULL,
- SIMPLE_PATTERN_EXACT,
- true);
-
- excluded_filesystems_inodes = simple_pattern_create(
- config_get(CONFIG_SECTION_DISKSPACE, "exclude inode metrics on filesystems", DEFAULT_EXCLUDED_FILESYSTEMS_INODES),
- NULL,
- SIMPLE_PATTERN_EXACT,
- true);
-
- dict_mountpoints = dictionary_create_advanced(DICT_OPTION_NONE, &dictionary_stats_category_collectors, 0);
- }
-
- struct mount_point_metadata *m = dictionary_get(dict_mountpoints, mi->mount_point);
- if(unlikely(!m)) {
- int slow = 0;
-
- int def_space = config_get_boolean_ondemand(CONFIG_SECTION_DISKSPACE, "space usage for all disks", CONFIG_BOOLEAN_AUTO);
- int def_inodes = config_get_boolean_ondemand(CONFIG_SECTION_DISKSPACE, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO);
-
- if(unlikely(simple_pattern_matches(excluded_mountpoints, mi->mount_point))) {
- def_space = CONFIG_BOOLEAN_NO;
- def_inodes = CONFIG_BOOLEAN_NO;
- }
-
- if(unlikely(simple_pattern_matches(excluded_filesystems, mi->filesystem))) {
- def_space = CONFIG_BOOLEAN_NO;
- def_inodes = CONFIG_BOOLEAN_NO;
- }
- if (unlikely(simple_pattern_matches(excluded_filesystems_inodes, mi->filesystem))) {
- def_inodes = CONFIG_BOOLEAN_NO;
- }
-
- // check if the mount point is a directory #2407
- // but only when it is enabled by default #4491
- if(def_space != CONFIG_BOOLEAN_NO || def_inodes != CONFIG_BOOLEAN_NO) {
- usec_t start_time = now_monotonic_high_precision_usec();
- struct stat bs;
-
- if(stat(mi->mount_point, &bs) == -1) {
- collector_error("DISKSPACE: Cannot stat() mount point '%s' (disk '%s', filesystem '%s', root '%s')."
- , mi->mount_point
- , disk
- , mi->filesystem?mi->filesystem:""
- , mi->root?mi->root:""
- );
- def_space = CONFIG_BOOLEAN_NO;
- def_inodes = CONFIG_BOOLEAN_NO;
- }
- else {
- if((bs.st_mode & S_IFMT) != S_IFDIR) {
- collector_error("DISKSPACE: Mount point '%s' (disk '%s', filesystem '%s', root '%s') is not a directory."
- , mi->mount_point
- , disk
- , mi->filesystem?mi->filesystem:""
- , mi->root?mi->root:""
- );
- def_space = CONFIG_BOOLEAN_NO;
- def_inodes = CONFIG_BOOLEAN_NO;
- }
- }
-
- if ((now_monotonic_high_precision_usec() - start_time) > slow_timeout)
- slow = 1;
- }
-
- char var_name[4096 + 1];
- snprintfz(var_name, 4096, "plugin:proc:diskspace:%s", mi->mount_point);
-
- do_space = def_space;
- do_inodes = def_inodes;
-
- if (config_exists(var_name, "space usage"))
- do_space = config_get_boolean_ondemand(var_name, "space usage", def_space);
- if (config_exists(var_name, "inodes usage"))
- do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", def_inodes);
-
- struct mount_point_metadata mp = {
- .do_space = do_space,
- .do_inodes = do_inodes,
- .shown_error = 0,
- .updated = 0,
- .slow = 0,
-
- .collected = 0,
-
- .st_space = NULL,
- .rd_space_avail = NULL,
- .rd_space_used = NULL,
- .rd_space_reserved = NULL,
-
- .st_inodes = NULL,
- .rd_inodes_avail = NULL,
- .rd_inodes_used = NULL,
- .rd_inodes_reserved = NULL
- };
-
- mp.filesystem = string_strdupz(mi->filesystem);
- mp.mountroot = string_strdupz(mi->root);
-
- mp.chart_labels = rrdlabels_create();
- rrdlabels_add(mp.chart_labels, "mount_point", mi->mount_point, RRDLABEL_SRC_AUTO);
- rrdlabels_add(mp.chart_labels, "filesystem", mi->filesystem, RRDLABEL_SRC_AUTO);
- rrdlabels_add(mp.chart_labels, "mount_root", mi->root, RRDLABEL_SRC_AUTO);
-
- m = dictionary_set(dict_mountpoints, mi->mount_point, &mp, sizeof(struct mount_point_metadata));
-
- m->slow = slow;
- }
-
- if (m->slow) {
- add_basic_mountinfo(&slow_mountinfo_tmp_root, mi);
- return;
- }
-
- m->updated = 1;
-
- if(unlikely(m->do_space == CONFIG_BOOLEAN_NO && m->do_inodes == CONFIG_BOOLEAN_NO))
- return;
-
- if (unlikely(
- mi->flags & MOUNTINFO_READONLY &&
- !(mi->flags & MOUNTINFO_IS_IN_SYSD_PROTECTED_LIST) &&
- !m->collected &&
- m->do_space != CONFIG_BOOLEAN_YES &&
- m->do_inodes != CONFIG_BOOLEAN_YES))
- return;
-
- usec_t start_time = now_monotonic_high_precision_usec();
- struct statvfs buff_statvfs;
-
- if (statvfs(mi->mount_point, &buff_statvfs) < 0) {
- if(!m->shown_error) {
- collector_error("DISKSPACE: failed to statvfs() mount point '%s' (disk '%s', filesystem '%s', root '%s')"
- , mi->mount_point
- , disk
- , mi->filesystem?mi->filesystem:""
- , mi->root?mi->root:""
- );
- m->shown_error = 1;
- }
- return;
- }
-
- if ((now_monotonic_high_precision_usec() - start_time) > slow_timeout)
- m->slow = 1;
-
- m->shown_error = 0;
-
- struct basic_mountinfo bmi;
- bmi.mount_point = mi->mount_point;
- bmi.persistent_id = mi->persistent_id;
- bmi.filesystem = mi->filesystem;
- bmi.root = mi->root;
-
- calculate_values_and_show_charts(&bmi, m, &buff_statvfs, update_every);
-}
-
-static inline void do_slow_disk_space_stats(struct basic_mountinfo *mi, int update_every) {
- struct mount_point_metadata *m = dictionary_get(dict_mountpoints, mi->mount_point);
-
- m->updated = 1;
-
- struct statvfs buff_statvfs;
- if (statvfs(mi->mount_point, &buff_statvfs) < 0) {
- if(!m->shown_error) {
- collector_error("DISKSPACE: failed to statvfs() mount point '%s' (disk '%s', filesystem '%s', root '%s')"
- , mi->mount_point
- , mi->persistent_id
- , mi->filesystem?mi->filesystem:""
- , mi->root?mi->root:""
- );
- m->shown_error = 1;
- }
- return;
- }
- m->shown_error = 0;
-
- calculate_values_and_show_charts(mi, m, &buff_statvfs, update_every);
-}
-
-static void diskspace_slow_worker_cleanup(void *ptr)
-{
- UNUSED(ptr);
-
- collector_info("cleaning up...");
-
- worker_unregister();
-}
-
-#define WORKER_JOB_SLOW_MOUNTPOINT 0
-#define WORKER_JOB_SLOW_CLEANUP 1
-
-struct slow_worker_data {
- netdata_thread_t *slow_thread;
- int update_every;
-};
-
-void *diskspace_slow_worker(void *ptr)
-{
- struct slow_worker_data *data = (struct slow_worker_data *)ptr;
-
- worker_register("DISKSPACE_SLOW");
- worker_register_job_name(WORKER_JOB_SLOW_MOUNTPOINT, "mountpoint");
- worker_register_job_name(WORKER_JOB_SLOW_CLEANUP, "cleanup");
-
- struct basic_mountinfo *slow_mountinfo_root = NULL;
-
- int slow_update_every = data->update_every > SLOW_UPDATE_EVERY ? data->update_every : SLOW_UPDATE_EVERY;
-
- netdata_thread_cleanup_push(diskspace_slow_worker_cleanup, data->slow_thread);
-
- usec_t step = slow_update_every * USEC_PER_SEC;
- usec_t real_step = USEC_PER_SEC;
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- while(service_running(SERVICE_COLLECTORS)) {
- worker_is_idle();
- heartbeat_next(&hb, USEC_PER_SEC);
-
- if (real_step < step) {
- real_step += USEC_PER_SEC;
- continue;
- }
- real_step = USEC_PER_SEC;
-
- usec_t start_time = now_monotonic_high_precision_usec();
-
- if (!dict_mountpoints)
- continue;
-
- if(unlikely(!service_running(SERVICE_COLLECTORS))) break;
-
- // --------------------------------------------------------------------------
- // disk space metrics
-
- worker_is_busy(WORKER_JOB_SLOW_MOUNTPOINT);
-
- netdata_mutex_lock(&slow_mountinfo_mutex);
- free_basic_mountinfo_list(slow_mountinfo_root);
- slow_mountinfo_root = slow_mountinfo_tmp_root;
- slow_mountinfo_tmp_root = NULL;
- netdata_mutex_unlock(&slow_mountinfo_mutex);
-
- struct basic_mountinfo *bmi;
- for(bmi = slow_mountinfo_root; bmi; bmi = bmi->next) {
- do_slow_disk_space_stats(bmi, slow_update_every);
-
- if(unlikely(!service_running(SERVICE_COLLECTORS))) break;
- }
-
- if(unlikely(!service_running(SERVICE_COLLECTORS))) break;
-
- worker_is_busy(WORKER_JOB_SLOW_CLEANUP);
-
- for(bmi = slow_mountinfo_root; bmi; bmi = bmi->next) {
- struct mount_point_metadata *m = dictionary_get(dict_mountpoints, bmi->mount_point);
-
- if (m)
- mount_point_cleanup(bmi->mount_point, m, 1);
- }
-
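- // Adapt the polling step when a pass is too slow: e.g. (hypothetical
- // timing) a pass of dt = 12s gives 12 * 3 / 2 = 18s, rounded up to the
- // next multiple of SLOW_UPDATE_EVERY (5s), i.e. a 20s step.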
- usec_t dt = now_monotonic_high_precision_usec() - start_time;
- if (dt > step) {
- slow_update_every = (dt / USEC_PER_SEC) * 3 / 2;
- if (slow_update_every % SLOW_UPDATE_EVERY)
- slow_update_every += SLOW_UPDATE_EVERY - slow_update_every % SLOW_UPDATE_EVERY;
- step = slow_update_every * USEC_PER_SEC;
- }
- }
-
- netdata_thread_cleanup_pop(1);
-
- free_basic_mountinfo_list(slow_mountinfo_root);
-
- return NULL;
-}
-
-static void diskspace_main_cleanup(void *ptr) {
- rrd_collector_finished();
- worker_unregister();
-
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- collector_info("cleaning up...");
-
- if (diskspace_slow_thread) {
- netdata_thread_join(*diskspace_slow_thread, NULL);
- freez(diskspace_slow_thread);
- }
-
- free_basic_mountinfo_list(slow_mountinfo_tmp_root);
-
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-#define WORKER_JOB_MOUNTINFO 0
-#define WORKER_JOB_MOUNTPOINT 1
-#define WORKER_JOB_CLEANUP 2
-
-#if WORKER_UTILIZATION_MAX_JOB_TYPES < 3
-#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 3
-#endif
-
-int diskspace_function_mount_points(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused,
- void *collector_data __maybe_unused,
- rrd_function_result_callback_t result_cb, void *result_cb_data,
- rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
- rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
- void *register_canceller_cb_data __maybe_unused) {
-
- buffer_flush(wb);
- wb->content_type = CT_APPLICATION_JSON;
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
-
- buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost));
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_time_t(wb, "update_every", 1);
- buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_DISKSPACE_HELP);
- buffer_json_member_add_array(wb, "data");
-
- double max_space_util = 0.0;
- double max_space_avail = 0.0;
- double max_space_used = 0.0;
- double max_space_reserved = 0.0;
-
- double max_inodes_util = 0.0;
- double max_inodes_avail = 0.0;
- double max_inodes_used = 0.0;
- double max_inodes_reserved = 0.0;
-
- struct mount_point_metadata *mp;
- dfe_start_write(dict_mountpoints, mp) {
- if (!mp->function_ready)
- continue;
-
- buffer_json_add_array_item_array(wb);
-
- buffer_json_add_array_item_string(wb, mp_dfe.name);
- buffer_json_add_array_item_string(wb, string2str(mp->filesystem));
- buffer_json_add_array_item_string(wb, string2str(mp->mountroot));
-
- double space_avail = rrddim_get_last_stored_value(mp->rd_space_avail, &max_space_avail, 1.0);
- double space_used = rrddim_get_last_stored_value(mp->rd_space_used, &max_space_used, 1.0);
- double space_reserved = rrddim_get_last_stored_value(mp->rd_space_reserved, &max_space_reserved, 1.0);
- double inodes_avail = rrddim_get_last_stored_value(mp->rd_inodes_avail, &max_inodes_avail, 1.0);
- double inodes_used = rrddim_get_last_stored_value(mp->rd_inodes_used, &max_inodes_used, 1.0);
- double inodes_reserved = rrddim_get_last_stored_value(mp->rd_inodes_reserved, &max_inodes_reserved, 1.0);
-
- double space_util = NAN;
- if (!isnan(space_avail) && !isnan(space_used)) {
- space_util = space_avail + space_used > 0 ? space_used * 100.0 / (space_avail + space_used) : 0;
- max_space_util = MAX(max_space_util, space_util);
- }
- double inodes_util = NAN;
- if (!isnan(inodes_avail) && !isnan(inodes_used)) {
- inodes_util = inodes_avail + inodes_used > 0 ? inodes_used * 100.0 / (inodes_avail + inodes_used) : 0;
- max_inodes_util = MAX(max_inodes_util, inodes_util);
- }
-
- buffer_json_add_array_item_double(wb, space_util);
- buffer_json_add_array_item_double(wb, space_avail);
- buffer_json_add_array_item_double(wb, space_used);
- buffer_json_add_array_item_double(wb, space_reserved);
-
- buffer_json_add_array_item_double(wb, inodes_util);
- buffer_json_add_array_item_double(wb, inodes_avail);
- buffer_json_add_array_item_double(wb, inodes_used);
- buffer_json_add_array_item_double(wb, inodes_reserved);
-
- buffer_json_array_close(wb);
- }
- dfe_done(mp);
-
- buffer_json_array_close(wb); // data
- buffer_json_member_add_object(wb, "columns");
- {
- size_t field_id = 0;
-
- buffer_rrdf_table_add_field(wb, field_id++, "Mountpoint", "Mountpoint Name",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_FULL_WIDTH,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Filesystem", "Mountpoint Filesystem",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Root", "Mountpoint Root",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "Used%", "Space Utilization",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "%", max_space_util, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Avail", "Space Avail",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "GiB", max_space_avail, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Used", "Space Used",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "GiB", max_space_used, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Reserved", "Space Reserved for root",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "GiB", max_space_reserved, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "iUsed%", "Inodes Utilization",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "%", max_inodes_util, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_NONE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "iAvail", "Inodes Avail",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "inodes", max_inodes_avail, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_NONE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "iUsed", "Inodes Used",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "inodes", max_inodes_used, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_NONE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "iReserved", "Inodes Reserved for root",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "inodes", max_inodes_reserved, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_NONE,
- NULL);
- }
-
- buffer_json_object_close(wb); // columns
- buffer_json_member_add_string(wb, "default_sort_column", "Used%");
-
- buffer_json_member_add_object(wb, "charts");
- {
- buffer_json_member_add_object(wb, "Utilization");
- {
- buffer_json_member_add_string(wb, "name", "Utilization");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Used%");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- buffer_json_member_add_object(wb, "Usage");
- {
- buffer_json_member_add_string(wb, "name", "Usage");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Avail");
- buffer_json_add_array_item_string(wb, "Used");
- buffer_json_add_array_item_string(wb, "Reserved");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- buffer_json_member_add_object(wb, "Inodes");
- {
- buffer_json_member_add_string(wb, "name", "Inodes");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "iAvail");
- buffer_json_add_array_item_string(wb, "iUsed");
- buffer_json_add_array_item_string(wb, "iReserved");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // charts
-
- buffer_json_member_add_array(wb, "default_charts");
- {
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "Utilization");
- buffer_json_add_array_item_string(wb, "Mountpoint");
- buffer_json_array_close(wb);
-
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "Usage");
- buffer_json_add_array_item_string(wb, "Mountpoint");
- buffer_json_array_close(wb);
- }
- buffer_json_array_close(wb);
-
- buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
- buffer_json_finalize(wb);
-
- int response = HTTP_RESP_OK;
- if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
- buffer_flush(wb);
- response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
- }
-
- if(result_cb)
- result_cb(wb, response, result_cb_data);
-
- return response;
-}
-
-void *diskspace_main(void *ptr) {
- worker_register("DISKSPACE");
- worker_register_job_name(WORKER_JOB_MOUNTINFO, "mountinfo");
- worker_register_job_name(WORKER_JOB_MOUNTPOINT, "mountpoint");
- worker_register_job_name(WORKER_JOB_CLEANUP, "cleanup");
-
- rrd_collector_started();
- rrd_function_add(localhost, NULL, "mount-points", 10, RRDFUNCTIONS_DISKSPACE_HELP, true, diskspace_function_mount_points, NULL);
-
- netdata_thread_cleanup_push(diskspace_main_cleanup, ptr);
-
- cleanup_mount_points = config_get_boolean(CONFIG_SECTION_DISKSPACE, "remove charts of unmounted disks" , cleanup_mount_points);
-
- int update_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every);
- if(update_every < localhost->rrd_update_every)
- update_every = localhost->rrd_update_every;
-
- check_for_new_mountpoints_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every);
- if(check_for_new_mountpoints_every < update_every)
- check_for_new_mountpoints_every = update_every;
-
- netdata_mutex_init(&slow_mountinfo_mutex);
-
- diskspace_slow_thread = mallocz(sizeof(netdata_thread_t));
-
- struct slow_worker_data slow_worker_data = {.slow_thread = diskspace_slow_thread, .update_every = update_every};
-
- netdata_thread_create(
- diskspace_slow_thread,
- "P[diskspace slow]",
- NETDATA_THREAD_OPTION_JOINABLE,
- diskspace_slow_worker,
- &slow_worker_data);
-
- usec_t step = update_every * USEC_PER_SEC;
- heartbeat_t hb;
- heartbeat_init(&hb);
- while(service_running(SERVICE_COLLECTORS)) {
- worker_is_idle();
- /* usec_t hb_dt = */ heartbeat_next(&hb, step);
-
- if(unlikely(!service_running(SERVICE_COLLECTORS))) break;
-
- // --------------------------------------------------------------------------
- // this is smart enough not to reload it every time
-
- worker_is_busy(WORKER_JOB_MOUNTINFO);
- mountinfo_reload(0);
-
- // --------------------------------------------------------------------------
- // disk space metrics
-
- netdata_mutex_lock(&slow_mountinfo_mutex);
- free_basic_mountinfo_list(slow_mountinfo_tmp_root);
- slow_mountinfo_tmp_root = NULL;
-
- struct mountinfo *mi;
- for(mi = disk_mountinfo_root; mi; mi = mi->next) {
- if(unlikely(mi->flags & (MOUNTINFO_IS_DUMMY | MOUNTINFO_IS_BIND)))
- continue;
-
- // exclude mounts made by ProtectHome and ProtectSystem systemd hardening options
- // https://github.com/netdata/netdata/issues/11498#issuecomment-950982878
- if(mi->flags & MOUNTINFO_READONLY && mi->flags & MOUNTINFO_IS_IN_SYSD_PROTECTED_LIST && !strcmp(mi->root, mi->mount_point))
- continue;
-
- worker_is_busy(WORKER_JOB_MOUNTPOINT);
- do_disk_space_stats(mi, update_every);
- if(unlikely(!service_running(SERVICE_COLLECTORS))) break;
- }
- netdata_mutex_unlock(&slow_mountinfo_mutex);
-
- if(unlikely(!service_running(SERVICE_COLLECTORS))) break;
-
- if(dict_mountpoints) {
- worker_is_busy(WORKER_JOB_CLEANUP);
- dictionary_walkthrough_read(dict_mountpoints, mount_point_cleanup_cb, NULL);
- }
-
- }
- worker_unregister();
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/Makefile.am b/collectors/ebpf.plugin/Makefile.am
deleted file mode 100644
index 2d5f92a6b..000000000
--- a/collectors/ebpf.plugin/Makefile.am
+++ /dev/null
@@ -1,42 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-include $(top_srcdir)/build/subst.inc
-SUFFIXES = .in
-
-userebpfconfigdir=$(configdir)/ebpf.d
-
-# Explicitly install directories to avoid permission issues due to umask
-install-exec-local:
- $(INSTALL) -d $(DESTDIR)$(userebpfconfigdir)
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-ebpfconfigdir=$(libconfigdir)/ebpf.d
-dist_libconfig_DATA = \
- ebpf.d.conf \
- $(NULL)
-
-dist_ebpfconfig_DATA = \
- ebpf.d/ebpf_kernel_reject_list.txt \
- ebpf.d/cachestat.conf \
- ebpf.d/dcstat.conf \
- ebpf.d/disk.conf \
- ebpf.d/fd.conf \
- ebpf.d/filesystem.conf \
- ebpf.d/hardirq.conf \
- ebpf.d/mdflush.conf \
- ebpf.d/mount.conf \
- ebpf.d/network.conf \
- ebpf.d/oomkill.conf \
- ebpf.d/process.conf \
- ebpf.d/shm.conf \
- ebpf.d/softirq.conf \
- ebpf.d/sync.conf \
- ebpf.d/swap.conf \
- ebpf.d/vfs.conf \
- $(NULL)
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
deleted file mode 100644
index 06915ea52..000000000
--- a/collectors/ebpf.plugin/README.md
+++ /dev/null
@@ -1,1071 +0,0 @@
-<!--
-title: "Kernel traces/metrics (eBPF) monitoring with Netdata"
-description: "Use Netdata's extended Berkeley Packet Filter (eBPF) collector to monitor kernel-level metrics about yourcomplex applications with per-second granularity."
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/README.md"
-sidebar_label: "Kernel traces/metrics (eBPF)"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/System metrics"
--->
-
-# Kernel traces/metrics (eBPF) collector
-
-The Netdata Agent provides many [eBPF](https://ebpf.io/what-is-ebpf/) programs to help you troubleshoot and debug how applications interact with the Linux kernel. The `ebpf.plugin` uses [tracepoints, trampolines, and kprobes](#how-netdata-collects-data-using-probes-and-tracepoints) to collect a wide array of high-value data about the host that would otherwise be impossible to capture.
-
-> ❗ eBPF monitoring only works on Linux systems and with specific Linux kernels, including all kernels newer than `4.11.0`, and all kernels on CentOS 7.6 or later. For kernels older than `4.11.0`, improved support is in active development.
-
-This document provides comprehensive details about the `ebpf.plugin`.
-For hands-on configuration and troubleshooting tips, see our [tutorial on troubleshooting apps with eBPF metrics](https://github.com/netdata/netdata/blob/master/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md).
-
-<figure>
- <img src="https://user-images.githubusercontent.com/1153921/74746434-ad6a1e00-5222-11ea-858a-a7882617ae02.png" alt="An example of VFS charts, made possible by the eBPF collector plugin" />
- <figcaption>An example of virtual file system (VFS) charts made possible by the eBPF collector plugin.</figcaption>
-</figure>
-
-## How Netdata collects data using probes and tracepoints
-
-Netdata uses the following features from the Linux kernel to run eBPF programs:
-
-- Tracepoints are hooks to call specific functions. Tracepoints are more stable than `kprobes` and are preferred when
- both options are available.
-- Trampolines are bridges between kernel functions and BPF programs. Netdata uses them by default whenever available.
-- Kprobes and return probes (`kretprobe`): probes can be inserted at virtually any kernel instruction. When eBPF runs in `entry` mode, it attaches only `kprobes` for internal functions, monitoring calls and some arguments every time a function is called. You can also change the configuration to [`return`](#global-configuration-options) mode, which additionally monitors the return of these functions to detect possible failures.
-
-In each case, wherever a normal kprobe, kretprobe, or tracepoint would have run its hook function, an eBPF program is run instead, performing various collection logic before letting the kernel continue its normal control flow.
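-
-As a rough sketch of the mechanism (generic libbpf-style code, not Netdata's actual program), a kprobe-attached eBPF program that counts calls to a kernel function could look like this:
-
-```c
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-// One-slot array map holding the call counter; user space reads it back.
-struct {
-    __uint(type, BPF_MAP_TYPE_ARRAY);
-    __uint(max_entries, 1);
-    __type(key, __u32);
-    __type(value, __u64);
-} calls SEC(".maps");
-
-SEC("kprobe/vfs_read") // run whenever vfs_read() is entered
-int count_vfs_read(void *ctx)
-{
-    __u32 key = 0;
-    __u64 *val = bpf_map_lookup_elem(&calls, &key);
-    if (val)
-        __sync_fetch_and_add(val, 1); // atomically count this call
-    return 0; // let the kernel continue its normal control flow
-}
-
-char LICENSE[] SEC("license") = "GPL";
-```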
-
-There are other methods to trigger eBPF programs, such as uprobes, but they are not currently supported.
-
-## Configuring ebpf.plugin
-
-The eBPF collector is installed and enabled by default on most new installations of the Agent.
-If your Agent is v1.22 or older, you may need to enable the collector yourself.
-
-### Enable the eBPF collector
-
-To enable or disable the entire eBPF collector:
-
-1. Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
- ```bash
- cd /etc/netdata
- ```
-
-2. Use the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit `netdata.conf`.
-
- ```bash
- ./edit-config netdata.conf
- ```
-
-3. Enable the collector by scrolling down to the `[plugins]` section. Uncomment the line `ebpf` (not
- `ebpf_process`) and set it to `yes`.
-
- ```conf
- [plugins]
- ebpf = yes
- ```
-
-### Configure the eBPF collector
-
-You can configure the eBPF collector's behavior to fine-tune which metrics you receive and to optimize performance.
-
-To edit the `ebpf.d.conf`:
-
-1. Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
- ```bash
- cd /etc/netdata
- ```
-2. Use the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit [`ebpf.d.conf`](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/ebpf.d.conf).
-
- ```bash
- ./edit-config ebpf.d.conf
- ```
-
- You can now edit the behavior of the eBPF collector. The following sections describe each configuration option in detail.
-
-### `[global]` configuration options
-
-The `[global]` section defines settings for the whole eBPF collector.
-
-#### eBPF load mode
-
-The collector uses two different eBPF programs. These programs rely on the same functions inside the kernel, but they
-monitor, process, and display different kinds of information.
-
-By default, this plugin uses the `entry` mode. Changing this mode can create significant overhead on your operating
-system, but can also offer valuable information if you are developing or debugging software. The `ebpf load mode` option
-accepts the following values:
-
-- `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described in
- the sections above, and does not show charts related to errors.
-- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates new
- charts for the return of these functions, such as errors. Monitoring function returns can help in debugging software,
- such as failing to close file descriptors or creating zombie processes.
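-
-For example, a hypothetical `ebpf.d.conf` snippet selecting the `return` mode:
-
-```conf
-[global]
- ebpf load mode = return
-```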
-
-#### Integration with `apps.plugin`
-
-The eBPF collector also creates charts for each running application through an integration with the
-[`apps.plugin`](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md). This integration helps you understand how specific applications
-interact with the Linux kernel.
-
-If you want to enable the `apps.plugin` integration, change the `apps` setting to `yes`.
-
-```conf
-[global]
- apps = yes
-```
-
-#### Integration with `cgroups.plugin`
-
-The eBPF collector also creates charts for each cgroup through an integration with the
-[`cgroups.plugin`](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md). This integration helps you understand how a specific cgroup
-interacts with the Linux kernel.
-
-The integration with `cgroups.plugin` is disabled by default to avoid creating overhead on your system. If you want to
-_enable_ the integration with `cgroups.plugin`, change the `cgroups` setting to `yes`.
-
-```conf
-[global]
- cgroups = yes
-```
-
-If you do not need to monitor specific metrics for your `cgroups`, you can enable `cgroups` inside
-`ebpf.d.conf`, and then disable the plugin for a specific `thread` by following the steps in the
-[Configuration](#configuring-ebpfplugin) section.
-
-#### Maps per Core
-
-When Netdata runs on kernels newer than `4.6`, you can modify how `ebpf.plugin` creates its maps (hash or
-array). When `maps per core` is set to `yes`, the plugin creates one map per core on the host; when it is set to `no`,
-only one hash table is created. The latter uses less memory, but it can also increase the overhead for processes.
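-
-For example, to trade some per-process overhead for lower memory usage (illustrative snippet):
-
-```conf
-[global]
- maps per core = no
-```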
-
-#### Collect PID
-
-When one of the previous integrations is enabled, `ebpf.plugin` will use Process Identifier (`PID`) to identify the
-process group for which it needs to plot data.
-
-There are different ways to collect PID, and you can select the way `ebpf.plugin` collects data with the following
-values:
-
-- `real parent`: This is the default mode. Collection will aggregate data for the real parent, the thread that creates
- child threads.
-- `parent`: Parent and real parent are the same when a process starts, but this value can be changed during run time.
-- `all`: This option will store all PIDs that run on the host. Note that this method can be expensive for the host,
- because more memory needs to be allocated and parsed.
-
-The threads that integrate with other collectors perform an internal cleanup by attaching either a
-`trampoline` or a `kprobe` to the internal `release_task` function. To avoid overloading this function, `ebpf.plugin`
-only enables these integrated threads when the kernel is compiled with
-`CONFIG_DEBUG_INFO_BTF`, unless you enable them manually.
-
-#### Collection period
-
-The plugin uses the `update every` option to define how often, in seconds, eBPF sends its collected data to Netdata. The default value
-is 5 seconds.
-
-#### PID table size
-
-The `pid table size` option defines the maximum number of PIDs stored inside the application hash table. The default value
-is defined according to the [kernel](https://elixir.bootlin.com/linux/v6.0.19/source/include/linux/threads.h#L28) source code.
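-
-These two options (`update every` and `pid table size`) live in the `[global]` section; an illustrative snippet with hypothetical values:
-
-```conf
-[global]
- update every = 10
- pid table size = 32768
-```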
-
-#### Integration Dashboard Elements
-
-When an integration is enabled, your dashboard will also show the following cgroups and apps charts using low-level
-Linux metrics:
-
-> Note: The parenthetical accompanying each bulleted item provides the chart name.
-
-- mem
- - Number of processes killed due to running out of memory. (`oomkills`)
-- process
- - Number of processes created with `do_fork`. (`process_create`)
- - Number of threads created with `do_fork` or `clone(2)`, depending on your system's kernel
- version. (`thread_create`)
- - Number of times that a process called `do_exit`. (`task_exit`)
- - Number of times that a process called `release_task`. (`task_close`)
- - Number of times that an error happened to create thread or process. (`task_error`)
-- swap
- - Number of calls to `swap_readpage`. (`swap_read_call`)
- - Number of calls to `swap_writepage`. (`swap_write_call`)
-- network
- - Number of outbound connections using TCP/IPv4. (`outbound_conn_ipv4`)
- - Number of outbound connections using TCP/IPv6. (`outbound_conn_ipv6`)
- - Number of bytes sent. (`total_bandwidth_sent`)
- - Number of bytes received. (`total_bandwidth_recv`)
- - Number of calls to `tcp_sendmsg`. (`bandwidth_tcp_send`)
- - Number of calls to `tcp_cleanup_rbuf`. (`bandwidth_tcp_recv`)
- - Number of calls to `tcp_retransmit_skb`. (`bandwidth_tcp_retransmit`)
- - Number of calls to `udp_sendmsg`. (`bandwidth_udp_send`)
- - Number of calls to `udp_recvmsg`. (`bandwidth_udp_recv`)
-- file access
- - Number of calls to open files. (`file_open`)
- - Number of calls to open files that returned errors. (`open_error`)
- - Number of files closed. (`file_closed`)
- - Number of calls to close files that returned errors. (`file_error_closed`)
-- vfs
- - Number of calls to `vfs_unlink`. (`file_deleted`)
- - Number of calls to `vfs_write`. (`vfs_write_call`)
- - Number of calls to write a file that returned errors. (`vfs_write_error`)
- - Number of calls to `vfs_read`. (`vfs_read_call`)
- - Number of calls to read a file that returned errors. (`vfs_read_error`)
- - Number of bytes written with `vfs_write`. (`vfs_write_bytes`)
- - Number of bytes read with `vfs_read`. (`vfs_read_bytes`)
- - Number of calls to `vfs_fsync`. (`vfs_fsync`)
- - Number of calls to sync a file that returned errors. (`vfs_fsync_error`)
- - Number of calls to `vfs_open`. (`vfs_open`)
- - Number of calls to open a file that returned errors. (`vfs_open_error`)
- - Number of calls to `vfs_create`. (`vfs_create`)
- - Number of calls to create a file that returned errors. (`vfs_create_error`)
-- page cache
- - Ratio of pages accessed. (`cachestat_ratio`)
- - Number of modified pages ("dirty"). (`cachestat_dirties`)
- - Number of accessed pages. (`cachestat_hits`)
- - Number of pages brought from disk. (`cachestat_misses`)
-- directory cache
- - Ratio of files available in directory cache. (`dc_hit_ratio`)
- - Number of files accessed. (`dc_reference`)
- - Number of files accessed that were not in cache. (`dc_not_cache`)
- - Number of files not found. (`dc_not_found`)
-- ipc shm
- - Number of calls to `shm_get`. (`shmget_call`)
- - Number of calls to `shm_at`. (`shmat_call`)
- - Number of calls to `shm_dt`. (`shmdt_call`)
- - Number of calls to `shm_ctl`. (`shmctl_call`)
-
-### `[ebpf programs]` configuration options
-
-The eBPF collector enables and runs the following eBPF programs by default:
-
-- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with
- [`apps.plugin`](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_
- for each application.
-- `fd` : This eBPF program creates charts that show information about calls to open files.
-- `mount`: This eBPF program creates charts that show calls to syscalls mount(2) and umount(2).
-- `shm`: This eBPF program creates charts that show calls to syscalls shmget(2), shmat(2), shmdt(2) and shmctl(2).
-- `process`: This eBPF program creates charts that show information about process life. When in `return` mode, it also
- creates charts showing errors when these operations are executed.
-- `hardirq`: This eBPF program creates charts that show information about time spent servicing individual hardware
- interrupt requests (hard IRQs).
-- `softirq`: This eBPF program creates charts that show information about time spent servicing individual software
- interrupt requests (soft IRQs).
-- `oomkill`: This eBPF program creates a chart that shows OOM kills for all applications recognized via
- the `apps.plugin` integration. Note that this program will show application charts regardless of whether apps
- integration is turned on or off.
-
-You can also enable the following eBPF programs:
-
-- `dcstat`: This eBPF program creates charts that show information about file access using the directory cache. It appends
-  `kprobes` for `lookup_fast()` and `d_lookup()` to identify whether files are inside the directory cache, outside of it,
-  or not found at all.
-- `disk`: This eBPF program creates charts that show information about disk latency independent of filesystem.
-- `filesystem`: This eBPF program creates charts that show information about some filesystem latencies.
-- `swap`: This eBPF program creates charts that show information about swap access.
-- `mdflush`: This eBPF program creates charts that show information about multi-device software flushes.
-- `sync`: Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
-- `socket`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
-  bandwidth consumed by each.
-- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions.
-
-### Configuring eBPF threads
-
-You can configure each thread of the eBPF data collector. This allows you to overwrite global options defined in `/etc/netdata/ebpf.d.conf` and configure specific options for each thread.
-
-To configure an eBPF thread:
-
-1. Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
- ```bash
- cd /etc/netdata
- ```
-2. Use the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit a thread configuration file. The following configuration files are available:
-
- - `network.conf`: Configuration for the [`network` thread](#network-configuration). This config file overwrites the global options and also
- lets you specify which network the eBPF collector monitors.
-  - `process.conf`: Configuration for the `process` thread.
-  - `cachestat.conf`: Configuration for the `cachestat` thread.
- - `dcstat.conf`: Configuration for the `dcstat` thread.
- - `disk.conf`: Configuration for the `disk` thread.
- - `fd.conf`: Configuration for the `file descriptor` thread.
- - `filesystem.conf`: Configuration for the `filesystem` thread.
- - `hardirq.conf`: Configuration for the `hardirq` thread.
- - `softirq.conf`: Configuration for the `softirq` thread.
- - `sync.conf`: Configuration for the `sync` thread.
- - `vfs.conf`: Configuration for the `vfs` thread.
-
- ```bash
- ./edit-config FILE.conf
- ```
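-
-  For example, a minimal sketch of editing the `network` thread configuration. The `ebpf.d/` subdirectory used here is an
-  assumption; adjust the path if your install places the thread files elsewhere:
-
-  ```bash
-  cd /etc/netdata
-  # thread configuration files are assumed to live under ebpf.d/
-  ./edit-config ebpf.d/network.conf
-  ```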
-
-### Network configuration
-
-The network configuration has specific options to configure which network(s) the eBPF collector monitors. These options
-are divided in the following sections:
-
-#### `[network connections]`
-
-You can configure the information shown by the function `ebpf_socket` using the settings in this section.
-
-```conf
-[network connections]
- enabled = yes
- resolve hostname ips = no
- resolve service names = yes
- ports = 1-1024 !145 !domain
- hostnames = !example.com
- ips = !127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7
-```
-
-When you define a `ports` setting, Netdata will collect network metrics only for those specific ports. For example, if you
-write `ports = 19999`, Netdata will collect only connections to itself, as shown in the sketch below. The `hostnames` setting accepts
-[simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md). The `ports` and `ips` settings accept negation (`!`) to exclude
-specific values, or an asterisk alone to match all values.
-
-In the configuration example above, Netdata will collect metrics for all ports between `1` and `1024`, except for `53` (domain)
-and `145`.
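-
-As a minimal sketch, restricting collection to Netdata's own port from the prose example above:
-
-```conf
-[network connections]
-    # collect metrics only for connections using port 19999 (Netdata's default port)
-    ports = 19999
-```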
-
-The following options are available:
-
-- `enabled`: Enable or disable network connection monitoring. Disabling it directly affects the output of some functions.
-- `resolve hostname ips`: Enable resolving IPs to hostnames. It is disabled by default because it can be too slow.
-- `resolve service names`: Convert destination ports into service names, for example, port `53` over protocol `UDP` becomes `domain`.
-  All names are read from `/etc/services`.
-- `ports`: Define the destination ports for Netdata to monitor.
-- `hostnames`: The list of hostnames that can be resolved to an IP address.
-- `ips`: The IP or range of IPs that you want to monitor. You can use IPv4 or IPv6 addresses, use dashes to define a
- range of IPs, or use CIDR values.
-
-By default, the traffic table is created using the destination IPs and ports of the sockets. This can be
-changed, so that Netdata uses service names (if possible), by specifying `resolve service names = yes` in the configuration
-section.
-
-#### `[service name]`
-
-Netdata uses the list of services in `/etc/services` to plot network connection charts. If this file does not contain
-the name for a particular service you use in your infrastructure, you will need to add it to the `[service name]`
-section.
-
-For example, Netdata's default port (`19999`) is not listed in `/etc/services`. To associate that port with the Netdata
-service in network connection charts, and thus see the name of the service instead of its port, define it:
-
-```conf
-[service name]
- 19999 = Netdata
-```
-
-### Sync configuration
-
-The sync configuration has specific options to disable monitoring for syscalls. All syscalls are monitored by default.
-
-```conf
-[syscalls]
- sync = yes
- msync = yes
- fsync = yes
- fdatasync = yes
- syncfs = yes
- sync_file_range = yes
-```
-
-### Filesystem configuration
-
-The filesystem configuration has specific options to disable monitoring for filesystems; by default, all filesystems are
-monitored.
-
-```conf
-[filesystem]
- btrfsdist = yes
- ext4dist = yes
- nfsdist = yes
- xfsdist = yes
- zfsdist = yes
-```
-
-The eBPF program `nfsdist` monitors only `nfs` mount points.
-
-## Troubleshooting
-
-If the eBPF collector does not work, you can troubleshoot it by running the `ebpf.plugin` command and investigating its
-output.
-
-```bash
-cd /usr/libexec/netdata/plugins.d/
-sudo su -s /bin/bash netdata
-./ebpf.plugin
-```
-
-You can also use `grep` to search the Agent's `error.log` for messages related to eBPF monitoring.
-
-```bash
-grep -i ebpf /var/log/netdata/error.log
-```
-
-### Confirm kernel compatibility
-
-The eBPF collector only works on Linux systems and with specific Linux kernels. We support all kernels more recent than
-`4.11.0`, and all kernels on CentOS 7.6 or later.
-
-You can run our helper script to determine whether your system can support eBPF monitoring. If it returns no output, your system is ready to compile and run the eBPF collector.
-
-```bash
-curl -sSL https://raw.githubusercontent.com/netdata/kernel-collector/master/tools/check-kernel-config.sh | sudo bash
-```
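-
-If you prefer to inspect the kernel configuration yourself, here is a quick sketch. It assumes your distribution installs
-the build configuration at `/boot/config-<release>`; some distributions expose it at `/proc/config.gz` instead:
-
-```bash
-# check that the kernel options required by eBPF monitoring are enabled
-grep -E 'CONFIG_(KPROBES|KPROBES_ON_FTRACE|HAVE_KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' /boot/config-"$(uname -r)"
-```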
-
-If you see a warning about a missing kernel
-configuration (`KPROBES KPROBES_ON_FTRACE HAVE_KPROBES BPF BPF_SYSCALL BPF_JIT`), you will need to recompile your kernel
-to support this configuration. The process of recompiling Linux kernels varies based on your distribution and version.
-Read the documentation for your system's distribution to learn more about the specific workflow for recompiling the
-kernel, ensuring that you set all the necessary configuration options:
-
-- [Ubuntu](https://wiki.ubuntu.com/Kernel/BuildYourOwnKernel)
-- [Debian](https://kernel-team.pages.debian.net/kernel-handbook/ch-common-tasks.html#s-common-official)
-- [Fedora](https://fedoraproject.org/wiki/Building_a_custom_kernel)
-- [CentOS](https://wiki.centos.org/HowTos/Custom_Kernel)
-- [Arch Linux](https://wiki.archlinux.org/index.php/Kernel/Traditional_compilation)
-- [Slackware](https://docs.slackware.com/howtos:slackware_admin:kernelbuilding)
-
-### Mount `debugfs` and `tracefs`
-
-The eBPF collector also requires both the `tracefs` and `debugfs` filesystems. Try mounting the `tracefs` and `debugfs`
-filesystems using the commands below:
-
-```bash
-sudo mount -t debugfs nodev /sys/kernel/debug
-sudo mount -t tracefs nodev /sys/kernel/tracing
-```
-
-If they are already mounted, you will see an error. You can also add entries to your system's `/etc/fstab` so that these
-filesystems are mounted on startup, as in the example below. More information can be found in
-the [ftrace documentation](https://www.kernel.org/doc/Documentation/trace/ftrace.txt).
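-
-As a sketch, the corresponding `/etc/fstab` entries could look like this (standard mount options assumed):
-
-```conf
-# mount debugfs and tracefs automatically at boot
-debugfs  /sys/kernel/debug    debugfs  defaults  0  0
-tracefs  /sys/kernel/tracing  tracefs  defaults  0  0
-```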
-
-## Charts
-
-The eBPF collector creates charts on different menus, like System Overview, Memory, MD arrays, Disks, Filesystem,
-Mount Points, Networking Stack, systemd Services, and Applications.
-
-The collector stores the actual value inside of its process, but charts only show the difference between the values
-collected in the previous and current seconds.
-
-### System overview
-
-Not all charts within the System Overview menu are enabled by default. Charts that rely on `kprobes` are disabled by default because they add around 100ns of overhead for each function call. This is a small number from a human's perspective, but the functions are called many times, creating an impact
-on the host. See the [configuration](#configuring-ebpfplugin) section for details about how to enable them.
-
-#### Processes
-
-Internally, the Linux kernel treats both processes and threads as `tasks`. To create a thread, the kernel offers a few
-system calls: `fork(2)`, `vfork(2)`, and `clone(2)`. To generate this chart, the eBPF
-collector uses the following `tracepoints` and `kprobes`:
-
-- `sched/sched_process_fork`: Tracepoint called after a call to `fork(2)`, `vfork(2)`, or `clone(2)`.
-- `sched/sched_process_exec`: Tracepoint called after an exec-family syscall.
-- `kprobe/kernel_clone`: This is the main [`fork()`](https://elixir.bootlin.com/linux/v5.10/source/kernel/fork.c#L2415)
-  routine since kernel `5.10.0` was released.
-- `kprobe/_do_fork`: Like `kernel_clone`, but this was the main function between kernels `4.2.0` and `5.9.16`.
-- `kprobe/do_fork`: This was the main function before kernel `4.2.0`.
-
-#### Process Exit
-
-Ending a task requires two steps. The first is a call to the internal function `do_exit`, which notifies the operating
-system that the task is finishing its work. The second step is to release the kernel information with the internal
-function `release_task`. The difference between the two dimensions can help you discover
-[zombie processes](https://en.wikipedia.org/wiki/Zombie_process). To get the metrics, the collector uses:
-
-- `sched/sched_process_exit`: Tracepoint called after a task exits.
-- `kprobe/release_task`: This function is called when a process exits, as the kernel still needs to remove the process
- descriptor.
-
-#### Task error
-
-The functions responsible for ending tasks do not return values, so this chart contains information about failures on
-process and thread creation only.
-
-#### Swap
-
-Inside the swap submenu the eBPF plugin creates the chart `swapcalls`. This chart shows when processes call
-the functions [`swap_readpage` and `swap_writepage`](https://hzliu123.github.io/linux-kernel/Page%20Cache%20in%20Linux%202.6.pdf),
-which are responsible for performing I/O on swap memory. To capture the exact moment that swap is accessed,
-the collector attaches `kprobes` to the cited functions.
-
-#### Soft IRQ
-
-The following `tracepoints` are used to measure time usage for soft IRQs:
-
-- [`irq/softirq_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_entry): Called
-  before the softirq handler runs.
-- [`irq/softirq_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_exit): Called when the
-  softirq handler returns.
-
-#### Hard IRQ
-
-The following tracepoints are used to measure the latency of servicing a
-hardware interrupt request (hard IRQ).
-
-- [`irq/irq_handler_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_entry):
- Called immediately before the IRQ action handler.
-- [`irq/irq_handler_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_exit):
- Called immediately after the IRQ action handler returns.
-- `irq_vectors`: These are traces from `irq_handler_entry` and
- `irq_handler_exit` when an IRQ is handled. The following elements from vector
- are triggered:
- - `irq_vectors/local_timer_entry`
- - `irq_vectors/local_timer_exit`
- - `irq_vectors/reschedule_entry`
- - `irq_vectors/reschedule_exit`
- - `irq_vectors/call_function_entry`
- - `irq_vectors/call_function_exit`
- - `irq_vectors/call_function_single_entry`
-  - `irq_vectors/call_function_single_exit`
- - `irq_vectors/irq_work_entry`
- - `irq_vectors/irq_work_exit`
- - `irq_vectors/error_apic_entry`
- - `irq_vectors/error_apic_exit`
- - `irq_vectors/thermal_apic_entry`
- - `irq_vectors/thermal_apic_exit`
- - `irq_vectors/threshold_apic_entry`
- - `irq_vectors/threshold_apic_exit`
- - `irq_vectors/deferred_error_entry`
- - `irq_vectors/deferred_error_exit`
- - `irq_vectors/spurious_apic_entry`
- - `irq_vectors/spurious_apic_exit`
- - `irq_vectors/x86_platform_ipi_entry`
- - `irq_vectors/x86_platform_ipi_exit`
-
-#### IPC shared memory
-
-To monitor shared memory system call counts, Netdata attaches tracing in the following functions:
-
-- `shmget`: Runs when [`shmget`](https://man7.org/linux/man-pages/man2/shmget.2.html) is called.
-- `shmat`: Runs when [`shmat`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
-- `shmdt`: Runs when [`shmdt`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
-- `shmctl`: Runs when [`shmctl`](https://man7.org/linux/man-pages/man2/shmctl.2.html) is called.
-
-### Memory
-
-In the memory submenu the eBPF plugin creates two submenus, **page cache** and **synchronization**, with the following
-organization:
-
-- Page Cache
- - Page cache ratio
- - Dirty pages
- - Page cache hits
- - Page cache misses
-- Synchronization
- - File sync
- - Memory map sync
- - File system sync
- - File range sync
-
-#### Page cache hits
-
-When the processor needs to read or write a location in main memory, it checks for a corresponding entry in the page cache.
- If the entry is there, a page cache hit has occurred and the read is from the cache.
-
-A page cache hit is when the page cache is successfully accessed with a read operation. We do not count pages that were
-added relatively recently.
-
-#### Dirty pages
-
-A "dirty page" is a page in the page cache that was modified after being created. Since non-dirty pages in the page cache
- have identical copies in secondary storage (e.g. hard disk drive or solid-state drive), discarding and reusing their space
- is much quicker than paging out application memory, and is often preferred over flushing the dirty pages into secondary storage
- and reusing their space.
-
-The `cachestat_dirties` chart shows the number of pages that were modified, measured as the number of calls
-to the function `mark_buffer_dirty`.
-
-#### Page cache ratio
-
-When the processor needs to read or write a specific memory address, it checks for a corresponding entry in the page cache.
-If the entry is present (`page cache hit`), it reads the entry from the cache. If there is no entry (`page cache miss`),
-the kernel allocates a new entry and copies data from the disk. Netdata calculates the percentage of accessed pages that are cached in
-memory. The ratio is calculated by counting the accessed cached pages
-(excluding [dirty pages](#dirty-pages) and pages added because of read misses) and dividing by the total number of accesses, excluding dirty pages:
-
-> ratio = (number of accessed pages - dirty pages - missed pages) / (number of accessed pages - dirty pages)
-
-The chart `cachestat_ratio` shows how processes are accessing the page cache. In a normal scenario, we expect values around
-100%, which means that the majority of the work on the machine is processed in memory. To calculate the ratio, Netdata
-attaches `kprobes` to the following kernel functions (a worked example follows the list):
-
-- `add_to_page_cache_lru`: Page addition.
-- `mark_page_accessed`: Access to cache.
-- `account_page_dirtied`: Dirty (modified) pages.
-- `mark_buffer_dirty`: Writes to page cache.
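-
-As a worked example with hypothetical counts for one interval: if `mark_page_accessed` fired 1000 times,
-`mark_buffer_dirty` 100 times, `add_to_page_cache_lru` 300 times, and `account_page_dirtied` 100 times, then:
-
-> total = 1000 - 100 = 900 (accesses without dirty pages)<br/>
-> misses = 300 - 100 = 200 (page additions that were not new dirty pages)<br/>
-> hits = 900 - 200 = 700<br/>
-> ratio = 700 / 900 ≈ 78%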
-
-#### Page cache misses
-
-A page cache miss means that a page was not inside memory when the process tried to access it. This chart shows the
-difference between the number of calls to the functions `add_to_page_cache_lru` and `account_page_dirtied`.
-
-#### File sync
-
-This chart shows calls to synchronization methods, [`fsync(2)`](https://man7.org/linux/man-pages/man2/fdatasync.2.html)
-and [`fdatasync(2)`](https://man7.org/linux/man-pages/man2/fdatasync.2.html), to transfer all modified page caches
-for the files on disk devices. These calls block until the disk reports that the transfer has been completed. They flush
-data for specific file descriptors.
-
-#### Memory map sync
-
-The chart shows calls to [`msync(2)`](https://man7.org/linux/man-pages/man2/msync.2.html) syscalls. This syscall flushes
-changes to a file that was mapped into memory using [`mmap(2)`](https://man7.org/linux/man-pages/man2/mmap.2.html).
-
-#### File system sync
-
-This chart monitors calls demonstrating commits from filesystem caches to disk. Netdata attaches `tracing` for
-[`sync(2)`](https://man7.org/linux/man-pages/man2/sync.2.html), and [`syncfs(2)`](https://man7.org/linux/man-pages/man2/sync.2.html).
-
-#### File range sync
-
-This chart shows calls to [`sync_file_range(2)`](https://man7.org/linux/man-pages/man2/sync_file_range.2.html) which
-synchronizes file segments with disk.
-
-> Note: This is the most dangerous syscall to synchronize data, according to its manual.
-
-### Multiple Device (MD) arrays
-
-The eBPF plugin shows multi-device flushes happening in real time. This can be used to explain some spikes happening
-in [disk latency](#disk) charts.
-
-By default, MD flush is disabled. To enable it, configure your
-`/etc/netdata/ebpf.d.conf` file as:
-
-```conf
-[global]
- mdflush = yes
-```
-
-#### MD flush
-
-To collect data related to Linux multi-device (MD) flushing, the following kprobe is used:
-
-- `kprobe/md_flush_request`: called whenever a request for flushing multi-device data is made.
-
-### Disk
-
-The eBPF plugin also shows a chart in the Disk section when the `disk` thread is enabled.
-
-#### Disk Latency
-
-This will create the chart `disk_latency_io` for each disk on the host. The following tracepoints are used:
-
-- [`block/block_rq_issue`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_issue):
- IO request operation to a device drive.
-- [`block/block_rq_complete`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_complete):
- IO operation completed by device.
-
-Disk latency is the single most important metric to focus on when it comes to storage performance, under most circumstances.
-For hard drives, an average latency somewhere between 10 and 20 ms can be considered acceptable. For SSDs (Solid State Drives),
-most workloads experience less than 1 ms of latency, and it should never exceed 3 ms.
-The dimensions refer to time intervals.
-
-### Filesystem
-
-This group has charts demonstrating how applications interact with the Linux kernel to open and close file descriptors.
-It also brings latency charts for several different filesystems.
-
-#### Latency Algorithm
-
-We calculate the difference between the calling and return times, spanning disk I/O, file system operations (lock, I/O),
-run queue latency and all events related to the monitored action.
-
-#### ext4
-
-To measure the latency of executing some actions in an
-[ext4](https://elixir.bootlin.com/linux/latest/source/fs/ext4) filesystem, the
-collector needs to attach `kprobes` and `kretprobes` for each of the following
-functions:
-
-- `ext4_file_read_iter`: Function used to measure read latency.
-- `ext4_file_write_iter`: Function used to measure write latency.
-- `ext4_file_open`: Function used to measure open latency.
-- `ext4_sync_file`: Function used to measure sync latency.
-
-#### ZFS
-
-To measure the latency of executing some actions in a zfs filesystem, the
-collector needs to attach `kprobes` and `kretprobes` for each of the following
-functions:
-
-- `zpl_iter_read`: Function used to measure read latency.
-- `zpl_iter_write`: Function used to measure write latency.
-- `zpl_open`: Function used to measure open latency.
-- `zpl_fsync`: Function used to measure sync latency.
-
-#### XFS
-
-To measure the latency of executing some actions in an
-[xfs](https://elixir.bootlin.com/linux/latest/source/fs/xfs) filesystem, the
-collector needs to attach `kprobes` and `kretprobes` for each of the following
-functions:
-
-- `xfs_file_read_iter`: Function used to measure read latency.
-- `xfs_file_write_iter`: Function used to measure write latency.
-- `xfs_file_open`: Function used to measure open latency.
-- `xfs_file_fsync`: Function used to measure sync latency.
-
-#### NFS
-
-To measure the latency of executing some actions in an
-[nfs](https://elixir.bootlin.com/linux/latest/source/fs/nfs) filesystem, the
-collector needs to attach `kprobes` and `kretprobes` for each of the following
-functions:
-
-- `nfs_file_read`: Function used to measure read latency.
-- `nfs_file_write`: Function used to measure write latency.
-- `nfs_file_open`: Function used to measure open latency.
-- `nfs4_file_open`: Function used to measure open latency for NFS v4.
-- `nfs_getattr`: Function used to measure sync latency.
-
-#### btrfs
-
-To measure the latency of executing some actions in a [btrfs](https://elixir.bootlin.com/linux/latest/source/fs/btrfs/file.c)
-filesystem, the collector needs to attach `kprobes` and `kretprobes` for each of the following functions:
-
-> Note: We are listing two functions used to measure `read` latency, but we use either `btrfs_file_read_iter` or
-> `generic_file_read_iter`, depending on kernel version.
-
-- `btrfs_file_read_iter`: Function used to measure read latency since kernel `5.10.0`.
-- `generic_file_read_iter`: Like `btrfs_file_read_iter`, but this function was used before kernel `5.10.0`.
-- `btrfs_file_write_iter`: Function used to write data.
-- `btrfs_file_open`: Function used to open files.
-- `btrfs_sync_file`: Function used to synchronize data to filesystem.
-
-#### File descriptor
-
-To provide metrics related to `open` and `close` events, instead of attaching kprobes to each syscall that triggers these
-events, the collector attaches `kprobes` to the common functions used by those syscalls:
-
-- [`do_sys_open`](https://0xax.gitbooks.io/linux-insides/content/SysCall/linux-syscall-5.html): Internal function used to
- open files.
-- [`do_sys_openat2`](https://elixir.bootlin.com/linux/v5.6/source/fs/open.c#L1162):
- Function called from `do_sys_open` since version `5.6.0`.
-- [`close_fd`](https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg2271761.html): Function used to close file
- descriptor since kernel `5.11.0`.
-- `__close_fd`: Function used to close files before version `5.11.0`.
-
-#### File error
-
-This chart shows the number of times some software tried and failed to open or close a file descriptor.
-
-#### VFS
-
-The Linux Virtual File System (VFS) is an abstraction layer on top of a
-concrete filesystem like the ones listed in the parent section, e.g. `ext4`.
-
-In this section we list the mechanism by which we gather VFS data, and what
-charts are consequently created.
-
-##### VFS eBPF Hooks
-
-To measure the latency and the number of calls of some VFS-level
-functions, ebpf.plugin needs to attach kprobes and kretprobes for each of the
-following functions (a standalone tracing sketch follows the list):
-
-- `vfs_write`: Function used for monitoring the number of successful & failed
- filesystem write calls, as well as the total number of written bytes.
-- `vfs_writev`: Same function as `vfs_write` but for vector writes (i.e. a
- single write operation using a group of buffers rather than 1).
-- `vfs_read`: Function used for monitoring the number of successful & failed
- filesystem read calls, as well as the total number of read bytes.
-- `vfs_readv`: Same function as `vfs_read` but for vector reads (i.e. a single
- read operation using a group of buffers rather than 1).
-- `vfs_unlink`: Function used for monitoring the number of successful & failed
- filesystem unlink calls.
-- `vfs_fsync`: Function used for monitoring the number of successful & failed
- filesystem fsync calls.
-- `vfs_open`: Function used for monitoring the number of successful & failed
- filesystem open calls.
-- `vfs_create`: Function used for monitoring the number of successful & failed
- filesystem create calls.
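-
-To see in isolation what attaching a probe to one of these functions looks like, here is a minimal sketch using the
-standalone `bpftrace` tool (an assumption: `bpftrace` is not part of `ebpf.plugin` and must be installed separately):
-
-```bash
-# count vfs_read calls per process name until interrupted (Ctrl+C prints the map)
-sudo bpftrace -e 'kprobe:vfs_read { @calls[comm] = count(); }'
-```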
-
-##### VFS Deleted objects
-
-This chart monitors calls to `vfs_unlink`. This function is responsible for removing objects from the file system.
-
-##### VFS IO
-
-This chart shows the number of calls to the functions `vfs_read` and `vfs_write`.
-
-##### VFS IO bytes
-
-This chart also monitors `vfs_read` and `vfs_write` but, instead of the number of calls, it shows the total amount of
-bytes read and written with these functions.
-
-The Agent displays the number of bytes written as negative because they are moving down to disk.
-
-##### VFS IO errors
-
-The Agent counts and shows the number of instances where a running program experiences a read or write error.
-
-##### VFS Create
-
-This chart shows the number of calls to `vfs_create`. This function is responsible for creating files.
-
-##### VFS Synchronization
-
-This chart shows the number of calls to `vfs_fsync`. This function is responsible for calling `fsync(2)` or
-`fdatasync(2)` on a file. You can see more details in the Synchronization section.
-
-##### VFS Open
-
-This chart shows the number of calls to `vfs_open`. This function is responsible for opening files.
-
-#### Directory Cache
-
-Metrics for the directory cache are collected using a kprobe for `lookup_fast`, because we are interested only in the number of
-times this function is accessed. On the other hand, for `d_lookup` we are not only interested in the number of times it
-is accessed, but also in possible errors, so we need to attach a `kretprobe`. For this reason, the following are used (an illustration follows the list):
-
-- [`lookup_fast`](https://lwn.net/Articles/649115/): Called to look at data inside the directory cache.
-- [`d_lookup`](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/dcache.c?id=052b398a43a7de8c68c13e7fa05d6b3d16ce6801#n2223):
- Called when the desired file is not inside the directory cache.
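-
-A kretprobe is what exposes the return value. As an illustration with the standalone `bpftrace` tool (an assumption, not
-part of `ebpf.plugin`), counting `d_lookup` calls that returned no dentry:
-
-```bash
-# retval is the dentry pointer returned by d_lookup; 0 (NULL) means the entry was not found
-sudo bpftrace -e 'kretprobe:d_lookup /retval == 0/ { @not_found = count(); }'
-```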
-
-##### Directory Cache Interpretation
-
-When the directory cache shows 100%, it means that every accessed file was present in the directory cache.
-If files are not present in the directory cache, they are either not present in the file system or they were not
-accessed before.
-
-### Mount Points
-
-The following `tracing` points are used to collect `mount` and `umount` call counts:
-
-- [`mount`](https://man7.org/linux/man-pages/man2/mount.2.html): mount filesystem on host.
-- [`umount`](https://man7.org/linux/man-pages/man2/umount.2.html): umount filesystem on host.
-
-### Networking Stack
-
-Netdata monitors socket bandwidth by attaching `tracing` to internal functions.
-
-#### TCP outbound connections
-
-This chart demonstrates calls to `tcp_v4_connect` and `tcp_v6_connect`, which start connections for IPv4 and IPv6, respectively.
-
-#### TCP inbound connections
-
-This chart demonstrates TCP and UDP connections that the host receives.
-To collect this information, Netdata attaches `tracing` to `inet_csk_accept`.
-
-#### TCP bandwidth functions
-
-This chart demonstrates calls to functions `tcp_sendmsg`, `tcp_cleanup_rbuf`, and `tcp_close`; these functions are used
-to send & receive data and to close connections when `TCP` protocol is used.
-
-#### TCP bandwidth
-
-This chart demonstrates calls to functions:
-
-- `tcp_sendmsg`: Function responsible for sending data to a specified destination.
-- `tcp_cleanup_rbuf`: We use this function instead of `tcp_recvmsg` because the latter misses `tcp_read_sock` traffic,
-  and we would also need to add more `tracing` to get the socket and packet size.
-- `tcp_close`: Function responsible for closing connections.
-
-#### TCP retransmit
-
-This chart demonstrates calls to the function `tcp_retransmit_skb`, which is responsible for executing TCP retransmission when the
-receiver did not acknowledge the packet within the expected time.
-
-#### UDP functions
-
-This chart demonstrates calls to functions `udp_sendmsg` and `udp_recvmsg`, which are responsible for sending &
-receiving data for connections when the `UDP` protocol is used.
-
-#### UDP bandwidth
-
-Like the previous chart, this one also monitors `udp_sendmsg` and `udp_recvmsg`, but instead of showing the number of
-calls, it monitors the number of bytes sent and received.
-
-### Apps
-
-#### OOM Killing
-
-These are tracepoints related to [OOM](https://en.wikipedia.org/wiki/Out_of_memory) killing processes.
-
-- `oom/mark_victim`: Monitors when an oomkill event happens.
-
-## Known issues
-
-### Performance optimization
-
-eBPF monitoring is complex and produces a large volume of metrics. We've discovered scenarios where the eBPF plugin
-significantly increases kernel memory usage by several hundred MB.
-
-When the integration with apps or cgroups is enabled, the eBPF collector allocates memory for each running process. If your
-node is experiencing high memory usage and there is no obvious culprit to be found in the `apps.mem` chart, consider the
-options below (a configuration sketch follows the list):
-
-- Modify [maps per core](#maps-per-core) to use only one map.
-- Disable [integration with apps](#integration-with-appsplugin).
-- Disable [integration with cgroup](#integration-with-cgroupsplugin).
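-
-A sketch of the relevant `ebpf.d.conf` options (option names come from the sections linked above; the values here are
-illustrative, not recommended defaults):
-
-```conf
-[global]
-    # use a single map instead of one map per core
-    maps per core = no
-    # shrink the PID table to reduce kernel memory usage (example value)
-    pid table size = 1024
-```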
-
-If, after these changes, you still suspect that eBPF is using too much memory, test for high kernel memory usage
-by [disabling eBPF monitoring](#configuring-ebpfplugin).
-Next, [restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) with
-`sudo systemctl restart netdata` and check whether system memory usage (see the `system.ram` chart) has dropped significantly.
-
-Beginning with `v1.31`, kernel memory usage is configurable via the [`pid table size` setting](#pid-table-size)
-in `ebpf.conf`.
-
-Total memory usage is a well-known [issue](https://lore.kernel.org/all/167821082315.1693.6957546778534183486.git-patchwork-notify@kernel.org/)
-for eBPF; it is not a bug in the plugin.
-
-### SELinux
-
-When [SELinux](https://www.redhat.com/en/topics/linux/what-is-selinux) is enabled, it may prevent `ebpf.plugin` from
-starting correctly. Check the Agent's `error.log` file for errors like the ones below:
-
-```bash
-2020-06-14 15:32:08: ebpf.plugin ERROR : EBPF PROCESS : Cannot load program: /usr/libexec/netdata/plugins.d/pnetdata_ebpf_process.3.10.0.o (errno 13, Permission denied)
-2020-06-14 15:32:19: netdata ERROR : PLUGINSD[ebpf] : read failed: end of file (errno 9, Bad file descriptor)
-```
-
-You can also check for errors related to `ebpf.plugin` inside `/var/log/audit/audit.log`:
-
-```bash
-type=AVC msg=audit(1586260134.952:97): avc: denied { map_create } for pid=1387 comm="ebpf.pl" scontext=system_u:system_r:unconfined_service_t:s0 tcontext=system_u:system_r:unconfined_service_t:s0 tclass=bpf permissive=0
-type=SYSCALL msg=audit(1586260134.952:97): arch=c000003e syscall=321 success=no exit=-13 a0=0 a1=7ffe6b36f000 a2=70 a3=0 items=0 ppid=1135 pid=1387 auid=4294967295 uid=994 gid=990 euid=0 suid=0 fsuid=0 egid=990 sgid=990 fsgid=990 tty=(none) ses=4294967295 comm="ebpf_proc
-ess.pl" exe="/usr/libexec/netdata/plugins.d/ebpf.plugin" subj=system_u:system_r:unconfined_service_t:s0 key=(null)
-```
-
-If you see similar errors, you will have to adjust SELinux's policies to enable the eBPF collector.
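-
-Before creating a policy, you can confirm that SELinux is actually in enforcing mode:
-
-```bash
-# prints Enforcing, Permissive, or Disabled
-getenforce
-```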
-
-#### Creation of bpf policies
-
-To enable `ebpf.plugin` to run on a distribution with SELinux enabled, it will be necessary to take the following
-actions.
-
-First, stop the Netdata Agent.
-
-```bash
-# systemctl stop netdata
-```
-
-Next, create a policy with the `audit.log` file you examined earlier.
-
-```bash
-# grep ebpf.plugin /var/log/audit/audit.log | audit2allow -M netdata_ebpf
-```
-
-This will create two new files: `netdata_ebpf.te` and `netdata_ebpf.mod`.
-
-Edit the `netdata_ebpf.te` file to change the options `class` and `allow`. You should have the following at the end of
-the `netdata_ebpf.te` file.
-
-```conf
-module netdata_ebpf 1.0;
-require {
- type unconfined_service_t;
- class bpf { map_create map_read map_write prog_load prog_run };
-}
-#============= unconfined_service_t ==============
-allow unconfined_service_t self:bpf { map_create map_read map_write prog_load prog_run };
-```
-
-Then compile your `netdata_ebpf.te` file with the following commands to create a binary that loads the new policies:
-
-```bash
-# checkmodule -M -m -o netdata_ebpf.mod netdata_ebpf.te
-# semodule_package -o netdata_ebpf.pp -m netdata_ebpf.mod
-```
-
-Finally, you can load the new policy and start the Netdata agent again:
-
-```bash
-# semodule -i netdata_ebpf.pp
-# systemctl start netdata
-```
-
-### Linux kernel lockdown
-
-Beginning with [version 5.4](https://www.zdnet.com/article/linux-to-get-kernel-lockdown-feature/), the Linux kernel has
-a feature called "lockdown," which may affect `ebpf.plugin` depending on how the kernel was compiled. The following table
-shows how the lockdown module impacts `ebpf.plugin` based on the selected options:
-
-| Enforcing kernel lockdown | Enable lockdown LSM early in init | Default lockdown mode | Can `ebpf.plugin` run with this? |
-| :------------------------ | :-------------------------------- | :-------------------- | :------------------------------- |
-| YES                       | NO                                | NO                    | YES                              |
-| YES                       | YES                               | None                  | YES                              |
-| YES                       | YES                               | Integrity             | YES                              |
-| YES                       | YES                               | Confidentiality       | NO                               |
-
-If you or your distribution compiled the kernel with the last combination shown above, your system cannot load the shared
-libraries required to run `ebpf.plugin`.
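-
-On kernels built with the lockdown LSM, you can check the active mode at runtime (assuming securityfs is mounted at
-`/sys/kernel/security`):
-
-```bash
-# the active mode is shown in brackets, e.g. "none [integrity] confidentiality"
-cat /sys/kernel/security/lockdown
-```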
-
-## Functions
-
-### ebpf_thread
-
-The eBPF plugin has a [function](https://github.com/netdata/netdata/blob/master/docs/cloud/netdata-functions.md) named
-`ebpf_thread` that controls its internal threads and helps to reduce the overhead on the host. Using the function, you
-can run the plugin with all threads disabled and enable them only when you want to take a look at specific areas.
-
-#### List threads
-
-To list the status of all threads, you can query the function endpoint directly:
-
-`http://localhost:19999/api/v1/function?function=ebpf_thread`
-
-It is also possible to query a specific thread by adding the keyword `thread` and the thread name:
-
-`http://localhost:19999/api/v1/function?function=ebpf_thread%20thread:mount`
-
-#### Enable thread
-
-It is possible to enable a specific thread using the keyword `enable`:
-
-`http://localhost:19999/api/v1/function?function=ebpf_thread%20enable:mount`
-
-This will run the `mount` thread for 300 seconds (5 minutes). You can specify a different period by appending it
-after the thread name:
-
-`http://localhost:19999/api/v1/function?function=ebpf_thread%20enable:mount:600`
-
-In this example, the `mount` thread will run for 600 seconds (10 minutes).
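-
-For example, driving the endpoint with `curl` against a local Agent:
-
-```bash
-# enable the mount thread for 10 minutes, then check its status
-curl "http://localhost:19999/api/v1/function?function=ebpf_thread%20enable:mount:600"
-curl "http://localhost:19999/api/v1/function?function=ebpf_thread%20thread:mount"
-```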
-
-#### Disable thread
-
-It is also possible to stop any running thread using the keyword `disable`. For example, to disable `cachestat` you can
-request:
-
-`http://localhost:19999/api/v1/function?function=ebpf_thread%20disable:cachestat`
-
-#### Debugging threads
-
-You can verify the impact of threads on the host by running the
-[ebpf_thread_function.sh](https://github.com/netdata/netdata/blob/master/tests/ebpf/ebpf_thread_function.sh)
-script on your environment.
-
-You can check the results of having threads running in your environment in the Netdata monitoring section of your
-dashboard.
-
-<img src="https://github.com/netdata/netdata/assets/49162938/91823573-114c-4c16-b634-cc46f7bb1bcf" alt="Threads running." />
-
-### ebpf_socket
-
-The eBPF plugin has a [function](https://github.com/netdata/netdata/blob/master/docs/cloud/netdata-functions.md) named
-`ebpf_socket` that shows the current status of open sockets on host.
-
-#### Families
-
-By default, the plugin shows sockets for IPv4 and IPv6, but it is possible to select a specific family by passing the
-family as an argument:
-
-`http://localhost:19999/api/v1/function?function=ebpf_socket%20family:IPV4`
-
-#### Resolve
-
-The plugin resolves ports to service names by default. You can show the port number by disabling the name resolution:
-
-`http://localhost:19999/api/v1/function?function=ebpf_socket%20resolve:NO`
-
-#### CIDR
-
-The plugin shows connections for all possible destination IPs by default. You can limit the range by specifying the CIDR:
-
-`http://localhost:19999/api/v1/function?function=ebpf_socket%20cidr:192.168.1.0/24`
-
-#### PORT
-
-The plugin shows connections for all possible ports by default. You can limit the range by specifying a port or range
-of ports:
-
-`http://localhost:19999/api/v1/function?function=ebpf_socket%20port:1-1024`
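-
-As with `ebpf_thread`, you can drive these endpoints with `curl` against a local Agent, one filter per request:
-
-```bash
-# IPv4 sockets only, then privileged ports only
-curl "http://localhost:19999/api/v1/function?function=ebpf_socket%20family:IPV4"
-curl "http://localhost:19999/api/v1/function?function=ebpf_socket%20port:1-1024"
-```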
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
deleted file mode 100644
index a8e621643..000000000
--- a/collectors/ebpf.plugin/ebpf.c
+++ /dev/null
@@ -1,4126 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <ifaddrs.h>
-
-#include "ebpf.h"
-#include "ebpf_socket.h"
-#include "ebpf_unittest.h"
-#include "libnetdata/required_dummies.h"
-
-/*****************************************************************
- *
- * GLOBAL VARIABLES
- *
- *****************************************************************/
-
-char *ebpf_plugin_dir = PLUGINS_DIR;
-static char *ebpf_configured_log_dir = LOG_DIR;
-
-char *ebpf_algorithms[] = {"absolute", "incremental"};
-struct config collector_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-int running_on_kernel = 0;
-int ebpf_nprocs;
-int isrh = 0;
-int main_thread_id = 0;
-int process_pid_fd = -1;
-static size_t global_iterations_counter = 1;
-bool publish_internal_metrics = true;
-
-pthread_mutex_t lock;
-pthread_mutex_t ebpf_exit_cleanup;
-pthread_mutex_t collect_data_mutex;
-
-struct netdata_static_thread cgroup_integration_thread = {
- .name = "EBPF CGROUP INT",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
-ebpf_module_t ebpf_modules[] = {
- { .info = {.thread_name = "process",
- .config_name = "process",
- .thread_description = NETDATA_EBPF_MODULE_PROCESS_DESC},
- .functions = {.start_routine = ebpf_process_thread,
- .apps_routine = ebpf_process_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &process_config,
- .config_file = NETDATA_PROCESS_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10 |
- NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0 },
- { .info = {.thread_name = "socket",
- .config_name = "socket",
- .thread_description = NETDATA_EBPF_SOCKET_MODULE_DESC},
- .functions = {.start_routine = ebpf_socket_thread,
- .apps_routine = ebpf_socket_create_apps_charts,
- .fnct_routine = ebpf_socket_read_open_connections,
- .fcnt_name = EBPF_FUNCTION_SOCKET,
- .fcnt_desc = EBPF_PLUGIN_SOCKET_FUNCTION_DESCRIPTION,
- .fcnt_thread_chart_name = NULL,
- .fcnt_thread_lifetime_name = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &socket_config,
- .config_file = NETDATA_NETWORK_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = socket_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "cachestat", .config_name = "cachestat", .thread_description = NETDATA_EBPF_CACHESTAT_MODULE_DESC},
- .functions = {.start_routine = ebpf_cachestat_thread,
- .apps_routine = ebpf_cachestat_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = cachestat_maps, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &cachestat_config,
- .config_file = NETDATA_CACHESTAT_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18|
- NETDATA_V5_4 | NETDATA_V5_14 | NETDATA_V5_15 | NETDATA_V5_16,
- .load = EBPF_LOAD_LEGACY, .targets = cachestat_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "sync",
- .config_name = "sync",
- .thread_description = NETDATA_EBPF_SYNC_MODULE_DESC},
- .functions = {.start_routine = ebpf_sync_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .maps = NULL,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &sync_config,
- .config_file = NETDATA_SYNC_CONFIG_FILE,
- // All syscalls have the same kernels
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = sync_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "dc",
- .config_name = "dc",
- .thread_description = NETDATA_EBPF_DC_MODULE_DESC},
- .functions = {.start_routine = ebpf_dcstat_thread,
- .apps_routine = ebpf_dcstat_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = dcstat_maps,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &dcstat_config,
- .config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = dc_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "swap", .config_name = "swap", .thread_description = NETDATA_EBPF_SWAP_MODULE_DESC},
- .functions = {.start_routine = ebpf_swap_thread,
- .apps_routine = ebpf_swap_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config,
- .config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = swap_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "vfs",
- .config_name = "vfs",
- .thread_description = NETDATA_EBPF_VFS_MODULE_DESC},
- .functions = {.start_routine = ebpf_vfs_thread,
- .apps_routine = ebpf_vfs_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config,
- .config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = vfs_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "filesystem", .config_name = "filesystem", .thread_description = NETDATA_EBPF_FS_MODULE_DESC},
- .functions = {.start_routine = ebpf_filesystem_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config,
- .config_file = NETDATA_FILESYSTEM_CONFIG_FILE,
-    // We are setting kernels to zero because we load eBPF programs according to the running kernel.
- .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "disk",
- .config_name = "disk",
- .thread_description = NETDATA_EBPF_DISK_MODULE_DESC},
- .functions = {.start_routine = ebpf_disk_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config,
- .config_file = NETDATA_DISK_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "mount",
- .config_name = "mount",
- .thread_description = NETDATA_EBPF_MOUNT_MODULE_DESC},
- .functions = {.start_routine = ebpf_mount_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config,
- .config_file = NETDATA_MOUNT_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = mount_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = { .thread_name = "fd",
- .config_name = "fd",
- .thread_description = NETDATA_EBPF_FD_MODULE_DESC},
- .functions = {.start_routine = ebpf_fd_thread,
- .apps_routine = ebpf_fd_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fd_config,
- .config_file = NETDATA_FD_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_11 |
- NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = fd_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = { .thread_name = "hardirq",
- .config_name = "hardirq",
- .thread_description = NETDATA_EBPF_HARDIRQ_MODULE_DESC},
- .functions = {.start_routine = ebpf_hardirq_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &hardirq_config,
- .config_file = NETDATA_HARDIRQ_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = { .thread_name = "softirq",
- .config_name = "softirq",
- .thread_description = NETDATA_EBPF_SOFTIRQ_MODULE_DESC},
- .functions = {.start_routine = ebpf_softirq_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL },
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &softirq_config,
- .config_file = NETDATA_SOFTIRQ_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "oomkill",
- .config_name = "oomkill",
- .thread_description = NETDATA_EBPF_OOMKILL_MODULE_DESC},
- .functions = {.start_routine = ebpf_oomkill_thread,
- .apps_routine = ebpf_oomkill_create_apps_charts,
- .fnct_routine = NULL},.enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &oomkill_config,
- .config_file = NETDATA_OOMKILL_CONFIG_FILE,
- .kernels = NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "shm",
- .config_name = "shm",
- .thread_description = NETDATA_EBPF_SHM_MODULE_DESC},
- .functions = {.start_routine = ebpf_shm_thread,
- .apps_routine = ebpf_shm_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &shm_config,
- .config_file = NETDATA_DIRECTORY_SHM_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = shm_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = { .thread_name = "mdflush",
- .config_name = "mdflush",
- .thread_description = NETDATA_EBPF_MD_MODULE_DESC},
- .functions = {.start_routine = ebpf_mdflush_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mdflush_config,
- .config_file = NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = mdflush_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = { .thread_name = "functions",
- .config_name = "functions",
- .thread_description = NETDATA_EBPF_FUNCTIONS_MODULE_DESC},
- .functions = {.start_routine = ebpf_function_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = NULL,
- .config_file = NETDATA_DIRECTORY_FUNCTIONS_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = NULL, .config_name = NULL},
- .functions = {.start_routine = NULL, .apps_routine = NULL, .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY,
- .global_charts = 0, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET,
- .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .maps = NULL,
- .pid_map_size = 0, .names = NULL, .cfg = NULL, .kernels = 0, .load = EBPF_LOAD_LEGACY,
- .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
-};
-
-struct netdata_static_thread ebpf_threads[] = {
- {
- .name = "EBPF PROCESS",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF SOCKET",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF CACHESTAT",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF SYNC",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF DCSTAT",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF SWAP",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF VFS",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF FILESYSTEM",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF DISK",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF MOUNT",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF FD",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF HARDIRQ",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF SOFTIRQ",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF OOMKILL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF SHM",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF MDFLUSH",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF FUNCTIONS",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
-#ifdef NETDATA_DEV_MODE
- .enabled = 1,
-#else
- .enabled = 0,
-#endif
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = NULL,
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 0,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
-};
-
-ebpf_filesystem_partitions_t localfs[] =
- {{.filesystem = "ext4",
- .optional_filesystem = NULL,
- .family = "ext4",
- .objects = NULL,
- .probe_links = NULL,
- .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
- .enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
- .fs_maps = NULL,
- .fs_obj = NULL,
- .functions = { "ext4_file_read_iter",
- "ext4_file_write_iter",
- "ext4_file_open",
- "ext4_sync_file",
- NULL }},
- {.filesystem = "xfs",
- .optional_filesystem = NULL,
- .family = "xfs",
- .objects = NULL,
- .probe_links = NULL,
- .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
- .enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
- .fs_maps = NULL,
- .fs_obj = NULL,
- .functions = { "xfs_file_read_iter",
- "xfs_file_write_iter",
- "xfs_file_open",
- "xfs_file_fsync",
- NULL }},
- {.filesystem = "nfs",
- .optional_filesystem = "nfs4",
- .family = "nfs",
- .objects = NULL,
- .probe_links = NULL,
- .flags = NETDATA_FILESYSTEM_ATTR_CHARTS,
- .enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
- .fs_maps = NULL,
- .fs_obj = NULL,
- .functions = { "nfs_file_read",
- "nfs_file_write",
- "nfs_open",
- "nfs_getattr",
-                                   NULL }}, // "nfs4_file_open" - not present on all kernels
- {.filesystem = "zfs",
- .optional_filesystem = NULL,
- .family = "zfs",
- .objects = NULL,
- .probe_links = NULL,
- .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
- .enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
- .fs_maps = NULL,
- .fs_obj = NULL,
- .functions = { "zpl_iter_read",
- "zpl_iter_write",
- "zpl_open",
- "zpl_fsync",
- NULL }},
- {.filesystem = "btrfs",
- .optional_filesystem = NULL,
- .family = "btrfs",
- .objects = NULL,
- .probe_links = NULL,
- .flags = NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE,
- .enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = "btrfs_file_operations", .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10,
- .fs_maps = NULL,
- .fs_obj = NULL,
- .functions = { "btrfs_file_read_iter",
- "btrfs_file_write_iter",
- "btrfs_file_open",
- "btrfs_sync_file",
- NULL }},
- {.filesystem = NULL,
- .optional_filesystem = NULL,
- .family = NULL,
- .objects = NULL,
- .probe_links = NULL,
- .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
- .enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0},
- .kernels = 0, .fs_maps = NULL, .fs_obj = NULL}};
-
-ebpf_sync_syscalls_t local_syscalls[] = {
- {.syscall = NETDATA_SYSCALLS_SYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- },
- {.syscall = NETDATA_SYSCALLS_SYNCFS, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- },
- {.syscall = NETDATA_SYSCALLS_MSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- },
- {.syscall = NETDATA_SYSCALLS_FSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- },
- {.syscall = NETDATA_SYSCALLS_FDATASYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- },
- {.syscall = NETDATA_SYSCALLS_SYNC_FILE_RANGE, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- },
- {.syscall = NULL, .enabled = CONFIG_BOOLEAN_NO, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- }
-};
-
-
-// Link with cgroup.plugin
-netdata_ebpf_cgroup_shm_t shm_ebpf_cgroup = {NULL, NULL};
-int shm_fd_ebpf_cgroup = -1;
-sem_t *shm_sem_ebpf_cgroup = SEM_FAILED;
-pthread_mutex_t mutex_cgroup_shm;
-
-//Network viewer
-ebpf_network_viewer_options_t network_viewer_opt;
-
-// Statistic
-ebpf_plugin_stats_t plugin_statistics = {.core = 0, .legacy = 0, .running = 0, .threads = 0, .tracepoints = 0,
- .probes = 0, .retprobes = 0, .trampolines = 0, .memlock_kern = 0,
- .hash_tables = 0};
-netdata_ebpf_judy_pid_t ebpf_judy_pid = {.pid_table = NULL, .index = {.JudyLArray = NULL}};
-bool ebpf_plugin_exit = false;
-
-#ifdef LIBBPF_MAJOR_VERSION
-struct btf *default_btf = NULL;
-struct cachestat_bpf *cachestat_bpf_obj = NULL;
-struct dc_bpf *dc_bpf_obj = NULL;
-struct disk_bpf *disk_bpf_obj = NULL;
-struct fd_bpf *fd_bpf_obj = NULL;
-struct hardirq_bpf *hardirq_bpf_obj = NULL;
-struct mdflush_bpf *mdflush_bpf_obj = NULL;
-struct mount_bpf *mount_bpf_obj = NULL;
-struct shm_bpf *shm_bpf_obj = NULL;
-struct socket_bpf *socket_bpf_obj = NULL;
-struct swap_bpf *bpf_obj = NULL;
-struct vfs_bpf *vfs_bpf_obj = NULL;
-#else
-void *default_btf = NULL;
-#endif
-char *btf_path = NULL;
-
-/*****************************************************************
- *
- * FUNCTIONS USED TO MANIPULATE JUDY ARRAY
- *
- *****************************************************************/
-
-/**
- * Hashtable insert unsafe
- *
- * Find or create the value slot associated with the given key.
- *
- * @return It returns a pointer to the value slot for the key: the slot holds NULL when the key
- * was just added, otherwise it holds the existing value. We return a pointer to a pointer so
- * the caller can store anything needed at the value of the index. The returned pointer must be
- * used before any other operation that may change the index (insert/delete).
- *
- */
-void **ebpf_judy_insert_unsafe(PPvoid_t arr, Word_t key)
-{
- JError_t J_Error;
- Pvoid_t *idx = JudyLIns(arr, key, &J_Error);
- if (unlikely(idx == PJERR)) {
- netdata_log_error("Cannot add PID to JudyL, JU_ERRNO_* == %u, ID == %d",
- JU_ERRNO(&J_Error), JU_ERRID(&J_Error));
- }
-
- return idx;
-}
-
-/**
- * Get PID from judy
- *
- * Get a pointer to the statistics for `pid` from the judy array, creating the entry when absent.
- *
- * @param judy_array a judy array where PID is the primary key
- * @param pid the pid stored.
- *
- * @return It returns a pointer to the PID statistics.
- */
-netdata_ebpf_judy_pid_stats_t *ebpf_get_pid_from_judy_unsafe(PPvoid_t judy_array, uint32_t pid)
-{
- netdata_ebpf_judy_pid_stats_t **pid_pptr =
- (netdata_ebpf_judy_pid_stats_t **)ebpf_judy_insert_unsafe(judy_array, pid);
- netdata_ebpf_judy_pid_stats_t *pid_ptr = *pid_pptr;
- if (likely(*pid_pptr == NULL)) {
- // a new PID added to the index
- *pid_pptr = aral_mallocz(ebpf_judy_pid.pid_table);
-
- pid_ptr = *pid_pptr;
-
- pid_ptr->cmdline = NULL;
- pid_ptr->socket_stats.JudyLArray = NULL;
- rw_spinlock_init(&pid_ptr->socket_stats.rw_spinlock);
- }
-
- return pid_ptr;
-}
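-
-/*
- * Usage sketch (illustrative only, not part of the module): the `_unsafe` suffix means the
- * caller must serialize access. Assuming the index wrapper exposes a `rw_spinlock` like
- * `socket_stats` does, a lookup would be wrapped as follows:
- *
- *     rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock);
- *     netdata_ebpf_judy_pid_stats_t *entry =
- *         ebpf_get_pid_from_judy_unsafe(&ebpf_judy_pid.index.JudyLArray, (uint32_t)pid);
- *     if (entry && !entry->cmdline)
- *         entry->cmdline = strdupz(comm); // hypothetical: cache the process name
- *     rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
- */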
-
-/*****************************************************************
- *
- * FUNCTIONS USED TO ALLOCATE APPS/CGROUP MEMORIES (ARAL)
- *
- *****************************************************************/
-
-/**
- * Allocate PID ARAL
- *
- * Allocate memory using ARAL functions to speed up processing.
- *
- * @param name the internal name used for allocated region.
- * @param size size of each element inside allocated space
- *
- * @return It returns the address on success and NULL otherwise.
- */
-ARAL *ebpf_allocate_pid_aral(char *name, size_t size)
-{
- static size_t max_elements = NETDATA_EBPF_ALLOC_MAX_PID;
- if (max_elements < NETDATA_EBPF_ALLOC_MIN_ELEMENTS) {
-        netdata_log_error("Number of elements given is too small, adjusting it to %d", NETDATA_EBPF_ALLOC_MIN_ELEMENTS);
- max_elements = NETDATA_EBPF_ALLOC_MIN_ELEMENTS;
- }
-
- return aral_create(name, size,
- 0, max_elements,
- NULL, NULL, NULL, false, false);
-}
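-
-/*
- * Usage sketch (illustrative only; the region name is hypothetical): create the region once,
- * then serve fixed-size elements from it with aral_mallocz()/aral_freez() instead of
- * malloc()/free():
- *
- *     ebpf_judy_pid.pid_table = ebpf_allocate_pid_aral("ebpf_pids",
- *                                                      sizeof(netdata_ebpf_judy_pid_stats_t));
- *     netdata_ebpf_judy_pid_stats_t *p = aral_mallocz(ebpf_judy_pid.pid_table);
- *     // ... use p ...
- *     aral_freez(ebpf_judy_pid.pid_table, p);
- */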
-
-/*****************************************************************
- *
- * FUNCTIONS USED TO CLEAN MEMORY AND OPERATE SYSTEM FILES
- *
- *****************************************************************/
-
-/**
- * Wait to avoid possible coredumps while the process is closing.
- */
-static inline void ebpf_check_before2go()
-{
- int i = EBPF_OPTION_ALL_CHARTS;
- usec_t max = USEC_PER_SEC, step = 200000;
- while (i && max) {
- max -= step;
- sleep_usec(step);
- i = 0;
- int j;
- pthread_mutex_lock(&ebpf_exit_cleanup);
- for (j = 0; ebpf_modules[j].info.thread_name != NULL; j++) {
- if (ebpf_modules[j].enabled < NETDATA_THREAD_EBPF_STOPPING)
- i++;
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-
- if (i) {
-        netdata_log_error("eBPF could not unload all threads in time, but it will exit anyway");
- }
-}
-
-/**
- * Close the collector gracefully
- */
-static void ebpf_exit()
-{
-#ifdef LIBBPF_MAJOR_VERSION
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (default_btf) {
- btf__free(default_btf);
- default_btf = NULL;
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-#endif
-
- char filename[FILENAME_MAX + 1];
- ebpf_pid_file(filename, FILENAME_MAX);
- if (unlink(filename))
- netdata_log_error("Cannot remove PID file %s", filename);
-
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_error("Good bye world! I was PID %d", main_thread_id);
-#endif
- fprintf(stdout, "EXIT\n");
- fflush(stdout);
-
- ebpf_check_before2go();
- pthread_mutex_lock(&mutex_cgroup_shm);
- if (shm_ebpf_cgroup.header) {
- ebpf_unmap_cgroup_shared_memory();
- shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-
- exit(0);
-}
-
-/**
- * Unload legacy code
- *
- * @param objects objects loaded from eBPF programs
- * @param probe_links links from loader
- */
-void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe_links)
-{
- if (!probe_links || !objects)
- return;
-
- struct bpf_program *prog;
-    size_t j = 0;
- bpf_object__for_each_program(prog, objects) {
- bpf_link__destroy(probe_links[j]);
- j++;
- }
- freez(probe_links);
-    bpf_object__close(objects);
-}
-
-/**
- * Unload Unique maps
- *
- * This function unloads all BPF maps from threads that share one unique BPF object.
- */
-static void ebpf_unload_unique_maps()
-{
- int i;
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
-        // Only the socket thread is handled here; the other threads are cleaned by dedicated functions
- if (i != EBPF_MODULE_SOCKET_IDX)
- continue;
-
- if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_STOPPED) {
- if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_NOT_RUNNING)
- netdata_log_error("Cannot unload maps for thread %s, because it is not stopped.",
- ebpf_modules[i].info.thread_name);
-
- continue;
- }
-
- if (ebpf_modules[i].load == EBPF_LOAD_LEGACY) {
- ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
- continue;
- }
-
-#ifdef LIBBPF_MAJOR_VERSION
- if (socket_bpf_obj)
- socket_bpf__destroy(socket_bpf_obj);
-#endif
- }
-}
-
-/**
- * Unload filesystem maps
- *
- * This function unloads all BPF maps from the filesystem thread.
- */
-static void ebpf_unload_filesystems()
-{
- if (ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_NOT_RUNNING ||
- ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].enabled < NETDATA_THREAD_EBPF_STOPPING ||
- ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].load != EBPF_LOAD_LEGACY)
- return;
-
- int i;
- for (i = 0; localfs[i].filesystem != NULL; i++) {
- if (!localfs[i].objects)
- continue;
-
- ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
- }
-}
-
-/**
- * Unload sync maps
- *
- * This function unloads all BPF maps from the sync thread.
- */
-static void ebpf_unload_sync()
-{
- if (ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_NOT_RUNNING ||
- ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled < NETDATA_THREAD_EBPF_STOPPING)
- return;
-
- int i;
- for (i = 0; local_syscalls[i].syscall != NULL; i++) {
- if (!local_syscalls[i].enabled)
- continue;
-
-#ifdef LIBBPF_MAJOR_VERSION
- if (local_syscalls[i].sync_obj) {
- sync_bpf__destroy(local_syscalls[i].sync_obj);
- continue;
- }
-#endif
- ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
- }
-}
-
-/**
- * Close the collector gracefully
- *
- * @param sig is the signal number used to close the collector
- */
-void ebpf_stop_threads(int sig)
-{
- UNUSED(sig);
- static int only_one = 0;
-
- // Child thread should be closed by itself.
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (main_thread_id != gettid() || only_one) {
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
- only_one = 1;
- int i;
- for (i = 0; ebpf_modules[i].info.thread_name != NULL; i++) {
- if (ebpf_modules[i].enabled < NETDATA_THREAD_EBPF_STOPPING) {
- netdata_thread_cancel(*ebpf_modules[i].thread->thread);
-#ifdef NETDATA_DEV_MODE
- netdata_log_info("Sending cancel for thread %s", ebpf_modules[i].info.thread_name);
-#endif
- }
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- netdata_thread_cancel(*cgroup_integration_thread.thread);
-#ifdef NETDATA_DEV_MODE
- netdata_log_info("Sending cancel for thread %s", cgroup_integration_thread.name);
-#endif
- pthread_mutex_unlock(&mutex_cgroup_shm);
-
- ebpf_plugin_exit = true;
-
- ebpf_check_before2go();
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- ebpf_unload_unique_maps();
- ebpf_unload_filesystems();
- ebpf_unload_sync();
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- ebpf_exit();
-}
-
-/*****************************************************************
- *
- * FUNCTIONS TO CREATE CHARTS
- *
- *****************************************************************/
-
-/**
- * Create apps for module
- *
- * Create apps chart that will be used with specific module
- *
- * @param em the module main structure.
- * @param root a pointer for the targets.
- */
-static inline void ebpf_create_apps_for_module(ebpf_module_t *em, struct ebpf_target *root) {
- if (em->enabled < NETDATA_THREAD_EBPF_STOPPING && em->apps_charts && em->functions.apps_routine)
- em->functions.apps_routine(em, root);
-}
-
-/**
- * Create apps charts
- *
- * Call ebpf_create_chart to create the charts on apps submenu.
- *
- * @param root a pointer for the targets.
- */
-static void ebpf_create_apps_charts(struct ebpf_target *root)
-{
- if (unlikely(!ebpf_all_pids))
- return;
-
- struct ebpf_target *w;
- int newly_added = 0;
-
- for (w = root; w; w = w->next) {
- if (w->target)
- continue;
-
- if (unlikely(w->processes && (debug_enabled || w->debug_enabled))) {
- struct ebpf_pid_on_target *pid_on_target;
-
- fprintf(
- stderr, "ebpf.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes,
- (w->processes == 1) ? "" : "es");
-
- for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
- fprintf(stderr, " %d", pid_on_target->pid);
- }
-
- fputc('\n', stderr);
- }
-
- if (!w->exposed && w->processes) {
- newly_added++;
- w->exposed = 1;
- if (debug_enabled || w->debug_enabled)
- debug_log_int("%s just added - regenerating charts.", w->name);
- }
- }
-
- int i;
- if (!newly_added) {
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX ; i++) {
- ebpf_module_t *current = &ebpf_modules[i];
- if (current->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
- continue;
-
- ebpf_create_apps_for_module(current, root);
- }
- return;
- }
-
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX ; i++) {
- ebpf_module_t *current = &ebpf_modules[i];
- ebpf_create_apps_for_module(current, root);
- }
-}
-
-/**
- * Get a value from a structure.
- *
- * @param basis it is the first address of the structure
- * @param offset it is the offset of the data you want to access.
- * @return It returns the absolute value stored at the given offset.
- */
-collected_number get_value_from_structure(char *basis, size_t offset)
-{
- collected_number *value = (collected_number *)(basis + offset);
-
- collected_number ret = (collected_number)llabs(*value);
-    // this reset is necessary to avoid keeping a constant value while the process is not executing a task
- *value = 0;
-
- return ret;
-}
-
-/**
- * Write set command on standard output
- *
- * @param dim the dimension name
- * @param value the value for the dimension
- */
-void write_chart_dimension(char *dim, long long value)
-{
- printf("SET %s = %lld\n", dim, value);
-}
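-
-/*
- * For reference, each SET is framed by the BEGIN/END written by
- * ebpf_write_begin_chart()/ebpf_write_end_chart(), producing a plugins.d frame
- * such as (chart and dimension names below are hypothetical):
- *
- *     BEGIN ebpf.example_chart
- *     SET example_dim = 7
- *     END
- */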
-
-/**
- * Write the dimension values of a chart to standard output.
- *
- * @param name the chart name
- * @param family the chart family
- * @param move the pointer with the values that will be published
- * @param end the number of values that will be written on standard output
- */
-void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end)
-{
- ebpf_write_begin_chart(family, name, "");
-
- uint32_t i = 0;
- while (move && i < end) {
- write_chart_dimension(move->name, move->ncall);
-
- move = move->next;
- i++;
- }
-
- ebpf_write_end_chart();
-}
-
-/**
- * Write the error counters of a chart to standard output.
- *
- * @param name the chart name
- * @param family the chart family
- * @param move the pointer with the values that will be published
- * @param end the number of values that will be written on standard output
- */
-void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, int end)
-{
- ebpf_write_begin_chart(family, name, "");
-
- int i = 0;
- while (move && i < end) {
- write_chart_dimension(move->name, move->nerr);
-
- move = move->next;
- i++;
- }
-
- ebpf_write_end_chart();
-}
-
-/**
- * Write charts
- *
- * Write the current information to publish the charts.
- *
- * @param family chart family
- * @param chart chart id
- * @param dim dimension name
- * @param v1 value.
- */
-void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long long v1)
-{
- ebpf_write_begin_chart(family, chart, "");
-
- write_chart_dimension(dim, v1);
-
- ebpf_write_end_chart();
-}
-
-/**
- * Write a read/write chart to standard output.
- *
- * @param chart the chart name
- * @param family the chart family
- * @param dwrite the write dimension name
- * @param vwrite the value for the write dimension
- * @param dread the read dimension name
- * @param vread the value for the read dimension
- */
-void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, char *dread, long long vread)
-{
- ebpf_write_begin_chart(family, chart, "");
-
- write_chart_dimension(dwrite, vwrite);
- write_chart_dimension(dread, vread);
-
- ebpf_write_end_chart();
-}
-
-/**
- * Write chart cmd on standard output
- *
- * @param type chart type
- * @param id chart id (the apps group name).
- * @param suffix suffix to differentiate charts
- * @param title chart title
- * @param units units label
- * @param family group name used to attach the chart on dashboard
- * @param charttype chart type
- * @param context chart context
- * @param order chart order
- * @param update_every update interval used by plugin
- * @param module chart module name, this is the eBPF thread.
- */
-void ebpf_write_chart_cmd(char *type, char *id, char *suffix, char *title, char *units, char *family,
- char *charttype, char *context, int order, int update_every, char *module)
-{
- printf("CHART %s.%s%s '' '%s' '%s' '%s' '%s' '%s' %d %d '' 'ebpf.plugin' '%s'\n",
- type,
- id,
- suffix,
- title,
- units,
- (family)?family:"",
- (context)?context:"",
- (charttype)?charttype:"",
- order,
- update_every,
- module);
-}
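-
-/*
- * Example of the line this prints (all values below are hypothetical): a call like
- *
- *     ebpf_write_chart_cmd("ebpf", "example", "", "An example", "calls/s", "mem",
- *                          "line", "ebpf.example", 20000, 1, "process");
- *
- * produces:
- *
- *     CHART ebpf.example '' 'An example' 'calls/s' 'mem' 'ebpf.example' 'line' 20000 1 '' 'ebpf.plugin' 'process'
- */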
-
-/**
- * Write chart cmd on standard output
- *
- * @param type chart type
- * @param id chart id
- * @param suffix add suffix to obsolete charts.
- * @param title chart title
- * @param units units label
- * @param family group name used to attach the chart on dashboard
- * @param charttype chart type
- * @param context chart context
- * @param order chart order
- * @param update_every value to overwrite the update frequency set by the server.
- */
-void ebpf_write_chart_obsolete(char *type, char *id, char *suffix, char *title, char *units, char *family,
- char *charttype, char *context, int order, int update_every)
-{
- printf("CHART %s.%s%s '' '%s' '%s' '%s' '%s' '%s' %d %d 'obsolete'\n",
- type,
- id,
- suffix,
- title,
- units,
- (family)?family:"",
- (context)?context:"",
- (charttype)?charttype:"",
- order,
- update_every);
-}
-
-/**
- * Write the dimension command on standard output
- *
- * @param name the dimension name
- * @param id the dimension id
- * @param algorithm the dimension algorithm
- */
-void ebpf_write_global_dimension(char *name, char *id, char *algorithm)
-{
- printf("DIMENSION %s %s %s 1 1\n", name, id, algorithm);
-}
-
-/**
- * Call ebpf_write_global_dimension to create the dimensions for a specific chart
- *
- * @param ptr a pointer to a structure of the type netdata_publish_syscall_t
- * @param end the number of dimensions for the structure ptr
- */
-void ebpf_create_global_dimension(void *ptr, int end)
-{
- netdata_publish_syscall_t *move = ptr;
-
- int i = 0;
- while (move && i < end) {
- ebpf_write_global_dimension(move->name, move->dimension, move->algorithm);
-
- move = move->next;
- i++;
- }
-}
-
-/**
- * Call write_chart_cmd to create the charts
- *
- * @param type chart type
- * @param id chart id
- * @param title chart title
- * @param units axis label
- * @param family group name used to attach the chart on dashboard
- * @param context chart context
- * @param charttype chart type
- * @param order order number of the specified chart
- * @param ncd a pointer to a function called to create dimensions
- * @param move a pointer for a structure that has the dimensions
- * @param end number of dimensions for the chart created
- * @param update_every update interval used with chart.
- * @param module chart module name, this is the eBPF thread.
- */
-void ebpf_create_chart(char *type,
- char *id,
- char *title,
- char *units,
- char *family,
- char *context,
- char *charttype,
- int order,
- void (*ncd)(void *, int),
- void *move,
- int end,
- int update_every,
- char *module)
-{
- ebpf_write_chart_cmd(type, id, "", title, units, family, charttype, context, order, update_every, module);
-
- if (ncd) {
- ncd(move, end);
- }
-}
-
-/**
- * Write a histogram chart to standard output.
- *
- * @param family family name
- * @param name chart name
- * @param hist histogram values
- * @param dimensions dimension names.
- * @param end number of bins that will be sent to Netdata.
- */
-void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end)
-{
- ebpf_write_begin_chart(family, name, "");
-
- uint32_t i;
- for (i = 0; i < end; i++) {
- write_chart_dimension(dimensions[i], (long long) hist[i]);
- }
-
- ebpf_write_end_chart();
-
- fflush(stdout);
-}
-
-/**
- * ARAL Charts
- *
- * Add chart to monitor ARAL usage
- * Caller must call this function with mutex locked.
- *
- * @param name the name used to create aral
- * @param em a pointer to the structure with the default values.
- */
-int ebpf_statistic_create_aral_chart(char *name, ebpf_module_t *em)
-{
- static int priority = NETATA_EBPF_ORDER_STAT_ARAL_BEGIN;
-    char *mem = NETDATA_EBPF_STAT_DIMENSION_MEMORY;
-    char *aral = NETDATA_EBPF_STAT_DIMENSION_ARAL;
-
- snprintfz(em->memory_usage, NETDATA_EBPF_CHART_MEM_LENGTH -1, "aral_%s_size", name);
- snprintfz(em->memory_allocations, NETDATA_EBPF_CHART_MEM_LENGTH -1, "aral_%s_alloc", name);
-
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- em->memory_usage,
- "",
- "Bytes allocated for ARAL.",
- "bytes",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "netdata.ebpf_aral_stat_size",
- priority++,
- em->update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_write_global_dimension(mem,
- mem,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- em->memory_allocations,
- "",
- "Calls to allocate memory.",
- "calls",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "netdata.ebpf_aral_stat_alloc",
- priority++,
- em->update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_write_global_dimension(aral,
- aral,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- return priority - 2;
-}
-
-/**
- * Obsolete ARAL Charts
- *
- * Mark the charts that monitor ARAL usage as obsolete.
- * Caller must call this function with mutex locked.
- *
- * @param em a pointer to the structure with the default values.
- * @param prio the initial priority used to disable charts.
- */
-void ebpf_statistic_obsolete_aral_chart(ebpf_module_t *em, int prio)
-{
- ebpf_write_chart_obsolete(NETDATA_MONITORING_FAMILY,
-                              em->memory_usage,
-                              "",
-                              "Bytes allocated for ARAL.",
-                              "bytes",
-                              NETDATA_EBPF_FAMILY,
-                              NETDATA_EBPF_CHART_TYPE_STACKED,
-                              "netdata.ebpf_aral_stat_size",
- prio++,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_MONITORING_FAMILY,
- em->memory_allocations,
- "",
- "Calls to allocate memory.",
- "calls",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "netdata.ebpf_aral_stat_alloc",
- prio++,
- em->update_every);
-}
-
-/**
- * Send data from aral chart
- *
- * Send data for eBPF plugin
- *
- * @param memory a pointer to the allocated address
- * @param em a pointer to the structure with the default values.
- */
-void ebpf_send_data_aral_chart(ARAL *memory, ebpf_module_t *em)
-{
-    char *mem = NETDATA_EBPF_STAT_DIMENSION_MEMORY;
-    char *aral = NETDATA_EBPF_STAT_DIMENSION_ARAL;
-
- struct aral_statistics *stats = aral_statistics(memory);
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_usage, "");
- write_chart_dimension(mem, (long long)stats->structures.allocated_bytes);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_allocations, "");
- write_chart_dimension(aral, (long long)stats->structures.allocations);
- ebpf_write_end_chart();
-}
-
-/*****************************************************************
- *
- * FUNCTIONS TO READ GLOBAL HASH TABLES
- *
- *****************************************************************/
-
-/**
- * Read Global Table Stats
- *
- * Read data from specified table (map_fd) using array allocated inside thread(values) and storing
- * them in stats vector starting from the first position.
- *
- * For PID tables it is recommended to use a dedicated function to parse the specific data.
- *
- * @param stats vector used to store data
- * @param values helper to read data from hash tables.
- * @param map_fd table that has data
- * @param maps_per_core whether the data must be read from all cores.
- * @param begin initial value to query hash table
- * @param end first value that will not be queried (exclusive upper bound).
- */
-void ebpf_read_global_table_stats(netdata_idx_t *stats,
- netdata_idx_t *values,
- int map_fd,
- int maps_per_core,
- uint32_t begin,
- uint32_t end)
-{
- uint32_t idx, order;
-
- for (idx = begin, order = 0; idx < end; idx++, order++) {
- if (!bpf_map_lookup_elem(map_fd, &idx, values)) {
- int i;
- int before = (maps_per_core) ? ebpf_nprocs: 1;
- netdata_idx_t total = 0;
- for (i = 0; i < before; i++)
- total += values[i];
-
- stats[order] = total;
- }
- }
-}
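-
-/*
- * Usage sketch (illustrative; `map_fd` is a file descriptor obtained elsewhere):
- * `values` must hold one slot per core when `maps_per_core` is set, because
- * per-CPU map lookups fill one value per CPU:
- *
- *     netdata_idx_t stats[2] = { 0 };
- *     netdata_idx_t *values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
- *     ebpf_read_global_table_stats(stats, values, map_fd, CONFIG_BOOLEAN_YES, 0, 2);
- *     freez(values);
- */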
-
-/*****************************************************************
- *
- * FUNCTIONS USED WITH SOCKET
- *
- *****************************************************************/
-
-/**
- * Netmask
- *
- * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h)
- *
- * @param prefix the CIDR prefix length used to build the netmask.
- *
- * @return It returns the netmask for the given prefix.
- */
-static inline in_addr_t ebpf_netmask(int prefix) {
-
- if (prefix == 0)
- return (~((in_addr_t) - 1));
- else
- return (in_addr_t)(~((1 << (32 - prefix)) - 1));
-
-}
-
-/**
- * Broadcast
- *
- * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h)
- *
- * @param addr is the ip address
- * @param prefix is the CIDR value.
- *
- * @return It returns the last address of the range
- */
-static inline in_addr_t ebpf_broadcast(in_addr_t addr, int prefix)
-{
- return (addr | ~ebpf_netmask(prefix));
-}
-
-/**
- * Network
- *
- * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h)
- *
- * @param addr is the ip address
- * @param prefix is the CIDR value.
- *
- * @return It returns the first address of the range.
- */
-static inline in_addr_t ebpf_ipv4_network(in_addr_t addr, int prefix)
-{
- return (addr & ebpf_netmask(prefix));
-}
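-
-/*
- * Worked example for the three helpers above (host byte order): with prefix = 24,
- * ebpf_netmask(24) == 0xFFFFFF00, so for 192.168.1.10 (0xC0A8010A)
- * ebpf_ipv4_network() yields 192.168.1.0 (0xC0A80100) and ebpf_broadcast()
- * yields 192.168.1.255 (0xC0A801FF).
- */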
-
-/**
- * Calculate ipv6 first address
- *
- * @param out the address to store the first address.
- * @param in the address used to do the math.
- * @param prefix number of bits used to calculate the address
- */
-static void get_ipv6_first_addr(union netdata_ip_t *out, union netdata_ip_t *in, uint64_t prefix)
-{
- uint64_t mask,tmp;
- uint64_t ret[2];
-
- memcpy(ret, in->addr32, sizeof(union netdata_ip_t));
-
- if (prefix == 128) {
- memcpy(out->addr32, in->addr32, sizeof(union netdata_ip_t));
- return;
- } else if (!prefix) {
- ret[0] = ret[1] = 0;
- memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
- return;
- } else if (prefix <= 64) {
- ret[1] = 0ULL;
-
- tmp = be64toh(ret[0]);
- mask = 0xFFFFFFFFFFFFFFFFULL << (64 - prefix);
- tmp &= mask;
- ret[0] = htobe64(tmp);
- } else {
- mask = 0xFFFFFFFFFFFFFFFFULL << (128 - prefix);
- tmp = be64toh(ret[1]);
- tmp &= mask;
- ret[1] = htobe64(tmp);
- }
-
- memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
-}
-
-/**
- * Get IPV6 Last Address
- *
- * @param out the address to store the last address.
- * @param in the address used to do the math.
- * @param prefix number of bits used to calculate the address
- */
-static void get_ipv6_last_addr(union netdata_ip_t *out, union netdata_ip_t *in, uint64_t prefix)
-{
- uint64_t mask,tmp;
- uint64_t ret[2];
- memcpy(ret, in->addr32, sizeof(union netdata_ip_t));
-
- if (prefix == 128) {
- memcpy(out->addr32, in->addr32, sizeof(union netdata_ip_t));
- return;
- } else if (!prefix) {
- ret[0] = ret[1] = 0xFFFFFFFFFFFFFFFF;
- memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
- return;
- } else if (prefix <= 64) {
- ret[1] = 0xFFFFFFFFFFFFFFFFULL;
-
- tmp = be64toh(ret[0]);
- mask = 0xFFFFFFFFFFFFFFFFULL << (64 - prefix);
- tmp |= ~mask;
- ret[0] = htobe64(tmp);
- } else {
- mask = 0xFFFFFFFFFFFFFFFFULL << (128 - prefix);
- tmp = be64toh(ret[1]);
- tmp |= ~mask;
- ret[1] = htobe64(tmp);
- }
-
- memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
-}
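-
-/*
- * Worked example for the IPv6 helpers above: for 2001:db8::/64 the first address
- * is 2001:db8:: and the last is 2001:db8::ffff:ffff:ffff:ffff, because the prefix
- * covers only the upper 64 bits and the lower half is cleared or filled with ones,
- * respectively.
- */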
-
-/**
- * IP to network long
- *
- * @param dst the vector to store the result
- * @param ip the source ip given by our users.
- * @param domain the ip domain (IPV4 or IPV6)
- * @param source the original string
- *
- * @return it returns 0 on success and -1 otherwise.
- */
-static inline int ebpf_ip2nl(uint8_t *dst, char *ip, int domain, char *source)
-{
- if (inet_pton(domain, ip, dst) <= 0) {
-        netdata_log_error("The address specified (%s) is invalid", source);
- return -1;
- }
-
- return 0;
-}
-
-/**
- * Clean port Structure
- *
- * Clean the allocated list.
- *
- * @param clean the list that will be cleaned
- */
-void ebpf_clean_port_structure(ebpf_network_viewer_port_list_t **clean)
-{
- ebpf_network_viewer_port_list_t *move = *clean;
- while (move) {
- ebpf_network_viewer_port_list_t *next = move->next;
- freez(move->value);
- freez(move);
-
- move = next;
- }
- *clean = NULL;
-}
-
-/**
- * Clean IP structure
- *
- * Clean the allocated list.
- *
- * @param clean the list that will be cleaned
- */
-void ebpf_clean_ip_structure(ebpf_network_viewer_ip_list_t **clean)
-{
- ebpf_network_viewer_ip_list_t *move = *clean;
- while (move) {
- ebpf_network_viewer_ip_list_t *next = move->next;
- freez(move->value);
- freez(move);
-
- move = next;
- }
- *clean = NULL;
-}
-
-/**
- * Parse IP List
- *
- * Parse IP list and link it.
- *
- * @param out a pointer to store the link list
- * @param ip the value given as parameter
- */
-static void ebpf_parse_ip_list_unsafe(void **out, char *ip)
-{
- ebpf_network_viewer_ip_list_t **list = (ebpf_network_viewer_ip_list_t **)out;
-
- char *ipdup = strdupz(ip);
- union netdata_ip_t first = { };
- union netdata_ip_t last = { };
- char *is_ipv6;
- if (*ip == '*' && *(ip+1) == '\0') {
- memset(first.addr8, 0, sizeof(first.addr8));
- memset(last.addr8, 0xFF, sizeof(last.addr8));
-
- is_ipv6 = ip;
-
- ebpf_clean_ip_structure(list);
- goto storethisip;
- }
-
- char *end = ip;
-    // Advance until a separator is found
- while (*end && *end != '/' && *end != '-') end++;
-
-    // We only support the classic IPv6 notation for now, but we could consider base 85 in the near future
- // https://tools.ietf.org/html/rfc1924
- is_ipv6 = strchr(ip, ':');
-
- int select;
- if (*end && !is_ipv6) { // IPV4 range
- select = (*end == '/') ? 0 : 1;
- *end++ = '\0';
- if (*end == '!') {
- netdata_log_info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup);
- goto cleanipdup;
- }
-
- if (!select) { // CIDR
- select = ebpf_ip2nl(first.addr8, ip, AF_INET, ipdup);
- if (select)
- goto cleanipdup;
-
- select = (int) str2i(end);
- if (select < NETDATA_MINIMUM_IPV4_CIDR || select > NETDATA_MAXIMUM_IPV4_CIDR) {
- netdata_log_info("The specified CIDR %s is not valid, the IP %s will be ignored.", end, ip);
- goto cleanipdup;
- }
-
- last.addr32[0] = htonl(ebpf_broadcast(ntohl(first.addr32[0]), select));
- // This was added to remove
- // https://app.codacy.com/manual/netdata/netdata/pullRequest?prid=5810941&bid=19021977
- UNUSED(last.addr32[0]);
-
- uint32_t ipv4_test = htonl(ebpf_ipv4_network(ntohl(first.addr32[0]), select));
- if (first.addr32[0] != ipv4_test) {
- first.addr32[0] = ipv4_test;
- struct in_addr ipv4_convert;
- ipv4_convert.s_addr = ipv4_test;
- char ipv4_msg[INET_ADDRSTRLEN];
- if(inet_ntop(AF_INET, &ipv4_convert, ipv4_msg, INET_ADDRSTRLEN))
-                    netdata_log_info("The network value of CIDR %s was updated to %s.", ipdup, ipv4_msg);
- }
- } else { // Range
- select = ebpf_ip2nl(first.addr8, ip, AF_INET, ipdup);
- if (select)
- goto cleanipdup;
-
- select = ebpf_ip2nl(last.addr8, end, AF_INET, ipdup);
- if (select)
- goto cleanipdup;
- }
-
- if (htonl(first.addr32[0]) > htonl(last.addr32[0])) {
-            netdata_log_info("The specified range %s is invalid, the second address is smaller than the first, it will be ignored.",
- ipdup);
- goto cleanipdup;
- }
- } else if (is_ipv6) { // IPV6
- if (!*end) { // Unique
- select = ebpf_ip2nl(first.addr8, ip, AF_INET6, ipdup);
- if (select)
- goto cleanipdup;
-
- memcpy(last.addr8, first.addr8, sizeof(first.addr8));
- } else if (*end == '-') {
- *end++ = 0x00;
- if (*end == '!') {
- netdata_log_info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup);
- goto cleanipdup;
- }
-
- select = ebpf_ip2nl(first.addr8, ip, AF_INET6, ipdup);
- if (select)
- goto cleanipdup;
-
- select = ebpf_ip2nl(last.addr8, end, AF_INET6, ipdup);
- if (select)
- goto cleanipdup;
- } else { // CIDR
- *end++ = 0x00;
- if (*end == '!') {
- netdata_log_info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup);
- goto cleanipdup;
- }
-
- select = str2i(end);
- if (select < 0 || select > 128) {
- netdata_log_info("The CIDR %s is not valid, the address %s will be ignored.", end, ip);
- goto cleanipdup;
- }
-
- uint64_t prefix = (uint64_t)select;
- select = ebpf_ip2nl(first.addr8, ip, AF_INET6, ipdup);
- if (select)
- goto cleanipdup;
-
- get_ipv6_last_addr(&last, &first, prefix);
-
- union netdata_ip_t ipv6_test;
- get_ipv6_first_addr(&ipv6_test, &first, prefix);
-
- if (memcmp(first.addr8, ipv6_test.addr8, sizeof(union netdata_ip_t)) != 0) {
- memcpy(first.addr8, ipv6_test.addr8, sizeof(union netdata_ip_t));
-
- struct in6_addr ipv6_convert;
- memcpy(ipv6_convert.s6_addr, ipv6_test.addr8, sizeof(union netdata_ip_t));
-
- char ipv6_msg[INET6_ADDRSTRLEN];
- if(inet_ntop(AF_INET6, &ipv6_convert, ipv6_msg, INET6_ADDRSTRLEN))
-                    netdata_log_info("The network value of CIDR %s was updated to %s.", ipdup, ipv6_msg);
- }
- }
-
- if ((be64toh(*(uint64_t *)&first.addr32[2]) > be64toh(*(uint64_t *)&last.addr32[2]) &&
- !memcmp(first.addr32, last.addr32, 2*sizeof(uint32_t))) ||
- (be64toh(*(uint64_t *)&first.addr32) > be64toh(*(uint64_t *)&last.addr32)) ) {
-            netdata_log_info("The specified range %s is invalid, the second address is smaller than the first, it will be ignored.",
- ipdup);
- goto cleanipdup;
- }
- } else { // Unique ip
- select = ebpf_ip2nl(first.addr8, ip, AF_INET, ipdup);
- if (select)
- goto cleanipdup;
-
- memcpy(last.addr8, first.addr8, sizeof(first.addr8));
- }
-
- ebpf_network_viewer_ip_list_t *store;
-
- storethisip:
- store = callocz(1, sizeof(ebpf_network_viewer_ip_list_t));
- store->value = ipdup;
- store->hash = simple_hash(ipdup);
-    store->ver = (uint8_t)((!is_ipv6) ? AF_INET : AF_INET6);
- memcpy(store->first.addr8, first.addr8, sizeof(first.addr8));
- memcpy(store->last.addr8, last.addr8, sizeof(last.addr8));
-
- ebpf_fill_ip_list_unsafe(list, store, "socket");
- return;
-
- cleanipdup:
- freez(ipdup);
-}
-
-/**
- * Parse IP Range
- *
- * Parse the IP ranges given and create Network Viewer IP Structure
- *
- * @param ptr is a pointer with the text to parse.
- */
-void ebpf_parse_ips_unsafe(char *ptr)
-{
- // No value
- if (unlikely(!ptr))
- return;
-
- while (likely(ptr)) {
- // Move forward until next valid character
- while (isspace(*ptr)) ptr++;
-
- // No valid value found
- if (unlikely(!*ptr))
- return;
-
- // Find space that ends the list
- char *end = strchr(ptr, ' ');
- if (end) {
- *end++ = '\0';
- }
-
- int neg = 0;
- if (*ptr == '!') {
- neg++;
- ptr++;
- }
-
-        if (isascii(*ptr)) { // Parse IP
- ebpf_parse_ip_list_unsafe(
- (!neg) ? (void **)&network_viewer_opt.included_ips : (void **)&network_viewer_opt.excluded_ips, ptr);
- }
-
- ptr = end;
- }
-}
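-
-/*
- * Example of an accepted list (hypothetical configuration value): single IPs,
- * CIDR blocks, dashed ranges and `!` exclusions may be mixed, separated by spaces:
- *
- *     ips = !127.0.0.1/8 10.0.0.0/8 172.16.0.0-172.31.255.255 fc00::/7
- */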
-
-/**
- * Fill Port list
- *
- * @param out a pointer to the link list.
- * @param in the structure that will be linked.
- */
-static inline void fill_port_list(ebpf_network_viewer_port_list_t **out, ebpf_network_viewer_port_list_t *in)
-{
- if (likely(*out)) {
- ebpf_network_viewer_port_list_t *move = *out, *store = *out;
- uint16_t first = ntohs(in->first);
- uint16_t last = ntohs(in->last);
- while (move) {
- uint16_t cmp_first = ntohs(move->first);
- uint16_t cmp_last = ntohs(move->last);
- if (cmp_first <= first && first <= cmp_last &&
- cmp_first <= last && last <= cmp_last ) {
- netdata_log_info("The range/value (%u, %u) is inside the range/value (%u, %u) already inserted, it will be ignored.",
- first, last, cmp_first, cmp_last);
- freez(in->value);
- freez(in);
- return;
- } else if (first <= cmp_first && cmp_first <= last &&
- first <= cmp_last && cmp_last <= last) {
-                netdata_log_info("The range (%u, %u) contains the previously inserted range (%u, %u), the previous one will be replaced.",
- first, last, cmp_first, cmp_last);
- freez(move->value);
- move->value = in->value;
- move->first = in->first;
- move->last = in->last;
- freez(in);
- return;
- }
-
- store = move;
- move = move->next;
- }
-
- store->next = in;
- } else {
- *out = in;
- }
-
-#ifdef NETDATA_INTERNAL_CHECKS
-    netdata_log_info("Adding values %s (%u, %u) to the %s port list used by the network viewer",
- in->value, in->first, in->last,
- (*out == network_viewer_opt.included_port)?"included":"excluded");
-#endif
-}
-
-/**
- * Parse Service List
- *
- * @param out a pointer to store the link list
- * @param service the service used to create the structure that will be linked.
- */
-static void ebpf_parse_service_list(void **out, char *service)
-{
- ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out;
- struct servent *serv = getservbyname((const char *)service, "tcp");
- if (!serv)
- serv = getservbyname((const char *)service, "udp");
-
- if (!serv) {
- netdata_log_info("Cannot resolve the service '%s' with protocols TCP and UDP, it will be ignored", service);
- return;
- }
-
- ebpf_network_viewer_port_list_t *w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
- w->value = strdupz(service);
- w->hash = simple_hash(service);
-
- w->first = w->last = (uint16_t)serv->s_port;
-
- fill_port_list(list, w);
-}
-
-/**
- * Parse port list
- *
- * Parse an allocated port list with the range given
- *
- * @param out a pointer to store the link list
- * @param range the range provided by the user.
- */
-static void ebpf_parse_port_list(void **out, char *range)
-{
- int first, last;
- ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out;
-
- char *copied = strdupz(range);
- if (*range == '*' && *(range+1) == '\0') {
- first = 1;
- last = 65535;
-
- ebpf_clean_port_structure(list);
- goto fillenvpl;
- }
-
- char *end = range;
-    // Advance until a separator is found
-    while (*end && *end != ':' && *end != '-') end++;
-
-    // It has a range
- if (likely(*end)) {
- *end++ = '\0';
- if (*end == '!') {
- netdata_log_info("The exclusion cannot be in the second part of the range, the range %s will be ignored.", copied);
- freez(copied);
- return;
- }
- last = str2i((const char *)end);
- } else {
- last = 0;
- }
-
- first = str2i((const char *)range);
- if (first < NETDATA_MINIMUM_PORT_VALUE || first > NETDATA_MAXIMUM_PORT_VALUE) {
- netdata_log_info("The first port %d of the range \"%s\" is invalid and it will be ignored!", first, copied);
- freez(copied);
- return;
- }
-
- if (!last)
- last = first;
-
- if (last < NETDATA_MINIMUM_PORT_VALUE || last > NETDATA_MAXIMUM_PORT_VALUE) {
- netdata_log_info("The second port %d of the range \"%s\" is invalid and the whole range will be ignored!", last, copied);
- freez(copied);
- return;
- }
-
- if (first > last) {
-        netdata_log_info("The specified range %s is in the wrong order, the smallest value must always come first, it will be ignored!", copied);
- freez(copied);
- return;
- }
-
- ebpf_network_viewer_port_list_t *w;
- fillenvpl:
- w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
- w->value = copied;
- w->hash = simple_hash(copied);
- w->first = (uint16_t)first;
- w->last = (uint16_t)last;
- w->cmp_first = (uint16_t)first;
- w->cmp_last = (uint16_t)last;
-
- fill_port_list(list, w);
-}
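-
-/*
- * Accepted formats for a single entry (derived from the parsing above):
- *
- *     "80"          a single port
- *     "8080:8090"   a range, colon separated
- *     "1-1024"      a range, dash separated
- *     "*"           every port (1-65535), clearing anything parsed before
- */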
-
-/**
- * Parse Port Range
- *
- * Parse the port ranges given and create Network Viewer Port Structure
- *
- * @param ptr is a pointer with the text to parse.
- */
-void ebpf_parse_ports(char *ptr)
-{
- // No value
- if (unlikely(!ptr))
- return;
-
- while (likely(ptr)) {
- // Move forward until next valid character
- while (isspace(*ptr)) ptr++;
-
- // No valid value found
- if (unlikely(!*ptr))
- return;
-
- // Find space that ends the list
- char *end = strchr(ptr, ' ');
- if (end) {
- *end++ = '\0';
- }
-
- int neg = 0;
- if (*ptr == '!') {
- neg++;
- ptr++;
- }
-
- if (isdigit(*ptr)) { // Parse port
- ebpf_parse_port_list(
- (!neg) ? (void **)&network_viewer_opt.included_port : (void **)&network_viewer_opt.excluded_port, ptr);
- } else if (isalpha(*ptr)) { // Parse service
- ebpf_parse_service_list(
- (!neg) ? (void **)&network_viewer_opt.included_port : (void **)&network_viewer_opt.excluded_port, ptr);
- } else if (*ptr == '*') { // All
- ebpf_parse_port_list(
- (!neg) ? (void **)&network_viewer_opt.included_port : (void **)&network_viewer_opt.excluded_port, ptr);
- }
-
- ptr = end;
- }
-}
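-
-/*
- * Example of a full "ports" list (hypothetical configuration value): numeric
- * entries go through ebpf_parse_port_list(), names are resolved with
- * ebpf_parse_service_list(), and a leading `!` routes the entry to the
- * excluded list:
- *
- *     ports = 80 8080:8090 ssh !53
- */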
-
-/*****************************************************************
- *
- * FUNCTIONS TO DEFINE OPTIONS
- *
- *****************************************************************/
-
-/**
- * Define labels used to generate charts
- *
- * @param is structure with information about number of calls made for a function.
- * @param pio structure used to generate charts.
- * @param dim a pointer to the dimension names
- * @param name a pointer to the vector with the names of the functions.
- * @param algorithm a vector with the algorithms used to make the charts
- * @param end the number of elements in the previous 4 arguments.
- */
-void ebpf_global_labels(netdata_syscall_stat_t *is, netdata_publish_syscall_t *pio, char **dim,
- char **name, int *algorithm, int end)
-{
- int i;
-
- netdata_syscall_stat_t *prev = NULL;
- netdata_publish_syscall_t *publish_prev = NULL;
- for (i = 0; i < end; i++) {
- if (prev) {
- prev->next = &is[i];
- }
- prev = &is[i];
-
- pio[i].dimension = dim[i];
- pio[i].name = name[i];
- pio[i].algorithm = ebpf_algorithms[algorithm[i]];
- if (publish_prev) {
- publish_prev->next = &pio[i];
- }
- publish_prev = &pio[i];
- }
-}
-
-/**
- * Define thread mode for all ebpf program.
- *
- * @param lmode the mode that will be used for them.
- */
-static inline void ebpf_set_thread_mode(netdata_run_mode_t lmode)
-{
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_modules[i].mode = lmode;
- }
-}
-
-/**
- * Enable specific charts selected by user.
- *
- * @param em the structure that will be changed
- * @param disable_cgroup the status of the cgroup charts.
- */
-static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int disable_cgroup)
-{
- em->enabled = NETDATA_THREAD_EBPF_RUNNING;
-
- if (!disable_cgroup) {
- em->cgroup_charts = CONFIG_BOOLEAN_YES;
- }
-
- em->global_charts = CONFIG_BOOLEAN_YES;
-}
-
-/**
- * Disable all Global charts
- *
- * Disable charts
- */
-static inline void disable_all_global_charts()
-{
- int i;
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].enabled = NETDATA_THREAD_EBPF_NOT_RUNNING;
- ebpf_modules[i].global_charts = 0;
- }
-}
-
-/**
- * Enable the specified chart group
- *
- * @param idx the index of the ebpf_modules entry to enable
- * @param disable_cgroup the status of the cgroup charts.
- */
-static inline void ebpf_enable_chart(int idx, int disable_cgroup)
-{
- int i;
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- if (i == idx) {
- ebpf_enable_specific_chart(&ebpf_modules[i], disable_cgroup);
- break;
- }
- }
-}
-
-/**
- * Disable Cgroups
- *
- * Disable cgroup charts for every module, loading only global charts.
- */
-static inline void ebpf_disable_cgroups()
-{
- int i;
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].cgroup_charts = 0;
- }
-}
-
-/**
- * Update Disabled Plugins
- *
- * This function calls ebpf_update_stats to update statistics for the collector.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-void ebpf_update_disabled_plugin_stats(ebpf_module_t *em)
-{
- pthread_mutex_lock(&lock);
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&lock);
-}
-
-/**
- * Print help on standard error so the user knows how to use the collector.
- */
-void ebpf_print_help()
-{
- const time_t t = time(NULL);
- struct tm ct;
- struct tm *test = localtime_r(&t, &ct);
- int year;
- if (test)
- year = ct.tm_year;
- else
- year = 0;
-
- fprintf(stderr,
- "\n"
- " Netdata ebpf.plugin %s\n"
- " Copyright (C) 2016-%d Costa Tsaousis <costa@tsaousis.gr>\n"
- " Released under GNU General Public License v3 or later.\n"
- " All rights reserved.\n"
- "\n"
- " This eBPF.plugin is a data collector plugin for netdata.\n"
- "\n"
- " This plugin only accepts long options with one or two dashes. The available command line options are:\n"
- "\n"
- " SECONDS Set the data collection frequency.\n"
- "\n"
- " [-]-help Show this help.\n"
- "\n"
- " [-]-version Show software version.\n"
- "\n"
- " [-]-global Disable charts per application and cgroup.\n"
- "\n"
- " [-]-all Enable all chart groups (global, apps, and cgroup), unless -g is also given.\n"
- "\n"
-            " [-]-cachestat          Enable charts related to Linux page cache.\n"
- "\n"
- " [-]-dcstat Enable charts related to directory cache.\n"
- "\n"
- " [-]-disk Enable charts related to disk monitoring.\n"
- "\n"
- " [-]-filesystem Enable chart related to filesystem run time.\n"
- "\n"
- " [-]-hardirq Enable chart related to hard IRQ latency.\n"
- "\n"
- " [-]-mdflush Enable charts related to multi-device flush.\n"
- "\n"
- " [-]-mount Enable charts related to mount monitoring.\n"
- "\n"
- " [-]-net Enable network viewer charts.\n"
- "\n"
- " [-]-oomkill Enable chart related to OOM kill tracking.\n"
- "\n"
- " [-]-process Enable charts related to process run time.\n"
- "\n"
- " [-]-return Run the collector in return mode.\n"
- "\n"
- " [-]-shm Enable chart related to shared memory tracking.\n"
- "\n"
- " [-]-softirq Enable chart related to soft IRQ latency.\n"
- "\n"
- " [-]-sync Enable chart related to sync run time.\n"
- "\n"
- " [-]-swap Enable chart related to swap run time.\n"
- "\n"
- " [-]-vfs Enable chart related to vfs run time.\n"
- "\n"
- " [-]-legacy Load legacy eBPF programs.\n"
- "\n"
-            " [-]-core               Use CO-RE when available (work in progress).\n"
- "\n",
- VERSION,
- (year >= 116) ? year + 1900 : 2020);
-}
-
-/*****************************************************************
- *
- * TRACEPOINT MANAGEMENT FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * Enable a tracepoint.
- *
- * @return 0 on success, -1 on error.
- */
-int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp)
-{
- int test = ebpf_is_tracepoint_enabled(tp->class, tp->event);
-
- // err?
- if (test == -1) {
- return -1;
- }
- // disabled?
- else if (test == 0) {
- // enable it then.
- if (ebpf_enable_tracing_values(tp->class, tp->event)) {
- return -1;
- }
- }
-
- // enabled now or already was.
- tp->enabled = true;
-
- return 0;
-}
-
-/**
- * Disable a tracepoint if it's enabled.
- *
- * @return 0 on success, -1 on error.
- */
-int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp)
-{
- int test = ebpf_is_tracepoint_enabled(tp->class, tp->event);
-
- // err?
- if (test == -1) {
- return -1;
- }
- // enabled?
- else if (test == 1) {
- // disable it then.
- if (ebpf_disable_tracing_values(tp->class, tp->event)) {
- return -1;
- }
- }
-
-    // disabled now or already was.
- tp->enabled = false;
-
- return 0;
-}
-
-/**
- * Enable multiple tracepoints on a list of tracepoints which ends when the
- * class is NULL.
- *
- * @return the number of successful enables.
- */
-uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps)
-{
- uint32_t cnt = 0;
- for (int i = 0; tps[i].class != NULL; i++) {
- if (ebpf_enable_tracepoint(&tps[i]) == -1) {
- netdata_log_error("Failed to enable tracepoint %s:%s", tps[i].class, tps[i].event);
- }
- else {
- cnt += 1;
- }
- }
- return cnt;
-}
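-
-/*
- * Usage sketch (illustrative only): the list must be terminated by an entry
- * whose class is NULL; the class/event names below are hypothetical:
- *
- *     static ebpf_tracepoint_t tps[] = {
- *         {.enabled = false, .class = "sched", .event = "sched_process_exit"},
- *         {.enabled = false, .class = NULL, .event = NULL},
- *     };
- *     uint32_t enabled = ebpf_enable_tracepoints(tps);
- */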
-
-/*****************************************************************
- *
- * AUXILIARY FUNCTIONS USED DURING INITIALIZATION
- *
- *****************************************************************/
-
-/**
- * Is ip inside the range
- *
- * Check if the IP is inside an IP range.
- *
- * @param rfirst the first ip address of the range
- * @param rlast the last ip address of the range
- * @param cmpfirst the first ip to compare
- * @param cmplast the last ip to compare
- * @param family the IP family
- *
- * @return It returns 1 if the IP is inside the range and 0 otherwise
- */
-static int ebpf_is_ip_inside_range(union netdata_ip_t *rfirst, union netdata_ip_t *rlast,
- union netdata_ip_t *cmpfirst, union netdata_ip_t *cmplast, int family)
-{
- if (family == AF_INET) {
- if ((rfirst->addr32[0] <= cmpfirst->addr32[0]) && (rlast->addr32[0] >= cmplast->addr32[0]))
- return 1;
- } else {
- if (memcmp(rfirst->addr8, cmpfirst->addr8, sizeof(union netdata_ip_t)) <= 0 &&
- memcmp(rlast->addr8, cmplast->addr8, sizeof(union netdata_ip_t)) >= 0) {
- return 1;
- }
-
- }
- return 0;
-}
-
-/**
- * Fill IP list
- *
- * @param out a pointer to the link list.
- * @param in the structure that will be linked.
- * @param table the modified table.
- */
-void ebpf_fill_ip_list_unsafe(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in,
- char *table __maybe_unused)
-{
- if (in->ver == AF_INET) { // It is simpler to compare using host order
- in->first.addr32[0] = ntohl(in->first.addr32[0]);
- in->last.addr32[0] = ntohl(in->last.addr32[0]);
- }
- if (likely(*out)) {
- ebpf_network_viewer_ip_list_t *move = *out, *store = *out;
- while (move) {
- if (in->ver == move->ver &&
- ebpf_is_ip_inside_range(&move->first, &move->last, &in->first, &in->last, in->ver)) {
-#ifdef NETDATA_DEV_MODE
- netdata_log_info("The range/value (%s) is inside the range/value (%s) already inserted, it will be ignored.",
- in->value, move->value);
-#endif
- freez(in->value);
- freez(in);
- return;
- }
- store = move;
- move = move->next;
- }
-
- store->next = in;
- } else {
- *out = in;
- }
-
-#ifdef NETDATA_DEV_MODE
- char first[256], last[512];
- if (in->ver == AF_INET) {
- netdata_log_info("Adding values %s: (%u - %u) to %s IP list \"%s\" used on network viewer",
- in->value, in->first.addr32[0], in->last.addr32[0],
- (*out == network_viewer_opt.included_ips)?"included":"excluded",
- table);
- } else {
- if (inet_ntop(AF_INET6, in->first.addr8, first, INET6_ADDRSTRLEN) &&
- inet_ntop(AF_INET6, in->last.addr8, last, INET6_ADDRSTRLEN))
- netdata_log_info("Adding values %s - %s to %s IP list \"%s\" used on network viewer",
- first, last,
- (*out == network_viewer_opt.included_ips)?"included":"excluded",
- table);
- }
-#endif
-}
-
-/**
- * Link hostname
- *
- * @param out is the output link list
- * @param in the hostname to add to list.
- */
-static void ebpf_link_hostname(ebpf_network_viewer_hostname_list_t **out, ebpf_network_viewer_hostname_list_t *in)
-{
- if (likely(*out)) {
- ebpf_network_viewer_hostname_list_t *move = *out;
- for (; move->next ; move = move->next ) {
- if (move->hash == in->hash && !strcmp(move->value, in->value)) {
- netdata_log_info("The hostname %s was already inserted, it will be ignored.", in->value);
- freez(in->value);
- simple_pattern_free(in->value_pattern);
- freez(in);
- return;
- }
- }
-
- move->next = in;
- } else {
- *out = in;
- }
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("Adding value %s to %s hostname list used on network viewer",
- in->value,
- (*out == network_viewer_opt.included_hostnames)?"included":"excluded");
-#endif
-}
-
-/**
- * Link Hostnames
- *
- * Parse the list of hostnames to create the link list.
- * This is not associated with the IP, because simple patterns like *example* cannot be resolved to an IP.
- *
- * @param parse is a pointer with the text to parse.
- */
-static void ebpf_link_hostnames(char *parse)
-{
- // No value
- if (unlikely(!parse))
- return;
-
- while (likely(parse)) {
- // Find the first valid value
- while (isspace(*parse)) parse++;
-
- // No valid value found
- if (unlikely(!*parse))
- return;
-
- // Find space that ends the list
- char *end = strchr(parse, ' ');
- if (end) {
- *end++ = '\0';
- }
-
- int neg = 0;
- if (*parse == '!') {
- neg++;
- parse++;
- }
-
-        ebpf_network_viewer_hostname_list_t *hostname = callocz(1, sizeof(ebpf_network_viewer_hostname_list_t));
- hostname->value = strdupz(parse);
- hostname->hash = simple_hash(parse);
- hostname->value_pattern = simple_pattern_create(parse, NULL, SIMPLE_PATTERN_EXACT, true);
-
- ebpf_link_hostname((!neg) ? &network_viewer_opt.included_hostnames :
- &network_viewer_opt.excluded_hostnames,
- hostname);
-
- parse = end;
- }
-}
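-
-/*
- * Parsing sketch: the buffer is modified in place (token separators become
- * string terminators), so a writable array must be used:
- *
- *    char value[] = "!*dev* web* db01";
- *    ebpf_link_hostnames(value);
- *    // "*dev*" -> network_viewer_opt.excluded_hostnames
- *    // "web*"  -> network_viewer_opt.included_hostnames
- *    // "db01"  -> network_viewer_opt.included_hostnames
- */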
-
-/**
- * Parse network viewer section
- *
- * @param cfg the configuration structure
- */
-void parse_network_viewer_section(struct config *cfg)
-{
- network_viewer_opt.hostname_resolution_enabled = appconfig_get_boolean(cfg,
- EBPF_NETWORK_VIEWER_SECTION,
- EBPF_CONFIG_RESOLVE_HOSTNAME,
- CONFIG_BOOLEAN_NO);
-
- network_viewer_opt.service_resolution_enabled = appconfig_get_boolean(cfg,
- EBPF_NETWORK_VIEWER_SECTION,
- EBPF_CONFIG_RESOLVE_SERVICE,
- CONFIG_BOOLEAN_YES);
-
- char *value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_PORTS, NULL);
- ebpf_parse_ports(value);
-
- if (network_viewer_opt.hostname_resolution_enabled) {
- value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_HOSTNAMES, NULL);
- ebpf_link_hostnames(value);
- } else {
- netdata_log_info("Name resolution is disabled, collector will not parse \"hostnames\" list.");
- }
-
- value = appconfig_get(cfg,
- EBPF_NETWORK_VIEWER_SECTION,
- "ips",
- NULL);
- //"ips", "!127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 !::1/128");
- ebpf_parse_ips_unsafe(value);
-}
-
-/**
- * Read Local Ports
- *
- * Parse /proc/net/{tcp,udp} and get the ports on which Linux is listening.
- *
- * @param filename the proc file to parse.
- * @param proto the protocol number (IPPROTO_TCP or IPPROTO_UDP) associated with the file being read.
- */
-static void read_local_ports(char *filename, uint8_t proto)
-{
- procfile *ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if (!ff)
- return;
-
- ff = procfile_readall(ff);
- if (!ff)
- return;
-
- size_t lines = procfile_lines(ff), l;
- netdata_passive_connection_t values = {.counter = 0, .tgid = 0, .pid = 0};
-    for (l = 0; l < lines; l++) {
- size_t words = procfile_linewords(ff, l);
- // This is header or end of file
- if (unlikely(words < 14))
- continue;
-
- // https://elixir.bootlin.com/linux/v5.7.8/source/include/net/tcp_states.h
- // 0A = TCP_LISTEN
- if (strcmp("0A", procfile_lineword(ff, l, 5)))
- continue;
-
- // Read local port
- uint16_t port = (uint16_t)strtol(procfile_lineword(ff, l, 2), NULL, 16);
- update_listen_table(htons(port), proto, &values);
- }
-
- procfile_close(ff);
-}
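-
-/*
- * Line anatomy sketch: with the separators " \t:" a /proc/net/tcp entry such
- * as
- *
- *    0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 ...
- *
- * is tokenized so that word 2 holds the local port in hexadecimal ("0016" is
- * port 22) and word 5 holds the socket state ("0A" is TCP_LISTEN), which is
- * why the loop above reads exactly those two words.
- */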
-
-/**
- * Read Local Addresses
- *
- * Read the local addresses from the network interfaces.
- */
-void ebpf_read_local_addresses_unsafe()
-{
- struct ifaddrs *ifaddr, *ifa;
- if (getifaddrs(&ifaddr) == -1) {
-        netdata_log_error("Cannot get the local IP addresses; it is not possible to separate inbound from outbound connections");
- return;
- }
-
-    char *notext = "No text representation";
- for (ifa = ifaddr; ifa != NULL; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr == NULL)
- continue;
-
- if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6))
- continue;
-
- ebpf_network_viewer_ip_list_t *w = callocz(1, sizeof(ebpf_network_viewer_ip_list_t));
-
- int family = ifa->ifa_addr->sa_family;
- w->ver = (uint8_t) family;
- char text[INET6_ADDRSTRLEN];
- if (family == AF_INET) {
- struct sockaddr_in *in = (struct sockaddr_in*) ifa->ifa_addr;
-
- w->first.addr32[0] = in->sin_addr.s_addr;
- w->last.addr32[0] = in->sin_addr.s_addr;
-
- if (inet_ntop(AF_INET, w->first.addr8, text, INET_ADDRSTRLEN)) {
- w->value = strdupz(text);
- w->hash = simple_hash(text);
- } else {
- w->value = strdupz(notext);
- w->hash = simple_hash(notext);
- }
- } else {
- struct sockaddr_in6 *in6 = (struct sockaddr_in6*) ifa->ifa_addr;
-
- memcpy(w->first.addr8, (void *)&in6->sin6_addr, sizeof(struct in6_addr));
- memcpy(w->last.addr8, (void *)&in6->sin6_addr, sizeof(struct in6_addr));
-
-        if (inet_ntop(AF_INET6, w->first.addr8, text, INET6_ADDRSTRLEN)) {
- w->value = strdupz(text);
- w->hash = simple_hash(text);
- } else {
- w->value = strdupz(notext);
- w->hash = simple_hash(notext);
- }
- }
-
- ebpf_fill_ip_list_unsafe(
- (family == AF_INET) ? &network_viewer_opt.ipv4_local_ip : &network_viewer_opt.ipv6_local_ip, w, "selector");
- }
-
- freeifaddrs(ifaddr);
-}
-
-/**
- * Start Pthread Variables
- *
- * This function initializes all mutexes and spinlocks used by the plugin.
- */
-void ebpf_start_pthread_variables()
-{
- pthread_mutex_init(&lock, NULL);
- pthread_mutex_init(&ebpf_exit_cleanup, NULL);
- pthread_mutex_init(&collect_data_mutex, NULL);
- pthread_mutex_init(&mutex_cgroup_shm, NULL);
- rw_spinlock_init(&ebpf_judy_pid.index.rw_spinlock);
-}
-
-/**
- * Allocate the vectors used for all threads.
- */
-static void ebpf_allocate_common_vectors()
-{
- ebpf_judy_pid.pid_table = ebpf_allocate_pid_aral(NETDATA_EBPF_PID_SOCKET_ARAL_TABLE_NAME,
- sizeof(netdata_ebpf_judy_pid_stats_t));
- ebpf_all_pids = callocz((size_t)pid_max, sizeof(struct ebpf_pid_stat *));
- ebpf_aral_init();
-}
-
-/**
- * Define how to load the eBPF programs
- *
- * @param ptr the load mode option given by the user
- */
-static inline void ebpf_how_to_load(char *ptr)
-{
- if (!strcasecmp(ptr, EBPF_CFG_LOAD_MODE_RETURN))
- ebpf_set_thread_mode(MODE_RETURN);
- else if (!strcasecmp(ptr, EBPF_CFG_LOAD_MODE_DEFAULT))
- ebpf_set_thread_mode(MODE_ENTRY);
- else
- netdata_log_error("the option %s for \"ebpf load mode\" is not a valid option.", ptr);
-}
-
-/**
- * Define whether we should have charts for apps
- *
- * @param value the mode that will be used for the apps charts.
- */
-static inline void ebpf_set_apps_mode(netdata_apps_integration_flags_t value)
-{
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_modules[i].apps_charts = value;
- }
-}
-
-
-/**
- * Update interval
- *
- * Update default interval with value from user
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_update_interval(int update_every)
-{
- int i;
- int value = (int) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_UPDATE_EVERY,
- update_every);
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].update_every = value;
- }
-}
-
-/**
- * Update PID table size
- *
- * Update default size with value from user
- */
-static void ebpf_update_table_size()
-{
- int i;
- uint32_t value = (uint32_t) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION,
- EBPF_CFG_PID_SIZE, ND_EBPF_DEFAULT_PID_SIZE);
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].pid_map_size = value;
- }
-}
-
-/**
- * Update lifetime
- *
- * Update the period of time that a specific thread will run
- */
-static void ebpf_update_lifetime()
-{
- int i;
- uint32_t value = (uint32_t) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION,
- EBPF_CFG_LIFETIME, EBPF_DEFAULT_LIFETIME);
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].lifetime = value;
- }
-}
-
-/**
- * Set Load mode
- *
- * @param load the load mode to apply to every module
- * @param origin specify the configuration file loaded
- */
-static inline void ebpf_set_load_mode(netdata_ebpf_load_mode_t load, netdata_ebpf_load_mode_t origin)
-{
- int i;
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].load &= ~NETDATA_EBPF_LOAD_METHODS;
-        ebpf_modules[i].load |= load | origin;
- }
-}
-
-/**
- * Update mode
- *
- * @param str value read from configuration file.
- * @param origin specify the configuration file loaded
- */
-static inline void epbf_update_load_mode(char *str, netdata_ebpf_load_mode_t origin)
-{
- netdata_ebpf_load_mode_t load = epbf_convert_string_to_load_mode(str);
-
- ebpf_set_load_mode(load, origin);
-}
-
-/**
- * Update Map per core
- *
- * Define the map type used with some hash tables.
- */
-static void ebpf_update_map_per_core()
-{
- int i;
- int value = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION,
- EBPF_CFG_MAPS_PER_CORE, CONFIG_BOOLEAN_YES);
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].maps_per_core = value;
- }
-}
-
-/**
- * Read collector values
- *
- * @param disable_cgroups variable to store information related to cgroups.
- * @param update_every value to overwrite the update frequency set by the server.
- * @param origin specify the configuration file loaded
- */
-static void read_collector_values(int *disable_cgroups,
- int update_every, netdata_ebpf_load_mode_t origin)
-{
- // Read global section
- char *value;
- if (appconfig_exists(&collector_config, EBPF_GLOBAL_SECTION, "load")) // Backward compatibility
- value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, "load",
- EBPF_CFG_LOAD_MODE_DEFAULT);
- else
- value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_LOAD_MODE,
- EBPF_CFG_LOAD_MODE_DEFAULT);
-
- ebpf_how_to_load(value);
-
- btf_path = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_PROGRAM_PATH,
- EBPF_DEFAULT_BTF_PATH);
-
-#ifdef LIBBPF_MAJOR_VERSION
- default_btf = ebpf_load_btf_file(btf_path, EBPF_DEFAULT_BTF_FILE);
-#endif
-
- value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_TYPE_FORMAT, EBPF_CFG_DEFAULT_PROGRAM);
-
- epbf_update_load_mode(value, origin);
-
- ebpf_update_interval(update_every);
-
- ebpf_update_table_size();
-
- ebpf_update_lifetime();
-
-    // This is kept for backward compatibility
- uint32_t enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, "disable apps",
- CONFIG_BOOLEAN_NO);
- if (!enabled) {
-        // "apps" is stated positively, so the value is inverted to know whether apps are disabled.
- enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_APPLICATION,
- CONFIG_BOOLEAN_YES);
- enabled = (enabled == CONFIG_BOOLEAN_NO)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_NO;
- }
-
- ebpf_set_apps_mode(!enabled);
-
-    // "cgroups" is also stated positively, so the value is inverted to know whether cgroups are disabled.
-    // The same pattern is used for cgroups and apps.
- enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_CGROUP, CONFIG_BOOLEAN_NO);
- *disable_cgroups = (enabled == CONFIG_BOOLEAN_NO)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_NO;
-
- ebpf_update_map_per_core();
-
- // Read ebpf programs section
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION,
- ebpf_modules[EBPF_MODULE_PROCESS_IDX].info.config_name, CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, *disable_cgroups);
- }
-
-    // This is kept for backward compatibility
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network viewer",
- CONFIG_BOOLEAN_NO);
- if (!enabled)
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION,
- ebpf_modules[EBPF_MODULE_SOCKET_IDX].info.config_name,
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_cgroups);
- }
-
-    // This is kept for backward compatibility
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connection monitoring",
- CONFIG_BOOLEAN_YES);
- if (!enabled)
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connections",
- CONFIG_BOOLEAN_YES);
-
- network_viewer_opt.enabled = enabled;
- if (enabled) {
- if (!ebpf_modules[EBPF_MODULE_SOCKET_IDX].enabled)
- ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_cgroups);
-
- // Read network viewer section if network viewer is enabled
- // This is kept here to keep backward compatibility
- parse_network_viewer_section(&collector_config);
- ebpf_parse_service_name_section(&collector_config);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "cachestat",
- CONFIG_BOOLEAN_NO);
-
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_CACHESTAT_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "sync",
- CONFIG_BOOLEAN_YES);
-
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SYNC_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "dcstat",
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_DCSTAT_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "swap",
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SWAP_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "vfs",
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_VFS_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "filesystem",
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_FILESYSTEM_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "disk",
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_DISK_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "mount",
- CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_MOUNT_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "fd",
- CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_FD_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "hardirq",
- CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_HARDIRQ_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "softirq",
- CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SOFTIRQ_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "oomkill",
- CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_OOMKILL_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "shm",
- CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SHM_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "mdflush",
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_MDFLUSH_IDX, *disable_cgroups);
- }
-}
-
-/**
- * Load collector config
- *
- * @param path the path where the file ebpf.conf is stored.
- * @param disable_cgroups variable to store the information about cgroups plugin status.
- * @param update_every value to overwrite the update frequency set by the server.
- *
- * @return 0 on success and -1 otherwise.
- */
-static int ebpf_load_collector_config(char *path, int *disable_cgroups, int update_every)
-{
- char lpath[4096];
- netdata_ebpf_load_mode_t origin;
-
- snprintf(lpath, 4095, "%s/%s", path, NETDATA_EBPF_CONFIG_FILE);
- if (!appconfig_load(&collector_config, lpath, 0, NULL)) {
- snprintf(lpath, 4095, "%s/%s", path, NETDATA_EBPF_OLD_CONFIG_FILE);
- if (!appconfig_load(&collector_config, lpath, 0, NULL)) {
- return -1;
- }
- origin = EBPF_LOADED_FROM_STOCK;
- } else
- origin = EBPF_LOADED_FROM_USER;
-
- read_collector_values(disable_cgroups, update_every, origin);
-
- return 0;
-}
-
-/**
- * Set global variables reading environment variables
- */
-void set_global_variables()
-{
- // Get environment variables
- ebpf_plugin_dir = getenv("NETDATA_PLUGINS_DIR");
- if (!ebpf_plugin_dir)
- ebpf_plugin_dir = PLUGINS_DIR;
-
- ebpf_user_config_dir = getenv("NETDATA_USER_CONFIG_DIR");
- if (!ebpf_user_config_dir)
- ebpf_user_config_dir = CONFIG_DIR;
-
- ebpf_stock_config_dir = getenv("NETDATA_STOCK_CONFIG_DIR");
- if (!ebpf_stock_config_dir)
- ebpf_stock_config_dir = LIBCONFIG_DIR;
-
- ebpf_configured_log_dir = getenv("NETDATA_LOG_DIR");
- if (!ebpf_configured_log_dir)
- ebpf_configured_log_dir = LOG_DIR;
-
- ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
- if (ebpf_nprocs < 0) {
- ebpf_nprocs = NETDATA_MAX_PROCESSOR;
-        netdata_log_error("Cannot identify the number of processors, using default value %d", ebpf_nprocs);
- }
-
- isrh = get_redhat_release();
- pid_max = get_system_pid_max();
- running_on_kernel = ebpf_get_kernel_version();
-}
-
-/**
- * Load collector config
- */
-static inline void ebpf_load_thread_config()
-{
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_update_module(&ebpf_modules[i], default_btf, running_on_kernel, isrh);
- }
-}
-
-/**
- * Check Conditions
- *
- * This function checks the kernel the plugin is running on and verifies the plugin permissions.
- *
- * @return It returns 0 on success and -1 otherwise
- */
-int ebpf_check_conditions()
-{
- if (!has_condition_to_run(running_on_kernel)) {
- netdata_log_error("The current collector cannot run on this kernel.");
- return -1;
- }
-
- if (!am_i_running_as_root()) {
- netdata_log_error(
-            "ebpf.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities.",
- (unsigned int)getuid(), (unsigned int)geteuid());
- return -1;
- }
-
- return 0;
-}
-
-/**
- * Adjust memory
- *
- * Adjust memory values to load eBPF programs.
- *
- * @return It returns 0 on success and -1 otherwise
- */
-int ebpf_adjust_memory_limit()
-{
- struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
- if (setrlimit(RLIMIT_MEMLOCK, &r)) {
-        netdata_log_error("setrlimit(RLIMIT_MEMLOCK) failed");
- return -1;
- }
-
- return 0;
-}
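-
-/*
- * Equivalent shell-level sketch: on kernels that still charge eBPF map
- * memory against RLIMIT_MEMLOCK (before 5.11 introduced memcg-based
- * accounting), the same effect can be obtained before starting the plugin
- * with:
- *
- *    ulimit -l unlimited
- */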
-
-/**
- * Parse arguments given from user.
- *
- * @param argc the number of arguments
- * @param argv the pointer to the arguments
- */
-static void ebpf_parse_args(int argc, char **argv)
-{
- int disable_cgroups = 1;
- int freq = 0;
- int option_index = 0;
- uint64_t select_threads = 0;
- static struct option long_options[] = {
- {"process", no_argument, 0, 0 },
- {"net", no_argument, 0, 0 },
- {"cachestat", no_argument, 0, 0 },
- {"sync", no_argument, 0, 0 },
- {"dcstat", no_argument, 0, 0 },
- {"swap", no_argument, 0, 0 },
- {"vfs", no_argument, 0, 0 },
- {"filesystem", no_argument, 0, 0 },
- {"disk", no_argument, 0, 0 },
- {"mount", no_argument, 0, 0 },
- {"filedescriptor", no_argument, 0, 0 },
- {"hardirq", no_argument, 0, 0 },
- {"softirq", no_argument, 0, 0 },
- {"oomkill", no_argument, 0, 0 },
- {"shm", no_argument, 0, 0 },
- {"mdflush", no_argument, 0, 0 },
- /* INSERT NEW THREADS BEFORE THIS COMMENT TO KEEP COMPATIBILITY WITH enum ebpf_module_indexes */
- {"all", no_argument, 0, 0 },
- {"version", no_argument, 0, 0 },
- {"help", no_argument, 0, 0 },
- {"global", no_argument, 0, 0 },
- {"return", no_argument, 0, 0 },
- {"legacy", no_argument, 0, 0 },
- {"core", no_argument, 0, 0 },
- {"unittest", no_argument, 0, 0 },
- {0, 0, 0, 0}
- };
-
- memset(&network_viewer_opt, 0, sizeof(network_viewer_opt));
- rw_spinlock_init(&network_viewer_opt.rw_spinlock);
-
- if (argc > 1) {
- int n = (int)str2l(argv[1]);
- if (n > 0) {
- freq = n;
- }
- }
-
- if (!freq)
- freq = EBPF_DEFAULT_UPDATE_EVERY;
-
- //rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- if (ebpf_load_collector_config(ebpf_user_config_dir, &disable_cgroups, freq)) {
- netdata_log_info(
-            "No configuration file found inside `%s/ebpf.d.conf`. It will try to load the stock file.",
- ebpf_user_config_dir);
- if (ebpf_load_collector_config(ebpf_stock_config_dir, &disable_cgroups, freq)) {
-            netdata_log_info("No stock file found. Starting with default options.");
- }
- }
-
- ebpf_load_thread_config();
- //rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
-
- while (1) {
- int c = getopt_long_only(argc, argv, "", long_options, &option_index);
- if (c == -1)
- break;
-
- switch (option_index) {
- case EBPF_MODULE_PROCESS_IDX: {
- select_threads |= 1<<EBPF_MODULE_PROCESS_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"PROCESS\" charts, because it was started with the option \"[-]-process\".");
-#endif
- break;
- }
- case EBPF_MODULE_SOCKET_IDX: {
- select_threads |= 1<<EBPF_MODULE_SOCKET_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"NET\" charts, because it was started with the option \"[-]-net\".");
-#endif
- break;
- }
- case EBPF_MODULE_CACHESTAT_IDX: {
- select_threads |= 1<<EBPF_MODULE_CACHESTAT_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"CACHESTAT\" charts, because it was started with the option \"[-]-cachestat\".");
-#endif
- break;
- }
- case EBPF_MODULE_SYNC_IDX: {
- select_threads |= 1<<EBPF_MODULE_SYNC_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"SYNC\" chart, because it was started with the option \"[-]-sync\".");
-#endif
- break;
- }
- case EBPF_MODULE_DCSTAT_IDX: {
- select_threads |= 1<<EBPF_MODULE_DCSTAT_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"DCSTAT\" charts, because it was started with the option \"[-]-dcstat\".");
-#endif
- break;
- }
- case EBPF_MODULE_SWAP_IDX: {
- select_threads |= 1<<EBPF_MODULE_SWAP_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"SWAP\" chart, because it was started with the option \"[-]-swap\".");
-#endif
- break;
- }
- case EBPF_MODULE_VFS_IDX: {
- select_threads |= 1<<EBPF_MODULE_VFS_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"VFS\" chart, because it was started with the option \"[-]-vfs\".");
-#endif
- break;
- }
- case EBPF_MODULE_FILESYSTEM_IDX: {
- select_threads |= 1<<EBPF_MODULE_FILESYSTEM_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"FILESYSTEM\" chart, because it was started with the option \"[-]-filesystem\".");
-#endif
- break;
- }
- case EBPF_MODULE_DISK_IDX: {
- select_threads |= 1<<EBPF_MODULE_DISK_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"DISK\" chart, because it was started with the option \"[-]-disk\".");
-#endif
- break;
- }
- case EBPF_MODULE_MOUNT_IDX: {
- select_threads |= 1<<EBPF_MODULE_MOUNT_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"MOUNT\" chart, because it was started with the option \"[-]-mount\".");
-#endif
- break;
- }
- case EBPF_MODULE_FD_IDX: {
- select_threads |= 1<<EBPF_MODULE_FD_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"FILEDESCRIPTOR\" chart, because it was started with the option \"[-]-filedescriptor\".");
-#endif
- break;
- }
- case EBPF_MODULE_HARDIRQ_IDX: {
- select_threads |= 1<<EBPF_MODULE_HARDIRQ_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"HARDIRQ\" chart, because it was started with the option \"[-]-hardirq\".");
-#endif
- break;
- }
- case EBPF_MODULE_SOFTIRQ_IDX: {
- select_threads |= 1<<EBPF_MODULE_SOFTIRQ_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"SOFTIRQ\" chart, because it was started with the option \"[-]-softirq\".");
-#endif
- break;
- }
- case EBPF_MODULE_OOMKILL_IDX: {
- select_threads |= 1<<EBPF_MODULE_OOMKILL_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"OOMKILL\" chart, because it was started with the option \"[-]-oomkill\".");
-#endif
- break;
- }
- case EBPF_MODULE_SHM_IDX: {
- select_threads |= 1<<EBPF_MODULE_SHM_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"SHM\" chart, because it was started with the option \"[-]-shm\".");
-#endif
- break;
- }
- case EBPF_MODULE_MDFLUSH_IDX: {
- select_threads |= 1<<EBPF_MODULE_MDFLUSH_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"MDFLUSH\" chart, because it was started with the option \"[-]-mdflush\".");
-#endif
- break;
- }
- case EBPF_OPTION_ALL_CHARTS: {
- ebpf_set_apps_mode(NETDATA_EBPF_APPS_FLAG_YES);
- disable_cgroups = 0;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF running with all chart groups, because it was started with the option \"[-]-all\".");
-#endif
- break;
- }
- case EBPF_OPTION_VERSION: {
- printf("ebpf.plugin %s\n", VERSION);
- exit(0);
- }
- case EBPF_OPTION_HELP: {
- ebpf_print_help();
- exit(0);
- }
- case EBPF_OPTION_GLOBAL_CHART: {
- disable_cgroups = 1;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF running with global chart group, because it was started with the option \"[-]-global\".");
-#endif
- break;
- }
- case EBPF_OPTION_RETURN_MODE: {
- ebpf_set_thread_mode(MODE_RETURN);
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF running in \"RETURN\" mode, because it was started with the option \"[-]-return\".");
-#endif
- break;
- }
- case EBPF_OPTION_LEGACY: {
- ebpf_set_load_mode(EBPF_LOAD_LEGACY, EBPF_LOADED_FROM_USER);
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF running with \"LEGACY\" code, because it was started with the option \"[-]-legacy\".");
-#endif
- break;
- }
- case EBPF_OPTION_CORE: {
- ebpf_set_load_mode(EBPF_LOAD_CORE, EBPF_LOADED_FROM_USER);
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF running with \"CO-RE\" code, because it was started with the option \"[-]-core\".");
-#endif
- break;
- }
- case EBPF_OPTION_UNITTEST: {
-                // if we cannot run to the end, the unit test is cancelled
- int exit_code = ECANCELED;
- if (ebpf_check_conditions())
- goto unittest;
-
- if (ebpf_adjust_memory_limit())
- goto unittest;
-
- // Load binary in entry mode
- ebpf_ut_initialize_structure(MODE_ENTRY);
- if (ebpf_ut_load_real_binary())
- goto unittest;
-
- ebpf_ut_cleanup_memory();
-
- // Do not load a binary in entry mode
- ebpf_ut_initialize_structure(MODE_ENTRY);
- if (ebpf_ut_load_fake_binary())
- goto unittest;
-
- ebpf_ut_cleanup_memory();
-
- exit_code = 0;
-unittest:
- exit(exit_code);
- }
- default: {
- break;
- }
- }
- }
-
- if (disable_cgroups) {
- ebpf_disable_cgroups();
- }
-
- if (select_threads) {
- disable_all_global_charts();
- uint64_t idx;
- for (idx = 0; idx < EBPF_OPTION_ALL_CHARTS; idx++) {
- if (select_threads & 1<<idx)
- ebpf_enable_specific_chart(&ebpf_modules[idx], disable_cgroups);
- }
- }
-
- // Load apps_groups.conf
- if (ebpf_read_apps_groups_conf(
- &apps_groups_default_target, &apps_groups_root_target, ebpf_user_config_dir, "groups")) {
- netdata_log_info("Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'",
- ebpf_user_config_dir, ebpf_stock_config_dir);
- if (ebpf_read_apps_groups_conf(
- &apps_groups_default_target, &apps_groups_root_target, ebpf_stock_config_dir, "groups")) {
- netdata_log_error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.",
- ebpf_stock_config_dir);
- ebpf_exit();
- }
- } else
- netdata_log_info("Loaded config file '%s/apps_groups.conf'", ebpf_user_config_dir);
-}
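-
-/*
- * Invocation sketch: a bare numeric first argument overrides the update
- * frequency and the long options select specific threads, e.g.
- *
- *    ebpf.plugin 5 --net --cachestat
- *
- * runs only the socket and cachestat threads every 5 seconds, disabling the
- * remaining global charts.
- */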
-
-/*****************************************************************
- *
- * Collector charts
- *
- *****************************************************************/
-
-static char *load_event_stat[NETDATA_EBPF_LOAD_STAT_END] = {"legacy", "co-re"};
-static char *memlock_stat = {"memory_locked"};
-static char *hash_table_stat = {"hash_table"};
-static char *hash_table_core[NETDATA_EBPF_LOAD_STAT_END] = {"per_core", "unique"};
-
-/**
- * Send Hash Table PID data
- *
- * Send all information associated with a specific pid table.
- *
- * @param chart chart id
- * @param idx index position in hash_table_stats
- */
-static inline void ebpf_send_hash_table_pid_data(char *chart, uint32_t idx)
-{
- int i;
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, chart, "");
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *wem = &ebpf_modules[i];
- if (wem->functions.apps_routine)
- write_chart_dimension((char *)wem->info.thread_name,
- (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ?
- wem->hash_table_stats[idx]:
- 0);
- }
- ebpf_write_end_chart();
-}
-
-/**
- * Send Global Hash Table data
- *
- * Send the number of elements stored inside the global hash tables.
- *
- */
-static inline void ebpf_send_global_hash_table_data()
-{
- int i;
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS, "");
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *wem = &ebpf_modules[i];
- write_chart_dimension((char *)wem->info.thread_name,
- (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? NETDATA_CONTROLLER_END: 0);
- }
- ebpf_write_end_chart();
-}
-
-/**
- * Send Statistic Data
- *
- * Send statistic information to netdata.
- */
-void ebpf_send_statistic_data()
-{
- if (!publish_internal_metrics)
- return;
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_THREADS, "");
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *wem = &ebpf_modules[i];
- if (wem->functions.fnct_routine)
- continue;
-
- write_chart_dimension((char *)wem->info.thread_name, (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? 1 : 0);
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_LIFE_TIME, "");
-    for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *wem = &ebpf_modules[i];
-        // Threads like VFS are slow to load, which can produce an invalid number; this is
-        // why the wem->lifetime value is also tested.
- if (wem->functions.fnct_routine)
- continue;
-
- write_chart_dimension((char *)wem->info.thread_name,
- (wem->lifetime && wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ?
- (long long) (wem->lifetime - wem->running_time):
-                              0);
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_LOAD_METHOD, "");
- write_chart_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_LEGACY], (long long)plugin_statistics.legacy);
- write_chart_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_CORE], (long long)plugin_statistics.core);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_KERNEL_MEMORY, "");
- write_chart_dimension(memlock_stat, (long long)plugin_statistics.memlock_kern);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_LOADED, "");
- write_chart_dimension(hash_table_stat, (long long)plugin_statistics.hash_tables);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_PER_CORE, "");
- write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE], (long long)plugin_statistics.hash_percpu);
- write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE], (long long)plugin_statistics.hash_unique);
- ebpf_write_end_chart();
-
- ebpf_send_global_hash_table_data();
-
- ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD);
- ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_DEL);
-
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *wem = &ebpf_modules[i];
- if (!wem->functions.fnct_routine)
- continue;
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, (char *)wem->functions.fcnt_thread_chart_name, "");
- write_chart_dimension((char *)wem->info.thread_name, (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? 1 : 0);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, (char *)wem->functions.fcnt_thread_lifetime_name, "");
- write_chart_dimension((char *)wem->info.thread_name,
- (wem->lifetime && wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ?
- (long long) (wem->lifetime - wem->running_time):
-                              0);
- ebpf_write_end_chart();
- }
-}
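-
-/*
- * Output sketch (assuming the begin/dimension/end helpers emit the standard
- * plugins.d protocol; the chart and dimension ids below are illustrative):
- *
- *    BEGIN netdata.ebpf_threads
- *    SET process = 1
- *    SET socket = 0
- *    END
- */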
-
-/**
- * Update Internal Metric variable
- *
- * By default eBPF.plugin sends internal metrics for netdata, but the user can
- * disable this.
- *
- * The function updates the variable used to send charts.
- */
-static void update_internal_metric_variable()
-{
- const char *s = getenv("NETDATA_INTERNALS_MONITORING");
- if (s && *s && strcmp(s, "NO") == 0)
- publish_internal_metrics = false;
-}
-
-/**
- * Create Thread Chart
- *
- * Write to standard output current values for threads charts.
- *
- * @param name is the chart name
- * @param title chart title.
- * @param units chart units
- * @param order is the chart order
- * @param update_every time used to update charts
- * @param module a module to create a specific chart.
- */
-static void ebpf_create_thread_chart(char *name,
- char *title,
- char *units,
- int order,
- int update_every,
- ebpf_module_t *module)
-{
- // common call for specific and all charts.
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- name,
- "",
- title,
- units,
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order,
- update_every,
- "main");
-
- if (module) {
- ebpf_write_global_dimension((char *)module->info.thread_name,
- (char *)module->info.thread_name,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
- return;
- }
-
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *em = &ebpf_modules[i];
- if (em->functions.fnct_routine)
- continue;
-
- ebpf_write_global_dimension((char *)em->info.thread_name,
- (char *)em->info.thread_name,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
- }
-}
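-
-/*
- * Output sketch (assuming ebpf_write_chart_cmd() and
- * ebpf_write_global_dimension() emit the standard plugins.d protocol; the
- * ids, priority and options below are illustrative):
- *
- *    CHART netdata.ebpf_threads '' 'Threads running.' 'boolean' 'ebpf' '' line 140000 5 '' 'main'
- *    DIMENSION process process absolute 1 1
- *    DIMENSION socket socket absolute 1 1
- */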
-
-/**
- * Create chart for Load Thread
- *
- * Write to standard output current values for load mode.
- *
- * @param update_every time used to update charts
- */
-static inline void ebpf_create_statistic_load_chart(int update_every)
-{
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- NETDATA_EBPF_LOAD_METHOD,
- "",
- "Load info.",
- "methods",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_EBPF_ORDER_STAT_LOAD_METHOD,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_write_global_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_LEGACY],
- load_event_stat[NETDATA_EBPF_LOAD_STAT_LEGACY],
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_global_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_CORE],
- load_event_stat[NETDATA_EBPF_LOAD_STAT_CORE],
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-}
-
-/**
- * Create chart for Kernel Memory
- *
- * Write to standard output current values for allocated memory.
- *
- * @param update_every time used to update charts
- */
-static inline void ebpf_create_statistic_kernel_memory(int update_every)
-{
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- NETDATA_EBPF_KERNEL_MEMORY,
- "",
- "Memory allocated for hash tables.",
- "bytes",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_EBPF_ORDER_STAT_KERNEL_MEMORY,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_write_global_dimension(memlock_stat,
- memlock_stat,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-}
-
-/**
- * Create chart Hash Table
- *
- * Write to standard output number of hash tables used with this software.
- *
- * @param update_every time used to update charts
- */
-static inline void ebpf_create_statistic_hash_tables(int update_every)
-{
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- NETDATA_EBPF_HASH_TABLES_LOADED,
- "",
- "Number of hash tables loaded.",
- "hash tables",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_EBPF_ORDER_STAT_HASH_TABLES,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_write_global_dimension(hash_table_stat,
- hash_table_stat,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-}
-
-/**
- * Create chart for percpu stats
- *
- * Write to standard output current values for threads.
- *
- * @param update_every time used to update charts
- */
-static inline void ebpf_create_statistic_hash_per_core(int update_every)
-{
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- NETDATA_EBPF_HASH_TABLES_PER_CORE,
- "",
- "How threads are loading hash/array tables.",
- "threads",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_EBPF_ORDER_STAT_HASH_CORE,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_write_global_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE],
- hash_table_core[NETDATA_EBPF_THREAD_PER_CORE],
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_global_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE],
- hash_table_core[NETDATA_EBPF_THREAD_UNIQUE],
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-}
-
-/**
- * Hash table global elements
- *
- * Write to standard output current values inside global tables.
- *
- * @param update_every time used to update charts
- */
-static void ebpf_create_statistic_hash_global_elements(int update_every)
-{
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS,
- "",
- "Controllers inside global table",
- "rows",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_EBPF_ORDER_STAT_HASH_GLOBAL_TABLE_TOTAL,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_write_global_dimension((char *)ebpf_modules[i].info.thread_name,
- (char *)ebpf_modules[i].info.thread_name,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
- }
-}
-
-/**
- * Hash table PID elements
- *
- * Create a chart showing elements inserted into or removed from the PID tables.
- *
- * @param update_every time used to update charts
- * @param id chart id
- * @param title chart title
- * @param order the order in which the chart will be shown on the dashboard.
- */
-static void ebpf_create_statistic_hash_pid_table(int update_every, char *id, char *title, int order)
-{
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- id,
- "",
- title,
- "rows",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *wem = &ebpf_modules[i];
- if (wem->functions.apps_routine)
- ebpf_write_global_dimension((char *)wem->info.thread_name,
- (char *)wem->info.thread_name,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
- }
-}
-
-/**
- * Create Statistics Charts
- *
- * Create charts that will show statistics related to eBPF plugin.
- *
- * @param update_every time used to update charts
- */
-static void ebpf_create_statistic_charts(int update_every)
-{
- static char create_charts = 1;
- update_internal_metric_variable();
- if (!publish_internal_metrics)
- return;
-
- if (!create_charts)
- return;
-
- create_charts = 0;
-
- ebpf_create_thread_chart(NETDATA_EBPF_THREADS,
- "Threads running.",
- "boolean",
- NETDATA_EBPF_ORDER_STAT_THREADS,
- update_every,
- NULL);
- /*
-#ifdef NETDATA_DEV_MODE
- EBPF_PLUGIN_FUNCTIONS(EBPF_FUNCTION_THREAD, EBPF_PLUGIN_THREAD_FUNCTION_DESCRIPTION);
-#endif
- */
-
- ebpf_create_thread_chart(NETDATA_EBPF_LIFE_TIME,
- "Time remaining for thread.",
- "seconds",
- NETDATA_EBPF_ORDER_STAT_LIFE_TIME,
- update_every,
- NULL);
- /*
-#ifdef NETDATA_DEV_MODE
- EBPF_PLUGIN_FUNCTIONS(EBPF_FUNCTION_THREAD, EBPF_PLUGIN_THREAD_FUNCTION_DESCRIPTION);
-#endif
- */
-
- int i,j;
- char name[256];
- for (i = 0, j = NETDATA_EBPF_ORDER_FUNCTION_PER_THREAD; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *em = &ebpf_modules[i];
- if (!em->functions.fnct_routine)
- continue;
-
- em->functions.order_thread_chart = j;
- snprintfz(name, sizeof(name) - 1, "%s_%s", NETDATA_EBPF_THREADS, em->info.thread_name);
- em->functions.fcnt_thread_chart_name = strdupz(name);
- ebpf_create_thread_chart(name,
- "Threads running.",
- "boolean",
- j++,
- update_every,
- em);
-#ifdef NETDATA_DEV_MODE
- EBPF_PLUGIN_FUNCTIONS(em->functions.fcnt_name, em->functions.fcnt_desc);
-#endif
-
- em->functions.order_thread_lifetime = j;
- snprintfz(name, sizeof(name) - 1, "%s_%s", NETDATA_EBPF_LIFE_TIME, em->info.thread_name);
- em->functions.fcnt_thread_lifetime_name = strdupz(name);
- ebpf_create_thread_chart(name,
- "Time remaining for thread.",
- "seconds",
- j++,
- update_every,
- em);
-#ifdef NETDATA_DEV_MODE
- EBPF_PLUGIN_FUNCTIONS(em->functions.fcnt_name, em->functions.fcnt_desc);
-#endif
- }
-
- ebpf_create_statistic_load_chart(update_every);
-
- ebpf_create_statistic_kernel_memory(update_every);
-
- ebpf_create_statistic_hash_tables(update_every);
-
- ebpf_create_statistic_hash_per_core(update_every);
-
- ebpf_create_statistic_hash_global_elements(update_every);
-
- ebpf_create_statistic_hash_pid_table(update_every,
- NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS,
- "Elements inserted into PID table",
- NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_ADDED);
-
- ebpf_create_statistic_hash_pid_table(update_every,
- NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS,
- "Elements removed from PID table",
- NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_REMOVED);
-
- fflush(stdout);
-}
-
-/*****************************************************************
- *
- * COLLECTOR ENTRY POINT
- *
- *****************************************************************/
-
-/**
- * Update PID file
- *
- * Update the content of PID file
- *
- * @param filename is the full name of the file.
- * @param pid that identifies the process
- */
-static void ebpf_update_pid_file(char *filename, pid_t pid)
-{
- FILE *fp = fopen(filename, "w");
- if (!fp)
- return;
-
- fprintf(fp, "%d", pid);
- fclose(fp);
-}
-
-/**
- * Get Process Name
- *
- * Get process name from /proc/PID/status
- *
- * @param pid that identifies the process
- */
-static char *ebpf_get_process_name(pid_t pid)
-{
- char *name = NULL;
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "/proc/%d/status", pid);
-
- procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) {
- netdata_log_error("Cannot open %s", filename);
- return name;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
- return name;
-
- unsigned long i, lines = procfile_lines(ff);
- for(i = 0; i < lines ; i++) {
- char *cmp = procfile_lineword(ff, i, 0);
- if (!strcmp(cmp, "Name:")) {
- name = strdupz(procfile_lineword(ff, i, 1));
- break;
- }
- }
-
- procfile_close(ff);
-
- return name;
-}
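-
-/*
- * File anatomy sketch: /proc/PID/status begins with lines such as
- *
- *    Name:   bash
- *    Umask:  0022
- *    State:  S (sleeping)
- *
- * With the separators " \t" the first word of the matching line is "Name:"
- * and the second word is the process name this function duplicates and
- * returns.
- */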
-
-/**
- * Read Previous PID
- *
- * @param filename is the full name of the file.
- *
- * @return It returns the PID used during previous execution on success or 0 otherwise
- */
-static pid_t ebpf_read_previous_pid(char *filename)
-{
- FILE *fp = fopen(filename, "r");
- if (!fp)
- return 0;
-
- char buffer[64];
- size_t length = fread(buffer, sizeof(*buffer), 63, fp);
- pid_t old_pid = 0;
- if (length) {
- if (length > 63)
- length = 63;
-
- buffer[length] = '\0';
- old_pid = (pid_t) str2uint32_t(buffer, NULL);
- }
- fclose(fp);
-
- return old_pid;
-}
-
-/**
- * Kill previous process
- *
- * Kill the previous process if it was not properly closed.
- *
- * @param filename is the full name of the file.
- * @param pid that identifies the process
- */
-static void ebpf_kill_previous_process(char *filename, pid_t pid)
-{
- pid_t old_pid = ebpf_read_previous_pid(filename);
- if (!old_pid)
- return;
-
-    // If the process name cannot be read, the previous process is no longer running
- char *prev_name = ebpf_get_process_name(old_pid);
- if (!prev_name)
- return;
-
- char *current_name = ebpf_get_process_name(pid);
-
-    if (current_name && !strcmp(prev_name, current_name))
- kill(old_pid, SIGKILL);
-
- freez(prev_name);
- freez(current_name);
-
-    // wait 300 milliseconds before starting the new plugin
- sleep_usec(USEC_PER_MS * 300);
-}
-
-/**
- * PID file
- *
- * Write the filename for PID inside the given vector.
- *
- * @param filename vector where we will store the name.
- * @param length number of bytes available in filename vector
- */
-void ebpf_pid_file(char *filename, size_t length)
-{
- snprintfz(filename, length, "%s/var/run/ebpf.pid", netdata_configured_host_prefix);
-}
-
-/**
- * Manage PID
- *
- * This function kills another instance of the eBPF plugin when necessary and updates the PID file content.
- *
- * @param pid that identifies the process
- */
-static void ebpf_manage_pid(pid_t pid)
-{
- char filename[FILENAME_MAX + 1];
- ebpf_pid_file(filename, FILENAME_MAX);
-
- ebpf_kill_previous_process(filename, pid);
- ebpf_update_pid_file(filename, pid);
-}
-
-/**
- * Set start routine
- *
- * Set static routine before threads to be created.
- */
-static void ebpf_set_static_routine()
-{
-    int i;
-    for (i = 0; ebpf_modules[i].info.thread_name; i++) {
-        ebpf_threads[i].start_routine = ebpf_modules[i].functions.start_routine;
-    }
-}
-
-/**
- * Entry point
- *
- * @param argc the number of arguments
- * @param argv the pointer to the arguments
- *
- * @return it returns 0 on success and another integer otherwise
- */
-int main(int argc, char **argv)
-{
- clocks_init();
- nd_log_initialize_for_external_plugins("ebpf.plugin");
-
- main_thread_id = gettid();
-
- set_global_variables();
- ebpf_parse_args(argc, argv);
- ebpf_manage_pid(getpid());
-
- if (ebpf_check_conditions())
- return 2;
-
- if (ebpf_adjust_memory_limit())
- return 3;
-
- signal(SIGINT, ebpf_stop_threads);
- signal(SIGQUIT, ebpf_stop_threads);
- signal(SIGTERM, ebpf_stop_threads);
- signal(SIGPIPE, ebpf_stop_threads);
-
- ebpf_start_pthread_variables();
-
- netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
- if(verify_netdata_host_prefix(true) == -1) ebpf_exit(6);
-
- ebpf_allocate_common_vectors();
-
-#ifdef LIBBPF_MAJOR_VERSION
- libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
-#endif
-
- ebpf_read_local_addresses_unsafe();
- read_local_ports("/proc/net/tcp", IPPROTO_TCP);
- read_local_ports("/proc/net/tcp6", IPPROTO_TCP);
- read_local_ports("/proc/net/udp", IPPROTO_UDP);
- read_local_ports("/proc/net/udp6", IPPROTO_UDP);
-
- ebpf_set_static_routine();
-
- cgroup_integration_thread.thread = mallocz(sizeof(netdata_thread_t));
- cgroup_integration_thread.start_routine = ebpf_cgroup_integration;
-
- netdata_thread_create(cgroup_integration_thread.thread, cgroup_integration_thread.name,
- NETDATA_THREAD_OPTION_DEFAULT, ebpf_cgroup_integration, NULL);
-
- int i;
- for (i = 0; ebpf_threads[i].name != NULL; i++) {
- struct netdata_static_thread *st = &ebpf_threads[i];
-
- ebpf_module_t *em = &ebpf_modules[i];
- em->thread = st;
- em->thread_id = i;
- if (em->enabled != NETDATA_THREAD_EBPF_NOT_RUNNING) {
- st->thread = mallocz(sizeof(netdata_thread_t));
- em->enabled = NETDATA_THREAD_EBPF_RUNNING;
- em->lifetime = EBPF_NON_FUNCTION_LIFE_TIME;
- netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em);
- } else {
- em->lifetime = EBPF_DEFAULT_LIFETIME;
- }
- }
-
- usec_t step = USEC_PER_SEC;
- heartbeat_t hb;
- heartbeat_init(&hb);
- int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT;
- int update_apps_list = update_apps_every - 1;
- int process_maps_per_core = ebpf_modules[EBPF_MODULE_PROCESS_IDX].maps_per_core;
-    // Plugin will be killed when it receives a signal
- for ( ; !ebpf_plugin_exit; global_iterations_counter++) {
- (void)heartbeat_next(&hb, step);
-
- if (global_iterations_counter % EBPF_DEFAULT_UPDATE_EVERY == 0) {
- pthread_mutex_lock(&lock);
- ebpf_create_statistic_charts(EBPF_DEFAULT_UPDATE_EVERY);
-
- ebpf_send_statistic_data();
- pthread_mutex_unlock(&lock);
- fflush(stdout);
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- pthread_mutex_lock(&collect_data_mutex);
- if (++update_apps_list == update_apps_every) {
- update_apps_list = 0;
- cleanup_exited_pids();
- collect_data_for_all_processes(process_pid_fd, process_maps_per_core);
-
- pthread_mutex_lock(&lock);
- ebpf_create_apps_charts(apps_groups_root_target);
- pthread_mutex_unlock(&lock);
- }
- pthread_mutex_unlock(&collect_data_mutex);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-
- ebpf_stop_threads(0);
-
- return 0;
-}
diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf
deleted file mode 100644
index 5cb844b20..000000000
--- a/collectors/ebpf.plugin/ebpf.d.conf
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-# Global options
-#
-# The `ebpf load mode` option accepts the following values :
-# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
-# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
-# or `cgroups.plugin`.
-# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change the settings
-# `apps` and `cgroups` to 'no'.
-#
-# The `update every` option defines the number of seconds used to read data from the kernel and send it to netdata
-#
-# The `pid table size` defines the maximum number of PIDs stored in the application hash tables.
-#
-# The `btf path` specifies where to find the BTF files.
-#
-# The `maps per core` defines whether hash tables will be per core or not. This option is ignored on kernels older than 4.15.
-#
-# The `lifetime` defines the length of time a thread will run when it is enabled by a function.
-#
-[global]
- ebpf load mode = entry
- apps = no
- cgroups = no
- update every = 5
- pid table size = 32768
- btf path = /sys/kernel/btf/
- maps per core = yes
- lifetime = 300
-
-#
-# eBPF Programs
-#
-# The eBPF collector has the following eBPF programs:
-#
-# `cachestat` : Make charts for kernel functions related to page cache.
-# `dcstat` : Make charts for kernel functions related to directory cache.
-# `disk` : Monitor I/O latencies for disks
-# `fd` : This eBPF program creates charts that show information about file manipulation.
-# `filesystem`: Monitor calls for functions used to manipulate specific filesystems
-# `hardirq` : Monitor latency of serving hardware interrupt requests (hard IRQs).
-# `mdflush`   : Monitors flush counts for multi-device (md) arrays.
-# `mount` : Monitor calls for syscalls mount and umount
-# `oomkill` : This eBPF program creates a chart that shows which process got OOM killed and when.
-# `process` : This eBPF program creates charts that show information about process life.
-# `shm` : Monitor calls for syscalls shmget, shmat, shmdt and shmctl.
-# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
-# bandwidth consumed by each.
-# `softirq` : Monitor latency of serving software interrupt requests (soft IRQs).
-# `sync` : Monitor calls for syscall sync(2).
-# `swap` : Monitor calls for internal swap functions.
-# `vfs` : This eBPF program creates charts that show information about process VFS IO, VFS file manipulation and
-# files removed.
-#
-# When the plugin detects that the system supports BTF, it enables integration with apps.plugin.
-#
-[ebpf programs]
- cachestat = yes
- dcstat = no
- disk = no
- fd = yes
- filesystem = no
- hardirq = no
- mdflush = no
- mount = yes
- oomkill = yes
- process = yes
- shm = yes
- socket = no
- softirq = yes
- sync = no
- swap = yes
- vfs = no
- network connections = no
diff --git a/collectors/ebpf.plugin/ebpf.d/cachestat.conf b/collectors/ebpf.plugin/ebpf.d/cachestat.conf
deleted file mode 100644
index 9c51b2c52..000000000
--- a/collectors/ebpf.plugin/ebpf.d/cachestat.conf
+++ /dev/null
@@ -1,42 +0,0 @@
-# The `ebpf load mode` option accepts the following values :
-# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
-# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
-# or `cgroups.plugin`.
-# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
-# the settings `apps` and `cgroups` to 'no'.
-#
-# The `pid table size` defines the maximum number of PIDs stored inside the application hash table.
-#
-# The `ebpf type format` option accepts the following values :
-# `auto`  : The eBPF collector will investigate the hardware and select between the next two options.
-# `legacy`: The eBPF collector will load the legacy code. Note: This has a bigger overhead.
-# `co-re` : The eBPF collector will use the latest tracing method. Note: This is not available on all platforms.
-#
-# The `ebpf co-re tracing` option accepts the following values:
-# `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead added to the host.
-# `probe` : This is the same as legacy code.
-#
-# The `collect pid` option defines which PID is stored inside the hash tables and accepts the following values:
-# `real parent`: Only stores the real parent PID.
-# `parent`     : Only stores the parent PID.
-# `all`        : Stores all PIDs used by the software. This is the most expensive option.
-#
-# The `maps per core` defines whether hash tables will be per core or not. This option is ignored on kernels older than 4.6.
-#
-# The `lifetime` defines the length of time a thread will run when it is enabled by a function.
-#
-# Uncomment lines to define specific options for thread.
-[global]
-# ebpf load mode = entry
-# apps = yes
-# cgroups = no
-# update every = 10
-# pid table size = 32768
- ebpf type format = auto
- ebpf co-re tracing = trampoline
- collect pid = real parent
-# maps per core = yes
- lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/dcstat.conf b/collectors/ebpf.plugin/ebpf.d/dcstat.conf
deleted file mode 100644
index 614d814e6..000000000
--- a/collectors/ebpf.plugin/ebpf.d/dcstat.conf
+++ /dev/null
@@ -1,40 +0,0 @@
-# The `ebpf load mode` option accepts the following values :
-# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
-# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
-# or `cgroups.plugin`.
-# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
-# the settings `apps` and `cgroups` to 'no'.
-#
-# The `ebpf type format` option accepts the following values :
-# `auto`  : The eBPF collector will investigate the hardware and select between the next two options.
-# `legacy`: The eBPF collector will load the legacy code. Note: This has a bigger overhead.
-# `co-re` : The eBPF collector will use the latest tracing method. Note: This is not available on all platforms.
-#
-# The `ebpf co-re tracing` option accepts the following values:
-# `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead added to the host.
-# `probe` : This is the same as legacy code.
-#
-# The `collect pid` option defines which PID is stored inside the hash tables and accepts the following values:
-# `real parent`: Only stores the real parent PID.
-# `parent`     : Only stores the parent PID.
-# `all`        : Stores all PIDs used by the software. This is the most expensive option.
-#
-# The `maps per core` defines whether hash tables will be per core or not. This option is ignored on kernels older than 4.6.
-#
-# The `lifetime` defines the length of time a thread will run when it is enabled by a function.
-#
-# Uncomment lines to define specific options for thread.
-[global]
-# ebpf load mode = entry
-# apps = yes
-# cgroups = no
-# update every = 10
-# pid table size = 32768
- ebpf type format = auto
- ebpf co-re tracing = trampoline
- collect pid = real parent
-# maps per core = yes
- lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/disk.conf b/collectors/ebpf.plugin/ebpf.d/disk.conf
deleted file mode 100644
index c5a0a2708..000000000
--- a/collectors/ebpf.plugin/ebpf.d/disk.conf
+++ /dev/null
@@ -1,12 +0,0 @@
-# The `ebpf load mode` option accepts the following values :
-# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
-# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The `lifetime` defines the length of time a thread will run when it is enabled by a function.
-#
-[global]
-# ebpf load mode = entry
-# update every = 10
- lifetime = 300
-
diff --git a/collectors/ebpf.plugin/ebpf.d/ebpf_kernel_reject_list.txt b/collectors/ebpf.plugin/ebpf.d/ebpf_kernel_reject_list.txt
deleted file mode 100644
index 539bf357f..000000000
--- a/collectors/ebpf.plugin/ebpf.d/ebpf_kernel_reject_list.txt
+++ /dev/null
@@ -1 +0,0 @@
-Ubuntu 4.18.0
diff --git a/collectors/ebpf.plugin/ebpf.d/fd.conf b/collectors/ebpf.plugin/ebpf.d/fd.conf
deleted file mode 100644
index d48230323..000000000
--- a/collectors/ebpf.plugin/ebpf.d/fd.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
-# or `cgroups.plugin`.
-# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
-# the settings `apps` and `cgroups` to 'no'.
-#
-# The `pid table size` option defines the maximum number of PIDs stored inside the hash table.
-#
-# The `maps per core` option defines whether hash tables are allocated per core; it is ignored on kernels older than 4.6.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-# Uncomment lines to define specific options for the thread.
-[global]
-# ebpf load mode = entry
-# apps = yes
-# cgroups = no
-# update every = 10
-# pid table size = 32768
- ebpf type format = auto
- ebpf co-re tracing = trampoline
-# maps per core = yes
- lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/filesystem.conf b/collectors/ebpf.plugin/ebpf.d/filesystem.conf
deleted file mode 100644
index 209abba77..000000000
--- a/collectors/ebpf.plugin/ebpf.d/filesystem.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-# The eBPF collector also creates charts for each running application through an integration with `apps.plugin`.
-# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
-# 'no'.
-#
-[global]
-# ebpf load mode = entry
-# update every = 10
- lifetime = 300
-
-# All monitored filesystems are named 'NAMEdist', where NAME is the filesystem name and 'dist' refers to the distribution (histogram) charts.
-[filesystem]
- btrfsdist = yes
- ext4dist = yes
- nfsdist = yes
- xfsdist = yes
- zfsdist = yes
diff --git a/collectors/ebpf.plugin/ebpf.d/functions.conf b/collectors/ebpf.plugin/ebpf.d/functions.conf
deleted file mode 100644
index a4f57f641..000000000
--- a/collectors/ebpf.plugin/ebpf.d/functions.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-#[global]
-# update every = 5
-
diff --git a/collectors/ebpf.plugin/ebpf.d/hardirq.conf b/collectors/ebpf.plugin/ebpf.d/hardirq.conf
deleted file mode 100644
index 6a47a94bf..000000000
--- a/collectors/ebpf.plugin/ebpf.d/hardirq.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-[global]
-# ebpf load mode = entry
-# update every = 10
- lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/mdflush.conf b/collectors/ebpf.plugin/ebpf.d/mdflush.conf
deleted file mode 100644
index ea97ebe85..000000000
--- a/collectors/ebpf.plugin/ebpf.d/mdflush.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-[global]
-# ebpf load mode = entry
-# update every = 1
- lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/mount.conf b/collectors/ebpf.plugin/ebpf.d/mount.conf
deleted file mode 100644
index ff9a2948c..000000000
--- a/collectors/ebpf.plugin/ebpf.d/mount.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The `ebpf type format` option accepts the following values:
-# `auto` : The eBPF collector will investigate the hardware and select between the next two options.
-# `legacy`: The eBPF collector will load the legacy code. Note: This has a bigger overhead.
-# `co-re` : The eBPF collector will use the latest tracing method. Note: This is not available on all platforms.
-#
-# The `ebpf co-re tracing` option accepts the following values:
-# `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead it adds to the host.
-# `tracepoint`: When available, the eBPF collector will use a kernel tracepoint to monitor the syscall.
-# `probe`     : This is the same as the legacy code.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-[global]
-# ebpf load mode = entry
-# update every = 1
- ebpf type format = auto
- ebpf co-re tracing = trampoline
- lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/network.conf b/collectors/ebpf.plugin/ebpf.d/network.conf
deleted file mode 100644
index 99c32edc1..000000000
--- a/collectors/ebpf.plugin/ebpf.d/network.conf
+++ /dev/null
@@ -1,66 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
-# or `cgroups.plugin`.
-# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
-# the settings `apps` and `cgroups` to 'no'.
-#
-# The following options change the hash table sizes:
-# `bandwidth table size`: Maximum number of connections monitored
-# `ipv4 connection table size`: Maximum number of IPv4 connections monitored
-# `ipv6 connection table size`: Maximum number of IPv6 connections monitored
-# `udp connection table size`: Maximum number of UDP connections monitored
-#
-# The `ebpf type format` option accepts the following values:
-# `auto` : The eBPF collector will investigate the hardware and select between the next two options.
-# `legacy`: The eBPF collector will load the legacy code. Note: This has a bigger overhead.
-# `co-re` : The eBPF collector will use the latest tracing method. Note: This is not available on all platforms.
-#
-# The `ebpf co-re tracing` option accepts the following values:
-# `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead it adds to the host.
-# `tracepoint`: When available, the eBPF collector will use a kernel tracepoint to monitor the syscall.
-# `probe`     : This is the same as the legacy code.
-#
-# The `maps per core` option defines whether hash tables are allocated per core; it is ignored on kernels older than 4.6.
-#
-# The `collect pid` option defines the PID stored inside hash tables and accepts the following options:
-#  `real parent`: Only stores the real parent PID.
-#  `parent`     : Only stores the parent PID.
-#  `all`        : Stores all PIDs used by software. This is the most expensive option.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-# Uncomment lines to define specific options for the thread.
-[global]
-# ebpf load mode = entry
-# apps = yes
-# cgroups = no
-# update every = 10
- bandwidth table size = 16384
- socket monitoring table size = 16384
- udp connection table size = 4096
- ebpf type format = auto
- ebpf co-re tracing = probe
- maps per core = no
- collect pid = all
- lifetime = 300
-
-#
-# Network Connection
-#
-# This feature is a work in progress (WIP). A narrowed filter example follows this hunk.
-#
-[network connections]
- enabled = yes
- resolve hostnames = no
- resolve service names = yes
- ports = *
-# ips = !127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 !::1/128
- ips = *
- hostnames = *
-
-[service name]
- 19999 = Netdata
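A hedged example of how the `[network connections]` filters compose, reusing only the syntax already shown above (`!` negates, `*` matches everything; the port and subnet values are illustrative, not defaults):

    [network connections]
        enabled = yes
        resolve hostnames = no
        resolve service names = yes
        # watch only HTTPS traffic from the private 10/8 range, excluding loopback
        ports = 443
        ips = !127.0.0.1/8 10.0.0.0/8
        hostnames = *

    [service name]
        # map the filtered port to a friendly name, like the 19999 = Netdata entry above
        443 = https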
diff --git a/collectors/ebpf.plugin/ebpf.d/oomkill.conf b/collectors/ebpf.plugin/ebpf.d/oomkill.conf
deleted file mode 100644
index ea97ebe85..000000000
--- a/collectors/ebpf.plugin/ebpf.d/oomkill.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-[global]
-# ebpf load mode = entry
-# update every = 1
- lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/process.conf b/collectors/ebpf.plugin/ebpf.d/process.conf
deleted file mode 100644
index 150c57920..000000000
--- a/collectors/ebpf.plugin/ebpf.d/process.conf
+++ /dev/null
@@ -1,31 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
-# or `cgroups.plugin`.
-# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
-# the settings `apps` and `cgroups` to 'no'.
-#
-# The `pid table size` option defines the maximum number of PIDs stored inside the hash table.
-#
-# The `collect pid` option defines the PID stored inside hash tables and accepts the following options:
-#  `real parent`: Only stores the real parent PID.
-#  `parent`     : Only stores the parent PID.
-#  `all`        : Stores all PIDs used by software. This is the most expensive option.
-#
-# The `maps per core` option defines whether hash tables are allocated per core; it is ignored on kernels older than 4.6.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-# Uncomment lines to define specific options for the thread.
-[global]
-# ebpf load mode = entry
-# apps = yes
-# cgroups = no
-# update every = 10
-# pid table size = 32768
- collect pid = real parent
-# maps per core = yes
- lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/shm.conf b/collectors/ebpf.plugin/ebpf.d/shm.conf
deleted file mode 100644
index 95fb54e0f..000000000
--- a/collectors/ebpf.plugin/ebpf.d/shm.conf
+++ /dev/null
@@ -1,42 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
-# or `cgroups.plugin`.
-# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
-# the settings `apps` and `cgroups` to 'no'.
-#
-# The `ebpf type format` option accepts the following values:
-# `auto` : The eBPF collector will investigate the hardware and select between the next two options.
-# `legacy`: The eBPF collector will load the legacy code. Note: This has a bigger overhead.
-# `co-re` : The eBPF collector will use the latest tracing method. Note: This is not available on all platforms.
-#
-# The `ebpf co-re tracing` option accepts the following values:
-# `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead it adds to the host.
-# `tracepoint`: When available, the eBPF collector will use a kernel tracepoint to monitor the syscall.
-# `probe`     : This is the same as the legacy code.
-#
-# The `maps per core` option defines whether hash tables are allocated per core; it is ignored on kernels older than 4.6.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-# Uncomment lines to define specific options for the thread.
-[global]
-# ebpf load mode = entry
-# apps = yes
-# cgroups = no
-# update every = 10
-# pid table size = 32768
- ebpf type format = auto
- ebpf co-re tracing = trampoline
-# maps per core = yes
- lifetime = 300
-
-# List of monitored syscalls (a minimal exerciser follows this hunk).
-[syscalls]
- shmget = yes
- shmat = yes
- shmdt = yes
- shmctl = yes
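To see the four `[syscalls]` dimensions above move, a process only needs to touch the System V shared-memory API. A self-contained exerciser using the standard SysV calls (nothing Netdata-specific is assumed):

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        /* Each call below increments the matching eBPF chart dimension. */
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600); /* shmget */
        if (id < 0) { perror("shmget"); return 1; }

        void *addr = shmat(id, NULL, 0);                      /* shmat */
        if (addr != (void *)-1)
            shmdt(addr);                                      /* shmdt */

        shmctl(id, IPC_RMID, NULL);                           /* shmctl */
        return 0;
    }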
diff --git a/collectors/ebpf.plugin/ebpf.d/softirq.conf b/collectors/ebpf.plugin/ebpf.d/softirq.conf
deleted file mode 100644
index 6a47a94bf..000000000
--- a/collectors/ebpf.plugin/ebpf.d/softirq.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-[global]
-# ebpf load mode = entry
-# update every = 10
- lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/swap.conf b/collectors/ebpf.plugin/ebpf.d/swap.conf
deleted file mode 100644
index 29d9b4204..000000000
--- a/collectors/ebpf.plugin/ebpf.d/swap.conf
+++ /dev/null
@@ -1,34 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
-# or `cgroups.plugin`.
-# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
-# the settings `apps` and `cgroups` to 'no'.
-#
-# The `ebpf type format` option accepts the following values:
-# `auto` : The eBPF collector will investigate the hardware and select between the next two options.
-# `legacy`: The eBPF collector will load the legacy code. Note: This has a bigger overhead.
-# `co-re` : The eBPF collector will use the latest tracing method. Note: This is not available on all platforms.
-#
-# The `ebpf co-re tracing` option accepts the following values:
-# `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead it adds to the host.
-# `probe`     : This is the same as the legacy code.
-#
-# The `maps per core` option defines whether hash tables are allocated per core; it is ignored on kernels older than 4.6.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-# Uncomment lines to define specific options for the thread.
-[global]
-# ebpf load mode = entry
-# apps = yes
-# cgroups = no
-# update every = 10
-# pid table size = 32768
- ebpf type format = auto
- ebpf co-re tracing = trampoline
-# maps per core = yes
- lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/sync.conf b/collectors/ebpf.plugin/ebpf.d/sync.conf
deleted file mode 100644
index a086ed4db..000000000
--- a/collectors/ebpf.plugin/ebpf.d/sync.conf
+++ /dev/null
@@ -1,43 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
-# or `cgroups.plugin`.
-# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
-# the settings `apps` and `cgroups` to 'no'.
-#
-# The `ebpf type format` option accepts the following values:
-# `auto` : The eBPF collector will investigate the hardware and select between the next two options.
-# `legacy`: The eBPF collector will load the legacy code. Note: This has a bigger overhead.
-# `co-re` : The eBPF collector will use the latest tracing method. Note: This is not available on all platforms.
-#
-# The `ebpf co-re tracing` option accepts the following values:
-# `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead it adds to the host.
-# `tracepoint`: When available, the eBPF collector will use a kernel tracepoint to monitor the syscall.
-# `probe`     : This is the same as the legacy code.
-#
-# The `maps per core` option defines whether hash tables are allocated per core; it is ignored on kernels older than 4.6.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-# Uncomment lines to define specific options for the thread.
-[global]
-# ebpf load mode = entry
-# apps = yes
-# cgroups = no
-# update every = 10
- ebpf type format = auto
- ebpf co-re tracing = trampoline
-# maps per core = yes
- lifetime = 300
-
-# List of monitored syscalls (a minimal exerciser follows this hunk).
-[syscalls]
- sync = yes
- msync = yes
- fsync = yes
- fdatasync = yes
- syncfs = yes
- sync_file_range = yes
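As with the shm thread, any process calling these syscalls will register on the charts. A short exerciser covering three of the six dimensions above (the file path is a hypothetical scratch location; msync, syncfs, and sync_file_range follow the same pattern):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/tmp/sync_demo", O_CREAT | O_WRONLY, 0600);
        if (fd < 0) { perror("open"); return 1; }

        if (write(fd, "x", 1) == 1) {
            fsync(fd);      /* fsync dimension */
            fdatasync(fd);  /* fdatasync dimension */
        }
        sync();             /* sync dimension */
        close(fd);
        unlink("/tmp/sync_demo");
        return 0;
    }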
diff --git a/collectors/ebpf.plugin/ebpf.d/vfs.conf b/collectors/ebpf.plugin/ebpf.d/vfs.conf
deleted file mode 100644
index f511581b8..000000000
--- a/collectors/ebpf.plugin/ebpf.d/vfs.conf
+++ /dev/null
@@ -1,35 +0,0 @@
-# The `ebpf load mode` option accepts the following values:
-# `entry` : The eBPF collector only monitors calls to the functions, and does not show charts related to errors.
-# `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
-# or `cgroups.plugin`.
-# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
-# the settings `apps` and `cgroups` to 'no'.
-#
-# The `ebpf type format` option accepts the following values:
-# `auto` : The eBPF collector will investigate the hardware and select between the next two options.
-# `legacy`: The eBPF collector will load the legacy code. Note: This has a bigger overhead.
-# `co-re` : The eBPF collector will use the latest tracing method. Note: This is not available on all platforms.
-#
-# The `ebpf co-re tracing` option accepts the following values:
-# `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead it adds to the host.
-# `tracepoint`: When available, the eBPF collector will use a kernel tracepoint to monitor the syscall.
-# `probe`     : This is the same as the legacy code.
-#
-# The `maps per core` option defines whether hash tables are allocated per core; it is ignored on kernels older than 4.6.
-#
-# The `lifetime` option defines how long a thread will run after it is enabled by a function.
-#
-# Uncomment lines to define specific options for the thread.
-[global]
-# ebpf load mode = entry
-# apps = yes
-# cgroups = no
-# update every = 10
-# pid table size = 32768
- ebpf type format = auto
- ebpf co-re tracing = trampoline
-# maps per core = yes
- lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
deleted file mode 100644
index ad7c5a94c..000000000
--- a/collectors/ebpf.plugin/ebpf.h
+++ /dev/null
@@ -1,393 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_COLLECTOR_EBPF_H
-#define NETDATA_COLLECTOR_EBPF_H 1
-
-#ifndef __FreeBSD__
-#include <linux/perf_event.h>
-#endif
-#include <stdint.h>
-#include <errno.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <dlfcn.h>
-
-#include <fcntl.h>
-#include <ctype.h>
-#include <dirent.h>
-
-// From libnetdata.h
-#include "libnetdata/threads/threads.h"
-#include "libnetdata/locks/locks.h"
-#include "libnetdata/avl/avl.h"
-#include "libnetdata/clocks/clocks.h"
-#include "libnetdata/config/appconfig.h"
-#include "libnetdata/ebpf/ebpf.h"
-#include "libnetdata/procfile/procfile.h"
-#include "collectors/cgroups.plugin/sys_fs_cgroup.h"
-#include "daemon/main.h"
-
-#include "ebpf_apps.h"
-#include "ebpf_functions.h"
-#include "ebpf_cgroup.h"
-
-#define NETDATA_EBPF_OLD_CONFIG_FILE "ebpf.conf"
-#define NETDATA_EBPF_CONFIG_FILE "ebpf.d.conf"
-
-#ifdef LIBBPF_MAJOR_VERSION // BTF code
-#include "includes/cachestat.skel.h"
-#include "includes/dc.skel.h"
-#include "includes/disk.skel.h"
-#include "includes/fd.skel.h"
-#include "includes/hardirq.skel.h"
-#include "includes/mdflush.skel.h"
-#include "includes/mount.skel.h"
-#include "includes/shm.skel.h"
-#include "includes/socket.skel.h"
-#include "includes/swap.skel.h"
-#include "includes/vfs.skel.h"
-
-extern struct cachestat_bpf *cachestat_bpf_obj;
-extern struct dc_bpf *dc_bpf_obj;
-extern struct disk_bpf *disk_bpf_obj;
-extern struct fd_bpf *fd_bpf_obj;
-extern struct hardirq_bpf *hardirq_bpf_obj;
-extern struct mount_bpf *mount_bpf_obj;
-extern struct mdflush_bpf *mdflush_bpf_obj;
-extern struct shm_bpf *shm_bpf_obj;
-extern struct socket_bpf *socket_bpf_obj;
-extern struct swap_bpf *bpf_obj;
-extern struct vfs_bpf *vfs_bpf_obj;
-#endif
-
-typedef struct netdata_syscall_stat {
- unsigned long bytes; // total number of bytes
- uint64_t call; // total number of calls
- uint64_t ecall; // number of calls that returned error
- struct netdata_syscall_stat *next; // Linked list
-} netdata_syscall_stat_t;
-
-typedef struct netdata_publish_syscall {
- char *dimension;
- char *name;
- char *algorithm;
- unsigned long nbyte;
- unsigned long pbyte;
- uint64_t ncall;
- uint64_t pcall;
- uint64_t nerr;
- uint64_t perr;
- struct netdata_publish_syscall *next;
-} netdata_publish_syscall_t;
-
-typedef struct netdata_publish_vfs_common {
- long write;
- long read;
-
- long running;
- long zombie;
-} netdata_publish_vfs_common_t;
-
-typedef struct netdata_error_report {
- char comm[16];
- __u32 pid;
-
- int type;
- int err;
-} netdata_error_report_t;
-
-typedef struct netdata_ebpf_judy_pid {
- ARAL *pid_table;
-
- // Index for PIDs
- struct { // support for multiple indexing engines
- Pvoid_t JudyLArray; // the hash table
- RW_SPINLOCK rw_spinlock; // protect the index
- } index;
-} netdata_ebpf_judy_pid_t;
-
-typedef struct netdata_ebpf_judy_pid_stats {
- char *cmdline;
-
- // Index for Socket timestamp
- struct { // support for multiple indexing engines
- Pvoid_t JudyLArray; // the hash table
- RW_SPINLOCK rw_spinlock; // protect the index
- } socket_stats;
-} netdata_ebpf_judy_pid_stats_t;
-
-extern ebpf_module_t ebpf_modules[];
-enum ebpf_main_index {
- EBPF_MODULE_PROCESS_IDX,
- EBPF_MODULE_SOCKET_IDX,
- EBPF_MODULE_CACHESTAT_IDX,
- EBPF_MODULE_SYNC_IDX,
- EBPF_MODULE_DCSTAT_IDX,
- EBPF_MODULE_SWAP_IDX,
- EBPF_MODULE_VFS_IDX,
- EBPF_MODULE_FILESYSTEM_IDX,
- EBPF_MODULE_DISK_IDX,
- EBPF_MODULE_MOUNT_IDX,
- EBPF_MODULE_FD_IDX,
- EBPF_MODULE_HARDIRQ_IDX,
- EBPF_MODULE_SOFTIRQ_IDX,
- EBPF_MODULE_OOMKILL_IDX,
- EBPF_MODULE_SHM_IDX,
- EBPF_MODULE_MDFLUSH_IDX,
- EBPF_MODULE_FUNCTION_IDX,
- /* THREADS MUST BE INCLUDED BEFORE THIS COMMENT */
- EBPF_OPTION_ALL_CHARTS,
- EBPF_OPTION_VERSION,
- EBPF_OPTION_HELP,
- EBPF_OPTION_GLOBAL_CHART,
- EBPF_OPTION_RETURN_MODE,
- EBPF_OPTION_LEGACY,
- EBPF_OPTION_CORE,
- EBPF_OPTION_UNITTEST
-};
-
-typedef struct ebpf_tracepoint {
- bool enabled;
- char *class;
- char *event;
-} ebpf_tracepoint_t;
-
-// Copied from musl header
-#ifndef offsetof
-#if __GNUC__ > 3
-#define offsetof(type, member) __builtin_offsetof(type, member)
-#else
-#define offsetof(type, member) ((size_t)((char *)&(((type *)0)->member) - (char *)0))
-#endif
-#endif
-
-// Messages
-#define NETDATA_EBPF_DEFAULT_FNT_NOT_FOUND "Cannot find the necessary functions to monitor"
-
-// Chart definitions
-#define NETDATA_EBPF_FAMILY "ebpf"
-#define NETDATA_EBPF_IP_FAMILY "ip"
-#define NETDATA_FILESYSTEM_FAMILY "filesystem"
-#define NETDATA_EBPF_MOUNT_GLOBAL_FAMILY "mount_points"
-#define NETDATA_EBPF_CHART_TYPE_LINE "line"
-#define NETDATA_EBPF_CHART_TYPE_STACKED "stacked"
-#define NETDATA_EBPF_MEMORY_GROUP "mem"
-#define NETDATA_EBPF_SYSTEM_GROUP "system"
-#define NETDATA_SYSTEM_SWAP_SUBMENU "swap"
-#define NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU "swap (eBPF)"
-#define NETDATA_SYSTEM_IPC_SHM_SUBMENU "ipc shared memory"
-#define NETDATA_MONITORING_FAMILY "netdata"
-
-// Statistics charts
-#define NETDATA_EBPF_THREADS "ebpf_threads"
-#define NETDATA_EBPF_LIFE_TIME "ebpf_life_time"
-#define NETDATA_EBPF_LOAD_METHOD "ebpf_load_methods"
-#define NETDATA_EBPF_KERNEL_MEMORY "ebpf_kernel_memory"
-#define NETDATA_EBPF_HASH_TABLES_LOADED "ebpf_hash_tables_count"
-#define NETDATA_EBPF_HASH_TABLES_PER_CORE "ebpf_hash_tables_per_core"
-#define NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS "ebpf_hash_tables_global_elements"
-#define NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS "ebpf_hash_tables_insert_pid_elements"
-#define NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS "ebpf_hash_tables_remove_pid_elements"
-
-// Log file
-#define NETDATA_DEVELOPER_LOG_FILE "developer.log"
-
-// Maximum number of processors monitored on perf events
-#define NETDATA_MAX_PROCESSOR 512
-
-// Kernel versions calculated with the formula (see the worked check after this hunk):
-// R = MAJOR*65536 + MINOR*256 + PATCH
-#define NETDATA_KERNEL_V5_3 328448
-#define NETDATA_KERNEL_V4_15 265984
-
-#define EBPF_SYS_CLONE_IDX 11
-#define EBPF_MAX_MAPS 32
-
-#define EBPF_DEFAULT_UPDATE_EVERY 10
-
-enum ebpf_algorithms_list {
- NETDATA_EBPF_ABSOLUTE_IDX,
- NETDATA_EBPF_INCREMENTAL_IDX
-};
-
-// Threads
-void *ebpf_process_thread(void *ptr);
-void *ebpf_socket_thread(void *ptr);
-
-// Common variables
-extern pthread_mutex_t lock;
-extern pthread_mutex_t ebpf_exit_cleanup;
-extern int ebpf_nprocs;
-extern int running_on_kernel;
-extern int isrh;
-extern char *ebpf_plugin_dir;
-extern int process_pid_fd;
-
-extern pthread_mutex_t collect_data_mutex;
-
-// Common functions
-void ebpf_global_labels(netdata_syscall_stat_t *is,
- netdata_publish_syscall_t *pio,
- char **dim,
- char **name,
- int *algorithm,
- int end);
-
-void ebpf_write_chart_cmd(char *type,
- char *id,
- char *suffix,
- char *title,
- char *units,
- char *family,
- char *charttype,
- char *context,
- int order,
- int update_every,
- char *module);
-
-void ebpf_write_global_dimension(char *name, char *id, char *algorithm);
-
-void ebpf_create_global_dimension(void *ptr, int end);
-
-void ebpf_create_chart(char *type,
- char *id,
- char *title,
- char *units,
- char *family,
- char *context,
- char *charttype,
- int order,
- void (*ncd)(void *, int),
- void *move,
- int end,
- int update_every,
- char *module);
-
-void write_chart_dimension(char *dim, long long value);
-
-void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end);
-
-void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, int end);
-
-void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite,
- char *dread, long long vread);
-
-/**
- * Create Chart labels
- *
- * @param name the label name.
- * @param value the label value.
- * @param source the label source.
- */
-static inline void ebpf_create_chart_labels(char *name, char *value, int source)
-{
- fprintf(stdout, "CLABEL '%s' '%s' %d\n", name, value, source);
-}
-
-/**
- * Commit label
- *
- * Write commit label to stdout
- */
-static inline void ebpf_commit_label()
-{
- fprintf(stdout, "CLABEL_COMMIT\n");
-}
-
-/**
- * Write begin command on standard output
- *
- * @param family the chart family name
- * @param name the chart name
- * @param metric the chart suffix (used with apps and cgroups)
- */
-static inline void ebpf_write_begin_chart(char *family, char *name, char *metric)
-{
- printf("BEGIN %s.%s%s\n", family, name, metric);
-}
-
-/**
- * Write END command on stdout.
- */
-static inline void ebpf_write_end_chart()
-{
- printf("END\n");
-}
-
-int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp);
-int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp);
-uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps);
-
-void ebpf_pid_file(char *filename, size_t length);
-
-#define EBPF_PROGRAMS_SECTION "ebpf programs"
-
-#define EBPF_COMMON_DIMENSION_PERCENTAGE "%"
-#define EBPF_COMMON_DIMENSION_CALL "calls/s"
-#define EBPF_COMMON_DIMENSION_CONNECTIONS "connections/s"
-#define EBPF_COMMON_DIMENSION_BITS "kilobits/s"
-#define EBPF_COMMON_DIMENSION_BYTES "bytes/s"
-#define EBPF_COMMON_DIMENSION_DIFFERENCE "difference"
-#define EBPF_COMMON_DIMENSION_PACKETS "packets"
-#define EBPF_COMMON_DIMENSION_FILES "files"
-#define EBPF_COMMON_DIMENSION_MILLISECONDS "milliseconds"
-#define EBPF_COMMON_DIMENSION_KILLS "kills"
-
-// Common variables
-extern int debug_enabled;
-extern struct ebpf_pid_stat *ebpf_root_of_pids;
-extern ebpf_cgroup_target_t *ebpf_cgroup_pids;
-extern char *ebpf_algorithms[];
-extern struct config collector_config;
-extern netdata_ebpf_cgroup_shm_t shm_ebpf_cgroup;
-extern int shm_fd_ebpf_cgroup;
-extern sem_t *shm_sem_ebpf_cgroup;
-extern pthread_mutex_t mutex_cgroup_shm;
-extern size_t ebpf_all_pids_count;
-extern ebpf_plugin_stats_t plugin_statistics;
-#ifdef LIBBPF_MAJOR_VERSION
-extern struct btf *default_btf;
-#else
-extern void *default_btf;
-#endif
-
-// Socket functions and variables
-// Common functions
-void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr);
-void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr);
-void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *root);
-void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long long v1);
-collected_number get_value_from_structure(char *basis, size_t offset);
-void ebpf_update_pid_table(ebpf_local_maps_t *pid, ebpf_module_t *em);
-void ebpf_write_chart_obsolete(char *type, char *id, char *suffix, char *title, char *units, char *family,
- char *charttype, char *context, int order, int update_every);
-void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end);
-void ebpf_update_disabled_plugin_stats(ebpf_module_t *em);
-ARAL *ebpf_allocate_pid_aral(char *name, size_t size);
-void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe_links);
-
-void ebpf_read_global_table_stats(netdata_idx_t *stats, netdata_idx_t *values, int map_fd,
- int maps_per_core, uint32_t begin, uint32_t end);
-void **ebpf_judy_insert_unsafe(PPvoid_t arr, Word_t key);
-netdata_ebpf_judy_pid_stats_t *ebpf_get_pid_from_judy_unsafe(PPvoid_t judy_array, uint32_t pid);
-
-void parse_network_viewer_section(struct config *cfg);
-void ebpf_clean_ip_structure(ebpf_network_viewer_ip_list_t **clean);
-void ebpf_clean_port_structure(ebpf_network_viewer_port_list_t **clean);
-void ebpf_read_local_addresses_unsafe();
-
-extern ebpf_filesystem_partitions_t localfs[];
-extern ebpf_sync_syscalls_t local_syscalls[];
-extern bool ebpf_plugin_exit;
-void ebpf_stop_threads(int sig);
-extern netdata_ebpf_judy_pid_t ebpf_judy_pid;
-
-#define EBPF_MAX_SYNCHRONIZATION_TIME 300
-
-#endif /* NETDATA_COLLECTOR_EBPF_H */
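The two kernel-version constants in this header follow directly from the documented formula R = MAJOR*65536 + MINOR*256 + PATCH. A quick worked check, pure arithmetic with no Netdata code assumed:

    #include <assert.h>

    /* R = MAJOR*65536 + MINOR*256 + PATCH, as documented in ebpf.h above. */
    #define KERNEL_VERSION_CODE(major, minor, patch) \
        ((major) * 65536 + (minor) * 256 + (patch))

    int main(void)
    {
        assert(KERNEL_VERSION_CODE(5, 3, 0)  == 328448); /* NETDATA_KERNEL_V5_3 */
        assert(KERNEL_VERSION_CODE(4, 15, 0) == 265984); /* NETDATA_KERNEL_V4_15 */
        return 0;
    }

For 5.3.0: 5*65536 = 327680, plus 3*256 = 768, giving 328448; for 4.15.0: 262144 + 3840 = 265984.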
diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c
deleted file mode 100644
index 10c452267..000000000
--- a/collectors/ebpf.plugin/ebpf_apps.c
+++ /dev/null
@@ -1,1485 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_socket.h"
-#include "ebpf_apps.h"
-
-// ----------------------------------------------------------------------------
-// ARAL vectors used to speed up processing
-ARAL *ebpf_aral_apps_pid_stat = NULL;
-ARAL *ebpf_aral_process_stat = NULL;
-ARAL *ebpf_aral_socket_pid = NULL;
-ARAL *ebpf_aral_cachestat_pid = NULL;
-ARAL *ebpf_aral_dcstat_pid = NULL;
-ARAL *ebpf_aral_vfs_pid = NULL;
-ARAL *ebpf_aral_fd_pid = NULL;
-ARAL *ebpf_aral_shm_pid = NULL;
-
-// ----------------------------------------------------------------------------
-// Global vectors used with apps
-ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL;
-netdata_publish_cachestat_t **cachestat_pid = NULL;
-netdata_publish_dcstat_t **dcstat_pid = NULL;
-netdata_publish_swap_t **swap_pid = NULL;
-netdata_publish_vfs_t **vfs_pid = NULL;
-netdata_fd_stat_t **fd_pid = NULL;
-netdata_publish_shm_t **shm_pid = NULL;
-ebpf_process_stat_t **global_process_stats = NULL;
-
-/**
- * eBPF ARAL Init
- *
- * Initialize the array allocator that will be used when the integration between apps and eBPF is created.
- */
-void ebpf_aral_init(void)
-{
- size_t max_elements = NETDATA_EBPF_ALLOC_MAX_PID;
- if (max_elements < NETDATA_EBPF_ALLOC_MIN_ELEMENTS) {
- netdata_log_error("Number of elements given is too small, adjusting it for %d", NETDATA_EBPF_ALLOC_MIN_ELEMENTS);
- max_elements = NETDATA_EBPF_ALLOC_MIN_ELEMENTS;
- }
-
- ebpf_aral_apps_pid_stat = ebpf_allocate_pid_aral("ebpf_pid_stat", sizeof(struct ebpf_pid_stat));
-
- ebpf_aral_process_stat = ebpf_allocate_pid_aral(NETDATA_EBPF_PROC_ARAL_NAME, sizeof(ebpf_process_stat_t));
-
-#ifdef NETDATA_DEV_MODE
- netdata_log_info("Plugin is using ARAL with values %d", NETDATA_EBPF_ALLOC_MAX_PID);
-#endif
-}
-
-/**
- * eBPF pid stat get
- *
- * Get an ebpf_pid_stat entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-struct ebpf_pid_stat *ebpf_pid_stat_get(void)
-{
- struct ebpf_pid_stat *target = aral_mallocz(ebpf_aral_apps_pid_stat);
- memset(target, 0, sizeof(struct ebpf_pid_stat));
- return target;
-}
-
-/**
- * eBPF target release
- *
- * @param stat the structure to be released after usage.
- */
-void ebpf_pid_stat_release(struct ebpf_pid_stat *stat)
-{
- aral_freez(ebpf_aral_apps_pid_stat, stat);
-}
-
-/*****************************************************************
- *
- * PROCESS ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF process stat get
- *
- * Get an ebpf_process_stat_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-ebpf_process_stat_t *ebpf_process_stat_get(void)
-{
- ebpf_process_stat_t *target = aral_mallocz(ebpf_aral_process_stat);
- memset(target, 0, sizeof(ebpf_process_stat_t));
- return target;
-}
-
-/**
- * eBPF process release
- *
- * @param stat the structure to be released after usage.
- */
-void ebpf_process_stat_release(ebpf_process_stat_t *stat)
-{
- aral_freez(ebpf_aral_process_stat, stat);
-}
-
-/*****************************************************************
- *
- * SOCKET ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF socket Aral init
- *
- * Initialize the array allocator that will be used when the integration with apps is enabled.
- */
-void ebpf_socket_aral_init()
-{
- ebpf_aral_socket_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_SOCKET_ARAL_NAME, sizeof(ebpf_socket_publish_apps_t));
-}
-
-/**
- * eBPF socket get
- *
- * Get an ebpf_socket_publish_apps_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void)
-{
- ebpf_socket_publish_apps_t *target = aral_mallocz(ebpf_aral_socket_pid);
- memset(target, 0, sizeof(ebpf_socket_publish_apps_t));
- return target;
-}
-
-/*****************************************************************
- *
- * CACHESTAT ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF Cachestat Aral init
- *
- * Initialize the array allocator that will be used when the integration with apps is enabled.
- */
-void ebpf_cachestat_aral_init()
-{
- ebpf_aral_cachestat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_CACHESTAT_ARAL_NAME, sizeof(netdata_publish_cachestat_t));
-}
-
-/**
- * eBPF publish cachestat get
- *
- * Get a netdata_publish_cachestat_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-netdata_publish_cachestat_t *ebpf_publish_cachestat_get(void)
-{
- netdata_publish_cachestat_t *target = aral_mallocz(ebpf_aral_cachestat_pid);
- memset(target, 0, sizeof(netdata_publish_cachestat_t));
- return target;
-}
-
-/**
- * eBPF cachestat release
- *
- * @param stat the structure to be released after usage.
- */
-void ebpf_cachestat_release(netdata_publish_cachestat_t *stat)
-{
- aral_freez(ebpf_aral_cachestat_pid, stat);
-}
-
-/*****************************************************************
- *
- * DCSTAT ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF directory cache Aral init
- *
- * Initialize the array allocator that will be used when the integration with apps is enabled.
- */
-void ebpf_dcstat_aral_init()
-{
- ebpf_aral_dcstat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_DCSTAT_ARAL_NAME, sizeof(netdata_publish_dcstat_t));
-}
-
-/**
- * eBPF publish dcstat get
- *
- * Get a netdata_publish_dcstat_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-netdata_publish_dcstat_t *ebpf_publish_dcstat_get(void)
-{
- netdata_publish_dcstat_t *target = aral_mallocz(ebpf_aral_dcstat_pid);
- memset(target, 0, sizeof(netdata_publish_dcstat_t));
- return target;
-}
-
-/**
- * eBPF dcstat release
- *
- * @param stat the structure to be released after usage.
- */
-void ebpf_dcstat_release(netdata_publish_dcstat_t *stat)
-{
- aral_freez(ebpf_aral_dcstat_pid, stat);
-}
-
-/*****************************************************************
- *
- * VFS ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF VFS Aral init
- *
- * Initialize the array allocator that will be used when the integration with apps is enabled.
- */
-void ebpf_vfs_aral_init()
-{
- ebpf_aral_vfs_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_VFS_ARAL_NAME, sizeof(netdata_publish_vfs_t));
-}
-
-/**
- * eBPF publish VFS get
- *
- * Get a netdata_publish_vfs_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-netdata_publish_vfs_t *ebpf_vfs_get(void)
-{
- netdata_publish_vfs_t *target = aral_mallocz(ebpf_aral_vfs_pid);
- memset(target, 0, sizeof(netdata_publish_vfs_t));
- return target;
-}
-
-/**
- * eBPF VFS release
- *
- * @param stat the structure to be released after usage.
- */
-void ebpf_vfs_release(netdata_publish_vfs_t *stat)
-{
- aral_freez(ebpf_aral_vfs_pid, stat);
-}
-
-/*****************************************************************
- *
- * FD ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF file descriptor Aral init
- *
- * Initialize the array allocator that will be used when the integration with apps is enabled.
- */
-void ebpf_fd_aral_init()
-{
- ebpf_aral_fd_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_FD_ARAL_NAME, sizeof(netdata_fd_stat_t));
-}
-
-/**
- * eBPF publish file descriptor get
- *
- * Get a netdata_fd_stat_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-netdata_fd_stat_t *ebpf_fd_stat_get(void)
-{
- netdata_fd_stat_t *target = aral_mallocz(ebpf_aral_fd_pid);
- memset(target, 0, sizeof(netdata_fd_stat_t));
- return target;
-}
-
-/**
- * eBPF file descriptor release
- *
- * @param stat the structure to be released after usage.
- */
-void ebpf_fd_release(netdata_fd_stat_t *stat)
-{
- aral_freez(ebpf_aral_fd_pid, stat);
-}
-
-/*****************************************************************
- *
- * SHM ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF shared memory Aral init
- *
- * Initialize the array allocator that will be used when the integration with apps is enabled.
- */
-void ebpf_shm_aral_init()
-{
- ebpf_aral_shm_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_SHM_ARAL_NAME, sizeof(netdata_publish_shm_t));
-}
-
-/**
- * eBPF shared memory get
- *
- * Get a netdata_publish_shm_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-netdata_publish_shm_t *ebpf_shm_stat_get(void)
-{
- netdata_publish_shm_t *target = aral_mallocz(ebpf_aral_shm_pid);
- memset(target, 0, sizeof(netdata_publish_shm_t));
- return target;
-}
-
-/**
- * eBPF shared memory release
- *
- * @param stat the structure to be released after usage.
- */
-void ebpf_shm_release(netdata_publish_shm_t *stat)
-{
- aral_freez(ebpf_aral_shm_pid, stat);
-}
-
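Every module above repeats the same contract around Netdata's ARAL (array allocator): one `*_aral_init()` creates the arena, a zeroing `*_get()` hands out one entry per PID, and a matching `*_release()` returns it. A condensed lifecycle sketch using the fd module's real function names from this file; it only compiles inside the plugin, and the field access is hypothetical:

    /* Illustrative lifecycle of one per-PID entry in the fd module's arena. */
    void fd_aral_lifecycle_example(void)
    {
        ebpf_fd_aral_init();                           /* create the arena once */

        netdata_fd_stat_t *entry = ebpf_fd_stat_get(); /* zeroed entry from the arena */
        /* ... fill the entry from the kernel hash table ... */

        ebpf_fd_release(entry);                        /* return it to the arena */
    }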
-// ----------------------------------------------------------------------------
-// internal flags
-// handled in code (automatically set)
-
-static int proc_pid_cmdline_is_needed = 0; // 1 when we need to read /proc/<pid>/cmdline
-
-/*****************************************************************
- *
- * FUNCTIONS USED TO READ HASH TABLES
- *
- *****************************************************************/
-
-/**
- * Read statistic hash table.
- *
- * @param ep the output structure.
- * @param fd the file descriptor mapped from kernel ring.
- * @param pid the index used to select the data.
- *
- * @return It returns 0 when the data was copied and -1 otherwise
- */
-int ebpf_read_hash_table(void *ep, int fd, uint32_t pid)
-{
- if (!ep)
- return -1;
-
- if (!bpf_map_lookup_elem(fd, &pid, ep))
- return 0;
-
- return -1;
-}
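A short caller sketch for ebpf_read_hash_table(); `map_fd` and the output type are illustrative placeholders, but the 0-on-success contract is the one documented above:

    netdata_fd_stat_t stats;          /* output structure (illustrative type) */
    uint32_t pid = 1234;              /* hypothetical PID key */

    if (!ebpf_read_hash_table(&stats, map_fd, pid)) {
        /* stats now holds the kernel-side counters for this PID */
    }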
-
-/*****************************************************************
- *
- * FUNCTIONS CALLED FROM COLLECTORS
- *
- *****************************************************************/
-
-/**
- * Am I running as Root
- *
- * Verify the user that is running the collector.
- *
- * @return It returns 1 for root and 0 otherwise.
- */
-int am_i_running_as_root()
-{
- uid_t uid = getuid(), euid = geteuid();
-
- if (uid == 0 || euid == 0) {
- return 1;
- }
-
- return 0;
-}
-
-/**
- * Reset the target values
- *
- * @param root the pointer to the chain that will be reset.
- *
- * @return it returns the number of structures that were reset.
- */
-size_t zero_all_targets(struct ebpf_target *root)
-{
- struct ebpf_target *w;
- size_t count = 0;
-
- for (w = root; w; w = w->next) {
- count++;
-
- if (unlikely(w->root_pid)) {
- struct ebpf_pid_on_target *pid_on_target = w->root_pid;
-
- while (pid_on_target) {
- struct ebpf_pid_on_target *pid_on_target_to_free = pid_on_target;
- pid_on_target = pid_on_target->next;
- freez(pid_on_target_to_free);
- }
-
- w->root_pid = NULL;
- }
- }
-
- return count;
-}
-
-/**
- * Clean the allocated structures
- *
- * @param agrt the pointer to be cleaned.
- */
-void clean_apps_groups_target(struct ebpf_target *agrt)
-{
- struct ebpf_target *current_target;
- while (agrt) {
- current_target = agrt;
- agrt = current_target->target;
-
- freez(current_target);
- }
-}
-
-/**
- * Find or create a new target
- * there are targets that are just aggregated to other target (the second argument)
- *
- * @param agrt   the root of the targets list
- * @param id     the target id, possibly prefixed with option characters
- * @param target the target to aggregate into, when not NULL
- * @param name   the target name
- *
- * @return It returns the target on success and NULL otherwise
- */
-struct ebpf_target *get_apps_groups_target(struct ebpf_target **agrt, const char *id, struct ebpf_target *target, const char *name)
-{
- int tdebug = 0, thidden = target ? target->hidden : 0, ends_with = 0;
- const char *nid = id;
-
- // extract the options
- while (nid[0] == '-' || nid[0] == '+' || nid[0] == '*') {
- if (nid[0] == '-')
- thidden = 1;
- if (nid[0] == '+')
- tdebug = 1;
- if (nid[0] == '*')
- ends_with = 1;
- nid++;
- }
- uint32_t hash = simple_hash(id);
-
- // find if it already exists
- struct ebpf_target *w, *last = *agrt;
- for (w = *agrt; w; w = w->next) {
- if (w->idhash == hash && strncmp(nid, w->id, EBPF_MAX_NAME) == 0)
- return w;
-
- last = w;
- }
-
- // find an existing target
- if (unlikely(!target)) {
- while (*name == '-') {
- if (*name == '-')
- thidden = 1;
- name++;
- }
-
- for (target = *agrt; target != NULL; target = target->next) {
- if (!target->target && strcmp(name, target->name) == 0)
- break;
- }
- }
-
- if (target && target->target)
- fatal(
- "Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'", id,
- target->id, target->target->id);
-
- w = callocz(1, sizeof(struct ebpf_target));
- strncpyz(w->id, nid, EBPF_MAX_NAME);
- w->idhash = simple_hash(w->id);
-
- if (unlikely(!target))
- // copy the name
- strncpyz(w->name, name, EBPF_MAX_NAME);
- else
- // copy the id
- strncpyz(w->name, nid, EBPF_MAX_NAME);
-
- strncpyz(w->clean_name, w->name, EBPF_MAX_NAME);
- netdata_fix_chart_name(w->clean_name);
- for (char *d = w->clean_name; *d; d++) {
- if (*d == '.')
- *d = '_';
- }
-
- strncpyz(w->compare, nid, EBPF_MAX_COMPARE_NAME);
- size_t len = strlen(w->compare);
- if (w->compare[len - 1] == '*') {
- w->compare[len - 1] = '\0';
- w->starts_with = 1;
- }
- w->ends_with = ends_with;
-
- if (w->starts_with && w->ends_with)
- proc_pid_cmdline_is_needed = 1;
-
- w->comparehash = simple_hash(w->compare);
- w->comparelen = strlen(w->compare);
-
- w->hidden = thidden;
-#ifdef NETDATA_INTERNAL_CHECKS
- w->debug_enabled = tdebug;
-#else
- if (tdebug)
- fprintf(stderr, "apps.plugin has been compiled without debugging\n");
-#endif
- w->target = target;
-
- // append it, to maintain the order in apps_groups.conf
- if (last)
- last->next = w;
- else
- *agrt = w;
-
- return w;
-}
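The option characters stripped at the top of get_apps_groups_target() correspond to the pattern syntax of apps_groups.conf; an illustrative line and how it parses (group and process names hypothetical):

    # apps_groups.conf-style line: a group name followed by process patterns
    web: nginx httpd* *gunicorn*

    # 'nginx'      -> plain comm match (comparehash + strcmp above)
    # 'httpd*'     -> trailing '*' is stripped into w->compare, starts_with = 1
    # '*gunicorn*' -> leading '*' sets ends_with, trailing '*' sets starts_with;
    #                 both together search the full cmdline and set
    #                 proc_pid_cmdline_is_needed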
-
-/**
- * Read the apps_groups.conf file
- *
- * @param agdt a pointer to the default target
- * @param agrt a pointer to apps_group_root_target
- * @param path the directory to search for apps_%s.conf
- * @param file the word used to complete the file name.
- *
- * @return It returns 0 on success and -1 otherwise
- */
-int ebpf_read_apps_groups_conf(struct ebpf_target **agdt, struct ebpf_target **agrt, const char *path, const char *file)
-{
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s/apps_%s.conf", path, file);
-
- // ----------------------------------------
-
- procfile *ff = procfile_open_no_log(filename, " :\t", PROCFILE_FLAG_DEFAULT);
- if (!ff)
- return -1;
-
- procfile_set_quotes(ff, "'\"");
-
- ff = procfile_readall(ff);
- if (!ff)
- return -1;
-
- size_t line, lines = procfile_lines(ff);
-
- for (line = 0; line < lines; line++) {
- size_t word, words = procfile_linewords(ff, line);
- if (!words)
- continue;
-
- char *name = procfile_lineword(ff, line, 0);
- if (!name || !*name)
- continue;
-
- // find a possibly existing target
- struct ebpf_target *w = NULL;
-
- // loop through all words, skipping the first one (the name)
- for (word = 0; word < words; word++) {
- char *s = procfile_lineword(ff, line, word);
- if (!s || !*s)
- continue;
- if (*s == '#')
- break;
-
- // is this the first word? skip it
- if (s == name)
- continue;
-
- // add this target
- struct ebpf_target *n = get_apps_groups_target(agrt, s, w, name);
- if (!n) {
- netdata_log_error("Cannot create target '%s' (line %zu, word %zu)", s, line, word);
- continue;
- }
-
- // just some optimization
- // to avoid searching for a target for each process
- if (!w)
- w = n->target ? n->target : n;
- }
- }
-
- procfile_close(ff);
-
- *agdt = get_apps_groups_target(agrt, "p+!o@w#e$i^r&7*5(-i)l-o_", NULL, "other"); // match nothing
- if (!*agdt)
- fatal("Cannot create default target");
-
- struct ebpf_target *ptr = *agdt;
- if (ptr->target)
- *agdt = ptr->target;
-
- return 0;
-}
-
-// the minimum PID of the system
-// this is also the pid of the init process
-#define INIT_PID 1
-
-// ----------------------------------------------------------------------------
-// string lengths
-
-#define MAX_CMDLINE 16384
-
-struct ebpf_pid_stat **ebpf_all_pids = NULL; // to avoid allocations, we pre-allocate
- // the entire pid space.
-struct ebpf_pid_stat *ebpf_root_of_pids = NULL; // global list of all processes running
-
-size_t ebpf_all_pids_count = 0; // the number of processes running
-
-struct ebpf_target
- *apps_groups_default_target = NULL, // the default target
- *apps_groups_root_target = NULL, // apps_groups.conf defined
- *users_root_target = NULL, // users
- *groups_root_target = NULL; // user groups
-
-size_t apps_groups_targets_count = 0; // # of apps_groups.conf targets
-
-// ----------------------------------------------------------------------------
-// internal counters
-
-static size_t
- // global_iterations_counter = 1,
- calls_counter = 0,
- // file_counter = 0,
- // filenames_allocated_counter = 0,
- // inodes_changed_counter = 0,
- // links_changed_counter = 0,
- targets_assignment_counter = 0;
-
-// ----------------------------------------------------------------------------
-// debugging
-
-// log each problem once per process
-// log flood protection flags (log_thrown)
-#define PID_LOG_IO 0x00000001
-#define PID_LOG_STATUS 0x00000002
-#define PID_LOG_CMDLINE 0x00000004
-#define PID_LOG_FDS 0x00000008
-#define PID_LOG_STAT 0x00000010
-
-int debug_enabled = 0;
-
-#ifdef NETDATA_INTERNAL_CHECKS
-
-#define debug_log(fmt, args...) \
- do { \
- if (unlikely(debug_enabled)) \
- debug_log_int(fmt, ##args); \
- } while (0)
-
-#else
-
-static inline void debug_log_dummy(void)
-{
-}
-#define debug_log(fmt, args...) debug_log_dummy()
-
-#endif
-
-/**
- * Managed log
- *
- * Store log information if it is necessary.
- *
- * @param p the pid stat structure
- * @param log the log id
- * @param status the return from a function.
- *
- * @return It returns the status value.
- */
-static inline int managed_log(struct ebpf_pid_stat *p, uint32_t log, int status)
-{
- if (unlikely(!status)) {
- // netdata_log_error("command failed log %u, errno %d", log, errno);
-
- if (unlikely(debug_enabled || errno != ENOENT)) {
- if (unlikely(debug_enabled || !(p->log_thrown & log))) {
- p->log_thrown |= log;
- switch (log) {
- case PID_LOG_IO:
- netdata_log_error(
- "Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid,
- p->comm);
- break;
-
- case PID_LOG_STATUS:
- netdata_log_error(
- "Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid,
- p->comm);
- break;
-
- case PID_LOG_CMDLINE:
- netdata_log_error(
- "Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid,
- p->comm);
- break;
-
- case PID_LOG_FDS:
- netdata_log_error(
- "Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix,
- p->pid, p->comm);
- break;
-
- case PID_LOG_STAT:
- break;
-
- default:
- netdata_log_error("unhandled error for pid %d, command '%s'", p->pid, p->comm);
- break;
- }
- }
- }
- errno = 0;
- } else if (unlikely(p->log_thrown & log)) {
- // netdata_log_error("unsetting log %u on pid %d", log, p->pid);
- p->log_thrown &= ~log;
- }
-
- return status;
-}
-
-/**
- * Get PID entry
- *
- * Get or allocate the PID entry for the specified pid.
- *
- * @param pid the pid whose entry will be fetched.
- *
- * @return It returns the pid entry structure
- */
-static inline struct ebpf_pid_stat *get_pid_entry(pid_t pid)
-{
- if (unlikely(ebpf_all_pids[pid]))
- return ebpf_all_pids[pid];
-
- struct ebpf_pid_stat *p = ebpf_pid_stat_get();
-
- if (likely(ebpf_root_of_pids))
- ebpf_root_of_pids->prev = p;
-
- p->next = ebpf_root_of_pids;
- ebpf_root_of_pids = p;
-
- p->pid = pid;
-
- ebpf_all_pids[pid] = p;
- ebpf_all_pids_count++;
-
- return p;
-}
-
-/**
- * Assign the PID to a target.
- *
- * @param p the pid_stat structure to assign for a target.
- */
-static inline void assign_target_to_pid(struct ebpf_pid_stat *p)
-{
- targets_assignment_counter++;
-
- uint32_t hash = simple_hash(p->comm);
- size_t pclen = strlen(p->comm);
-
- struct ebpf_target *w;
- for (w = apps_groups_root_target; w; w = w->next) {
- // if(debug_enabled || (p->target && p->target->debug_enabled)) debug_log_int("\t\tcomparing '%s' with '%s'", w->compare, p->comm);
-
- // find it - 4 cases:
- // 1. the target is not a pattern
- // 2. the target has the prefix
- // 3. the target has the suffix
- // 4. the target is something inside cmdline
-
- if (unlikely(
- ((!w->starts_with && !w->ends_with && w->comparehash == hash && !strcmp(w->compare, p->comm)) ||
- (w->starts_with && !w->ends_with && !strncmp(w->compare, p->comm, w->comparelen)) ||
- (!w->starts_with && w->ends_with && pclen >= w->comparelen && !strcmp(w->compare, &p->comm[pclen - w->comparelen])) ||
- (proc_pid_cmdline_is_needed && w->starts_with && w->ends_with && p->cmdline && strstr(p->cmdline, w->compare))))) {
- if (w->target)
- p->target = w->target;
- else
- p->target = w;
-
- if (debug_enabled || (p->target && p->target->debug_enabled))
- debug_log_int("%s linked to target %s", p->comm, p->target->name);
-
- break;
- }
- }
-}
-
-// ----------------------------------------------------------------------------
-// update pids from proc
-
-/**
- * Read cmd line from /proc/PID/cmdline
- *
- * @param p the ebpf_pid_stat_structure.
- *
- * @return It returns 1 on success and 0 otherwise.
- */
-static inline int read_proc_pid_cmdline(struct ebpf_pid_stat *p)
-{
- static char cmdline[MAX_CMDLINE + 1];
-
- int ret = 0;
- if (unlikely(!p->cmdline_filename)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid);
- p->cmdline_filename = strdupz(filename);
- }
-
- int fd = open(p->cmdline_filename, procfile_open_flags, 0666);
- if (unlikely(fd == -1))
- goto cleanup;
-
- ssize_t i, bytes = read(fd, cmdline, MAX_CMDLINE);
- close(fd);
-
- if (unlikely(bytes < 0))
- goto cleanup;
-
- cmdline[bytes] = '\0';
- for (i = 0; i < bytes; i++) {
- if (unlikely(!cmdline[i]))
- cmdline[i] = ' ';
- }
-
- debug_log("Read file '%s' contents: %s", p->cmdline_filename, p->cmdline);
-
- ret = 1;
-
-cleanup:
- // copy the command to the command line
- if (p->cmdline)
- freez(p->cmdline);
- p->cmdline = strdupz(p->comm);
-
- rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock);
- netdata_ebpf_judy_pid_stats_t *pid_ptr = ebpf_get_pid_from_judy_unsafe(&ebpf_judy_pid.index.JudyLArray, p->pid);
- if (pid_ptr)
- pid_ptr->cmdline = p->cmdline;
- rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
-
- return ret;
-}
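The NUL-rewriting loop above exists because /proc/<pid>/cmdline stores argv entries separated by '\0'. A self-contained illustration of the same substitution (the argument values are hypothetical):

    #include <stdio.h>

    int main(void)
    {
        /* /proc/<pid>/cmdline content: argv entries separated by '\0' */
        char buf[] = "nginx\0-g\0off";          /* hypothetical arguments */
        size_t len = sizeof(buf) - 1;           /* 12 bytes, interior NULs included */

        for (size_t i = 0; i < len; i++)
            if (buf[i] == '\0')
                buf[i] = ' ';                   /* same substitution as above */

        printf("%s\n", buf);                    /* prints: nginx -g off */
        return 0;
    }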
-
-/**
- * Read information from /proc/PID/stat and /proc/PID/cmdline
- * Assign target to pid
- *
- * @param p the pid stat structure to store the data.
- * @param ptr an unused argument.
- */
-static inline int read_proc_pid_stat(struct ebpf_pid_stat *p, void *ptr)
-{
- UNUSED(ptr);
-
- static procfile *ff = NULL;
-
- if (unlikely(!p->stat_filename)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid);
- p->stat_filename = strdupz(filename);
- }
-
- int set_quotes = (!ff) ? 1 : 0;
-
- struct stat statbuf;
- if (stat(p->stat_filename, &statbuf))
- return 0;
-
- ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if (unlikely(!ff))
- return 0;
-
- if (unlikely(set_quotes))
- procfile_set_open_close(ff, "(", ")");
-
- ff = procfile_readall(ff);
- if (unlikely(!ff))
- return 0;
-
- p->last_stat_collected_usec = p->stat_collected_usec;
- p->stat_collected_usec = now_monotonic_usec();
- calls_counter++;
-
- char *comm = procfile_lineword(ff, 0, 1);
- p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3));
-
- if (strcmp(p->comm, comm) != 0) {
- if (unlikely(debug_enabled)) {
- if (p->comm[0])
- debug_log("\tpid %d (%s) changed name to '%s'", p->pid, p->comm, comm);
- else
- debug_log("\tJust added %d (%s)", p->pid, comm);
- }
-
- strncpyz(p->comm, comm, EBPF_MAX_COMPARE_NAME);
-
- // /proc/<pid>/cmdline
- if (likely(proc_pid_cmdline_is_needed))
- managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p));
-
- assign_target_to_pid(p);
- }
-
- if (unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
- debug_log_int(
- "READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu)",
- netdata_configured_host_prefix, p->pid, p->comm, (p->target) ? p->target->name : "UNSET",
- p->stat_collected_usec - p->last_stat_collected_usec);
-
- return 1;
-}
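The procfile_set_open_close(ff, "(", ")") call above exists because the comm field in /proc/PID/stat is wrapped in parentheses and may itself contain spaces, so naive whitespace tokenizing would mis-split the line. A hedged sketch of the underlying idea without the procfile helpers (parse_stat_line() is hypothetical, not part of the plugin):

    #include <stdio.h>
    #include <string.h>

    /* Extract comm and ppid from a /proc/PID/stat line. The comm field
     * must be located via the *last* ')' because it can contain spaces
     * and even parentheses itself. */
    static int parse_stat_line(const char *line, char *comm, size_t comm_len, int *ppid) {
        const char *open = strchr(line, '(');
        const char *close = strrchr(line, ')');
        if (!open || !close || close < open)
            return -1;

        size_t len = (size_t)(close - open - 1);
        if (len >= comm_len)
            len = comm_len - 1;
        memcpy(comm, open + 1, len);
        comm[len] = '\0';

        char state; /* field 3 is the state, field 4 is the ppid */
        return (sscanf(close + 1, " %c %d", &state, ppid) == 2) ? 0 : -1;
    }

    int main(void) {
        char comm[64];
        int ppid;
        if (!parse_stat_line("1234 (my prog) S 1 1234", comm, sizeof(comm), &ppid))
            printf("comm='%s' ppid=%d\n", comm, ppid);
        return 0;
    }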
-
-/**
- * Collect data for PID
- *
- * @param pid the current pid we are working on
- * @param ptr a NULL value
- *
- * @return It returns 1 on success and 0 otherwise
- */
-static inline int collect_data_for_pid(pid_t pid, void *ptr)
-{
- if (unlikely(pid < 0 || pid > pid_max)) {
- netdata_log_error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max);
- return 0;
- }
-
- struct ebpf_pid_stat *p = get_pid_entry(pid);
- if (unlikely(!p || p->read))
- return 0;
- p->read = 1;
-
- if (unlikely(!managed_log(p, PID_LOG_STAT, read_proc_pid_stat(p, ptr))))
- // there is no reason to proceed if we cannot get its status
- return 0;
-
- // check its parent pid
- if (unlikely(p->ppid < 0 || p->ppid > pid_max)) {
- netdata_log_error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid);
- p->ppid = 0;
- }
-
- // mark it as updated
- p->updated = 1;
- p->keep = 0;
- p->keeploops = 0;
-
- return 1;
-}
-
-/**
- * Fill link list of parents with children PIDs
- */
-static inline void link_all_processes_to_their_parents(void)
-{
- struct ebpf_pid_stat *p, *pp;
-
- // link all children to their parents
- // and update children count on parents
- for (p = ebpf_root_of_pids; p; p = p->next) {
- // for each process found
-
- p->sortlist = 0;
- p->parent = NULL;
-
- if (unlikely(!p->ppid)) {
- p->parent = NULL;
- continue;
- }
-
- pp = ebpf_all_pids[p->ppid];
- if (likely(pp)) {
- p->parent = pp;
- pp->children_count++;
-
- if (unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
- debug_log_int(
- "child %d (%s, %s) on target '%s' has parent %d (%s, %s).", p->pid, p->comm,
- p->updated ? "running" : "exited", (p->target) ? p->target->name : "UNSET", pp->pid, pp->comm,
- pp->updated ? "running" : "exited");
- } else {
- p->parent = NULL;
- debug_log("pid %d %s states parent %d, but the later does not exist.", p->pid, p->comm, p->ppid);
- }
- }
-}
-
-/**
- * Aggregate PIDs to targets.
- */
-static void apply_apps_groups_targets_inheritance(void)
-{
- struct ebpf_pid_stat *p = NULL;
-
- // children that do not have a target
- // inherit their target from their parent
- int found = 1, loops = 0;
- while (found) {
- if (unlikely(debug_enabled))
- loops++;
- found = 0;
- for (p = ebpf_root_of_pids; p; p = p->next) {
- // if this process does not have a target
- // and it has a parent
- // and its parent has a target
- // then, set the parent's target to this process
- if (unlikely(!p->target && p->parent && p->parent->target)) {
- p->target = p->parent->target;
- found++;
-
- if (debug_enabled || (p->target && p->target->debug_enabled))
- debug_log_int(
- "TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s).", p->target->name,
- p->pid, p->comm, p->parent->pid, p->parent->comm);
- }
- }
- }
-
- // find all the processes with no children and merge them into their parents
- // repeat, until nothing more can be done.
- int sortlist = 1;
- found = 1;
- while (found) {
- if (unlikely(debug_enabled))
- loops++;
- found = 0;
-
- for (p = ebpf_root_of_pids; p; p = p->next) {
- if (unlikely(!p->sortlist && !p->children_count))
- p->sortlist = sortlist++;
-
- if (unlikely(
- !p->children_count // if this process does not have any children
- && !p->merged // and is not already merged
- && p->parent // and has a parent
- && p->parent->children_count // and its parent has children
- // and the target of this process and its parent is the same,
- // or the parent does not have a target
- && (p->target == p->parent->target || !p->parent->target) &&
- p->ppid != INIT_PID // and its parent is not init
- )) {
- // mark it as merged
- p->parent->children_count--;
- p->merged = 1;
-
- // the parent inherits the child's target, if it does not have a target itself
- if (unlikely(p->target && !p->parent->target)) {
- p->parent->target = p->target;
-
- if (debug_enabled || (p->target && p->target->debug_enabled))
- debug_log_int(
- "TARGET INHERITANCE: %s is inherited by %d (%s) from its child %d (%s).", p->target->name,
- p->parent->pid, p->parent->comm, p->pid, p->comm);
- }
-
- found++;
- }
- }
-
- debug_log("TARGET INHERITANCE: merged %d processes", found);
- }
-
- // init goes always to default target
- if (ebpf_all_pids[INIT_PID])
- ebpf_all_pids[INIT_PID]->target = apps_groups_default_target;
-
- // pid 0 goes always to default target
- if (ebpf_all_pids[0])
- ebpf_all_pids[0]->target = apps_groups_default_target;
-
- // give a default target to all top-level processes
- if (unlikely(debug_enabled))
- loops++;
- for (p = ebpf_root_of_pids; p; p = p->next) {
- // if the process is not merged itself
- // then it is a top-level process
- if (unlikely(!p->merged && !p->target))
- p->target = apps_groups_default_target;
-
- // make sure all processes have a sortlist
- if (unlikely(!p->sortlist))
- p->sortlist = sortlist++;
- }
-
- if (ebpf_all_pids[1])
- ebpf_all_pids[1]->sortlist = sortlist++;
-
- // give a target to all merged child processes
- found = 1;
- while (found) {
- if (unlikely(debug_enabled))
- loops++;
- found = 0;
- for (p = ebpf_root_of_pids; p; p = p->next) {
- if (unlikely(!p->target && p->merged && p->parent && p->parent->target)) {
- p->target = p->parent->target;
- found++;
-
- if (debug_enabled || (p->target && p->target->debug_enabled))
- debug_log_int(
- "TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s) at phase 2.",
- p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm);
- }
- }
- }
-
- debug_log("apply_apps_groups_targets_inheritance() made %d loops on the process tree", loops);
-}
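As a concrete illustration of the phases above, consider a shell matched by an apps_groups.conf entry that forks a worker, which in turn forks a short-lived grep: phase one pushes the shell's target down to both descendants, phase two merges the childless grep back into the worker because they share a target, and the final passes give the default target to any unmerged top-level process still left without one.

    bash (matched target: shells)
    └─ worker          <- inherits "shells" in phase one
       └─ grep         <- merged into worker in phase two (no children, same target)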
-
-/**
- * Update target timestamp.
- *
- * @param root the targets that will be updated.
- */
-static inline void post_aggregate_targets(struct ebpf_target *root)
-{
- struct ebpf_target *w;
- for (w = root; w; w = w->next) {
- if (w->collected_starttime) {
- if (!w->starttime || w->collected_starttime < w->starttime) {
- w->starttime = w->collected_starttime;
- }
- } else {
- w->starttime = 0;
- }
- }
-}
-
-/**
- * Remove PID from the link list.
- *
- * @param pid the PID that will be removed.
- */
-static inline void del_pid_entry(pid_t pid)
-{
- struct ebpf_pid_stat *p = ebpf_all_pids[pid];
-
- if (unlikely(!p)) {
- netdata_log_error("attempted to free pid %d that is not allocated.", pid);
- return;
- }
-
- debug_log("process %d %s exited, deleting it.", pid, p->comm);
-
- if (ebpf_root_of_pids == p)
- ebpf_root_of_pids = p->next;
-
- if (p->next)
- p->next->prev = p->prev;
- if (p->prev)
- p->prev->next = p->next;
-
- freez(p->stat_filename);
- freez(p->status_filename);
- freez(p->io_filename);
- freez(p->cmdline_filename);
-
- rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock);
- netdata_ebpf_judy_pid_stats_t *pid_ptr = ebpf_get_pid_from_judy_unsafe(&ebpf_judy_pid.index.JudyLArray, p->pid);
- if (pid_ptr) {
- if (pid_ptr->socket_stats.JudyLArray) {
- Word_t local_socket = 0;
- Pvoid_t *socket_value;
- bool first_socket = true;
- while ((socket_value = JudyLFirstThenNext(pid_ptr->socket_stats.JudyLArray, &local_socket, &first_socket))) {
- netdata_socket_plus_t *socket_clean = *socket_value;
- aral_freez(aral_socket_table, socket_clean);
- }
- JudyLFreeArray(&pid_ptr->socket_stats.JudyLArray, PJE0);
- }
- JudyLDel(&ebpf_judy_pid.index.JudyLArray, p->pid, PJE0);
- }
- rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
-
- freez(p->cmdline);
- ebpf_pid_stat_release(p);
-
- ebpf_all_pids[pid] = NULL;
- ebpf_all_pids_count--;
-}
-
-/**
- * Get command string associated with a PID.
- * This can only safely be used when holding the `collect_data_mutex` lock.
- *
- * @param pid the pid to search the data.
- * @param n the maximum amount of bytes to copy into dest.
- * if this is greater than the size of the command, it is clipped.
- * @param dest the target memory buffer to write the command into.
- * @return -1 if the PID hasn't been scraped yet, 0 otherwise.
- */
-int get_pid_comm(pid_t pid, size_t n, char *dest)
-{
- struct ebpf_pid_stat *stat;
-
- stat = ebpf_all_pids[pid];
- if (unlikely(stat == NULL)) {
- return -1;
- }
-
- if (unlikely(n > sizeof(stat->comm))) {
- n = sizeof(stat->comm);
- }
-
- strncpyz(dest, stat->comm, n);
- return 0;
-}
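A brief usage sketch, assuming the caller already holds collect_data_mutex as the comment above requires (pid here stands for a PID already known to the collector):

    char comm[EBPF_MAX_COMPARE_NAME + 1];
    if (get_pid_comm(pid, sizeof(comm), comm) == -1) {
        /* PID not scraped yet; fall back to a placeholder name */
        strncpyz(comm, "unknown", sizeof(comm) - 1);
    }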
-
-/**
- * Clean up variables from other threads
- *
- * @param pid current pid.
- */
-void cleanup_variables_from_other_threads(uint32_t pid)
-{
- // Clean cachestat structure
- if (cachestat_pid) {
- ebpf_cachestat_release(cachestat_pid[pid]);
- cachestat_pid[pid] = NULL;
- }
-
- // Clean directory cache structure
- if (dcstat_pid) {
- ebpf_dcstat_release(dcstat_pid[pid]);
- dcstat_pid[pid] = NULL;
- }
-
- // Clean swap structure
- if (swap_pid) {
- freez(swap_pid[pid]);
- swap_pid[pid] = NULL;
- }
-
- // Clean vfs structure
- if (vfs_pid) {
- ebpf_vfs_release(vfs_pid[pid]);
- vfs_pid[pid] = NULL;
- }
-
- // Clean fd structure
- if (fd_pid) {
- ebpf_fd_release(fd_pid[pid]);
- fd_pid[pid] = NULL;
- }
-
- // Clean shm structure
- if (shm_pid) {
- ebpf_shm_release(shm_pid[pid]);
- shm_pid[pid] = NULL;
- }
-}
-
-/**
- * Remove PIDs when they are no longer running.
- */
-void cleanup_exited_pids()
-{
- struct ebpf_pid_stat *p = NULL;
-
- for (p = ebpf_root_of_pids; p;) {
- if (!p->updated && (!p->keep || p->keeploops > 0)) {
- if (unlikely(debug_enabled && (p->keep || p->keeploops)))
- debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, p->comm);
-
- pid_t r = p->pid;
- p = p->next;
-
- // Clean process structure
- if (global_process_stats) {
- ebpf_process_stat_release(global_process_stats[r]);
- global_process_stats[r] = NULL;
- }
-
- cleanup_variables_from_other_threads(r);
-
- del_pid_entry(r);
- } else {
- if (unlikely(p->keep))
- p->keeploops++;
- p->keep = 0;
- p = p->next;
- }
- }
-}
-
-/**
- * Read proc filesystem for the first time.
- */
-static inline void read_proc_filesystem()
-{
- char dirname[FILENAME_MAX + 1];
-
- snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
- DIR *dir = opendir(dirname);
- if (!dir)
- return;
-
- struct dirent *de = NULL;
-
- while ((de = readdir(dir))) {
- char *endptr = de->d_name;
-
- if (unlikely(de->d_type != DT_DIR || de->d_name[0] < '0' || de->d_name[0] > '9'))
- continue;
-
- pid_t pid = (pid_t)strtoul(de->d_name, &endptr, 10);
-
- // make sure we read a valid number
- if (unlikely(endptr == de->d_name || *endptr != '\0'))
- continue;
-
- collect_data_for_pid(pid, NULL);
- }
- closedir(dir);
-}
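The endptr check above is what filters out non-process entries such as "self" or "net": a directory name qualifies as a PID only if strtoul() consumes it completely. A standalone illustration of the same validation:

    #include <stdio.h>
    #include <stdlib.h>

    /* Return the PID encoded in name, or -1 if name is not purely numeric. */
    static long name_to_pid(const char *name) {
        char *endptr;
        long pid = strtol(name, &endptr, 10);
        if (endptr == name || *endptr != '\0')
            return -1;
        return pid;
    }

    int main(void) {
        printf("%ld\n", name_to_pid("1234")); /* 1234 */
        printf("%ld\n", name_to_pid("self")); /* -1   */
        printf("%ld\n", name_to_pid("12ab")); /* -1   */
        return 0;
    }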
-
-/**
- * Aggregate PID on target
- *
- * @param w the target output
- * @param p the pid with information to update
- * @param o never used
- */
-static inline void aggregate_pid_on_target(struct ebpf_target *w, struct ebpf_pid_stat *p, struct ebpf_target *o)
-{
- UNUSED(o);
-
- if (unlikely(!p->updated)) {
- // the process is not running
- return;
- }
-
- if (unlikely(!w)) {
- netdata_log_error("pid %d %s was left without a target!", p->pid, p->comm);
- return;
- }
-
- w->processes++;
- struct ebpf_pid_on_target *pid_on_target = mallocz(sizeof(struct ebpf_pid_on_target));
- pid_on_target->pid = p->pid;
- pid_on_target->next = w->root_pid;
- w->root_pid = pid_on_target;
-}
-
-/**
- * Process Accumulator
- *
- * Sum all values read from kernel and store in the first address.
- *
- * @param out the vector with read values.
- * @param maps_per_core do I need to read all cores?
- */
-void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core)
-{
- int i, end = (maps_per_core) ? ebpf_nprocs : 1;
- ebpf_process_stat_t *total = &out[0];
- for (i = 1; i < end; i++) {
- ebpf_process_stat_t *w = &out[i];
- total->exit_call += w->exit_call;
- total->task_err += w->task_err;
- total->create_thread += w->create_thread;
- total->create_process += w->create_process;
- total->release_call += w->release_call;
- }
-}
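The fold-into-slot-zero pattern above applies to any value read from a per-CPU BPF map: the lookup returns one copy of the struct per possible CPU, and only the sum is meaningful system-wide. A generic sketch with a hypothetical counter struct:

    /* Sum the per-CPU copies of a counter struct into element 0, so that
     * vals[0] afterwards holds the system-wide totals. */
    struct counters {
        unsigned long long calls;
        unsigned long long errors;
    };

    static void fold_per_cpu(struct counters *vals, int ncpus) {
        for (int i = 1; i < ncpus; i++) {
            vals[0].calls  += vals[i].calls;
            vals[0].errors += vals[i].errors;
        }
    }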
-
-/**
- * Collect data for all processes
- *
- * Read data from hash table and store it in appropriate vectors.
- * It also creates the link between targets and PIDs.
- *
- * @param tbl_pid_stats_fd The mapped file descriptor for the hash table.
- * @param maps_per_core do I have hash maps per core?
- */
-void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core)
-{
- if (unlikely(!ebpf_all_pids))
- return;
-
- struct ebpf_pid_stat *pids = ebpf_root_of_pids; // global list of all processes running
- while (pids) {
- if (pids->updated_twice) {
- pids->read = 0; // mark it as not read, so that collect_data_for_pid() will read it
- pids->updated = 0;
- pids->merged = 0;
- pids->children_count = 0;
- pids->parent = NULL;
- } else {
- if (pids->updated)
- pids->updated_twice = 1;
- }
-
- pids = pids->next;
- }
-
- read_proc_filesystem();
-
- uint32_t key;
- pids = ebpf_root_of_pids; // global list of all processes running
- // while (bpf_map_get_next_key(tbl_pid_stats_fd, &key, &next_key) == 0) {
-
- if (tbl_pid_stats_fd != -1) {
- size_t length = sizeof(ebpf_process_stat_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- while (pids) {
- key = pids->pid;
-
- ebpf_process_stat_t *w = global_process_stats[key];
- if (!w) {
- w = ebpf_process_stat_get();
- global_process_stats[key] = w;
- }
-
- if (bpf_map_lookup_elem(tbl_pid_stats_fd, &key, process_stat_vector)) {
- // Clean Process structures
- ebpf_process_stat_release(w);
- global_process_stats[key] = NULL;
-
- cleanup_variables_from_other_threads(key);
-
- pids = pids->next;
- continue;
- }
-
- ebpf_process_apps_accumulator(process_stat_vector, maps_per_core);
-
- memcpy(w, process_stat_vector, sizeof(ebpf_process_stat_t));
-
- memset(process_stat_vector, 0, length);
-
- pids = pids->next;
- }
- }
-
- link_all_processes_to_their_parents();
-
- apply_apps_groups_targets_inheritance();
-
- apps_groups_targets_count = zero_all_targets(apps_groups_root_target);
-
- // this has to be done before the cleanup
- // concentrate everything on the targets
- for (pids = ebpf_root_of_pids; pids; pids = pids->next)
- aggregate_pid_on_target(pids->target, pids, NULL);
-
- post_aggregate_targets(apps_groups_root_target);
-}
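Taken together, one collection iteration orchestrated by this function can be summarized as follows (a descriptive sketch, not a literal function from the plugin):

    /* One iteration of collect_data_for_all_processes():
     *   1. age the flags of known pids (updated -> updated_twice)
     *   2. rescan /proc, refreshing stat/cmdline for every pid found
     *   3. look up each pid in the BPF hash table and fold per-CPU values
     *   4. rebuild the parent/child tree and apply target inheritance
     *   5. aggregate the surviving pids onto their targets for charting */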
diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h
deleted file mode 100644
index 258091507..000000000
--- a/collectors/ebpf.plugin/ebpf_apps.h
+++ /dev/null
@@ -1,264 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_APPS_H
-#define NETDATA_EBPF_APPS_H 1
-
-#include "libnetdata/locks/locks.h"
-#include "libnetdata/avl/avl.h"
-#include "libnetdata/clocks/clocks.h"
-#include "libnetdata/config/appconfig.h"
-#include "libnetdata/ebpf/ebpf.h"
-
-#define NETDATA_APPS_FAMILY "apps"
-#define NETDATA_APP_FAMILY "app"
-#define NETDATA_APPS_FILE_GROUP "file_access"
-#define NETDATA_APPS_FILE_FDS "fds"
-#define NETDATA_APPS_FILE_CGROUP_GROUP "file_access (eBPF)"
-#define NETDATA_APPS_PROCESS_GROUP "process (eBPF)"
-#define NETDATA_APPS_NET_GROUP "net"
-#define NETDATA_APPS_IPC_SHM_GROUP "ipc shm"
-
-#include "ebpf_process.h"
-#include "ebpf_dcstat.h"
-#include "ebpf_disk.h"
-#include "ebpf_fd.h"
-#include "ebpf_filesystem.h"
-#include "ebpf_functions.h"
-#include "ebpf_hardirq.h"
-#include "ebpf_cachestat.h"
-#include "ebpf_mdflush.h"
-#include "ebpf_mount.h"
-#include "ebpf_oomkill.h"
-#include "ebpf_shm.h"
-#include "ebpf_socket.h"
-#include "ebpf_softirq.h"
-#include "ebpf_sync.h"
-#include "ebpf_swap.h"
-#include "ebpf_vfs.h"
-
-#define EBPF_MAX_COMPARE_NAME 100
-#define EBPF_MAX_NAME 100
-
-// ----------------------------------------------------------------------------
-// pid_stat
-//
-struct ebpf_target {
- char compare[EBPF_MAX_COMPARE_NAME + 1];
- uint32_t comparehash;
- size_t comparelen;
-
- char id[EBPF_MAX_NAME + 1];
- uint32_t idhash;
- uint32_t charts_created;
-
- char name[EBPF_MAX_NAME + 1];
- char clean_name[EBPF_MAX_NAME + 1]; // sanitized name used in chart id (need to replace at least dots)
-
- // Changes made to simplify integration between apps and eBPF.
- netdata_publish_cachestat_t cachestat;
- netdata_publish_dcstat_t dcstat;
- netdata_publish_swap_t swap;
- netdata_publish_vfs_t vfs;
- netdata_fd_stat_t fd;
- netdata_publish_shm_t shm;
-
- kernel_uint_t starttime;
- kernel_uint_t collected_starttime;
-
- unsigned int processes; // how many processes have been merged to this
- int exposed; // if set, we have sent this to netdata
- int hidden; // if set, we set the hidden flag on the dimension
- int debug_enabled;
- int ends_with;
- int starts_with; // if set, the compare string matches only the
- // beginning of the command
-
- struct ebpf_pid_on_target *root_pid; // list of aggregated pids for target debugging
-
- struct ebpf_target *target; // the one that will be reported to netdata
- struct ebpf_target *next;
-};
-
-extern struct ebpf_target *apps_groups_default_target;
-extern struct ebpf_target *apps_groups_root_target;
-extern struct ebpf_target *users_root_target;
-extern struct ebpf_target *groups_root_target;
-
-struct ebpf_pid_stat {
- int32_t pid;
- char comm[EBPF_MAX_COMPARE_NAME + 1];
- char *cmdline;
-
- uint32_t log_thrown;
-
- // char state;
- int32_t ppid;
-
- int children_count; // number of processes directly referencing this
- unsigned char keep : 1; // 1 when we need to keep this process in memory even after it exited
- int keeploops; // increases by 1 every time keep is 1 and updated 0
- unsigned char updated : 1; // 1 when the process is currently running
- unsigned char updated_twice : 1; // 1 when the process was running in the previous iteration
- unsigned char merged : 1; // 1 when it has been merged to its parent
- unsigned char read : 1; // 1 when we have already read this process for this iteration
-
- int sortlist; // higher numbers = top on the process tree
-
- // each process gets a unique number
-
- struct ebpf_target *target; // app_groups.conf targets
- struct ebpf_target *user_target; // uid based targets
- struct ebpf_target *group_target; // gid based targets
-
- usec_t stat_collected_usec;
- usec_t last_stat_collected_usec;
-
- char *stat_filename;
- char *status_filename;
- char *io_filename;
- char *cmdline_filename;
-
- struct ebpf_pid_stat *parent;
- struct ebpf_pid_stat *prev;
- struct ebpf_pid_stat *next;
-};
-
-// ----------------------------------------------------------------------------
-// target
-//
-// target is the structure that processes are aggregated to be reported
-// to netdata.
-//
-// - Each entry in /etc/apps_groups.conf creates a target.
-// - Each user and group used by a process in the system, creates a target.
-struct ebpf_pid_on_target {
- int32_t pid;
- struct ebpf_pid_on_target *next;
-};
-
-// ----------------------------------------------------------------------------
-// Structures used to read information from kernel ring
-typedef struct ebpf_process_stat {
- uint64_t pid_tgid; // This cannot be removed, because it is used inside kernel ring.
- uint32_t pid;
-
- // Counters
- uint32_t exit_call;
- uint32_t release_call;
- uint32_t create_process;
- uint32_t create_thread;
-
- // Error counter
- uint32_t task_err;
-
- uint8_t removeme;
-} ebpf_process_stat_t;
-
-/**
- * Internal function used to write debug messages.
- *
- * @param fmt the format to create the message.
- * @param ... the arguments to fill the format.
- */
-static inline void debug_log_int(const char *fmt, ...)
-{
- va_list args;
-
- fprintf(stderr, "apps.plugin: ");
- va_start(args, fmt);
- vfprintf(stderr, fmt, args);
- va_end(args);
-
- fputc('\n', stderr);
-}
-
-// ----------------------------------------------------------------------------
- // Exported variables and functions
-//
-extern struct ebpf_pid_stat **ebpf_all_pids;
-
-int ebpf_read_apps_groups_conf(struct ebpf_target **apps_groups_default_target,
- struct ebpf_target **apps_groups_root_target,
- const char *path,
- const char *file);
-
-void clean_apps_groups_target(struct ebpf_target *apps_groups_root_target);
-
-size_t zero_all_targets(struct ebpf_target *root);
-
-int am_i_running_as_root();
-
-void cleanup_exited_pids();
-
-int ebpf_read_hash_table(void *ep, int fd, uint32_t pid);
-
-int get_pid_comm(pid_t pid, size_t n, char *dest);
-
-void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core);
-void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core);
-
-extern ebpf_process_stat_t **global_process_stats;
-extern netdata_publish_cachestat_t **cachestat_pid;
-extern netdata_publish_dcstat_t **dcstat_pid;
-extern netdata_publish_swap_t **swap_pid;
-extern netdata_publish_vfs_t **vfs_pid;
-extern netdata_fd_stat_t **fd_pid;
-extern netdata_publish_shm_t **shm_pid;
-
- // The default value is at least 32 times smaller than the maximum number of PIDs allowed on the system;
-// this is only possible because we are using ARAL (https://github.com/netdata/netdata/tree/master/libnetdata/aral).
-#ifndef NETDATA_EBPF_ALLOC_MAX_PID
-# define NETDATA_EBPF_ALLOC_MAX_PID 1024
-#endif
-#define NETDATA_EBPF_ALLOC_MIN_ELEMENTS 256
-
- // ARAL Section
-extern void ebpf_aral_init(void);
-
-extern ebpf_process_stat_t *ebpf_process_stat_get(void);
-extern void ebpf_process_stat_release(ebpf_process_stat_t *stat);
-extern ebpf_process_stat_t *process_stat_vector;
-
-extern ARAL *ebpf_aral_socket_pid;
-void ebpf_socket_aral_init();
-ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void);
-
-extern ARAL *ebpf_aral_cachestat_pid;
-void ebpf_cachestat_aral_init();
-netdata_publish_cachestat_t *ebpf_publish_cachestat_get(void);
-void ebpf_cachestat_release(netdata_publish_cachestat_t *stat);
-
-extern ARAL *ebpf_aral_dcstat_pid;
-void ebpf_dcstat_aral_init();
-netdata_publish_dcstat_t *ebpf_publish_dcstat_get(void);
-void ebpf_dcstat_release(netdata_publish_dcstat_t *stat);
-
-extern ARAL *ebpf_aral_vfs_pid;
-void ebpf_vfs_aral_init();
-netdata_publish_vfs_t *ebpf_vfs_get(void);
-void ebpf_vfs_release(netdata_publish_vfs_t *stat);
-
-extern ARAL *ebpf_aral_fd_pid;
-void ebpf_fd_aral_init();
-netdata_fd_stat_t *ebpf_fd_stat_get(void);
-void ebpf_fd_release(netdata_fd_stat_t *stat);
-
-extern ARAL *ebpf_aral_shm_pid;
-void ebpf_shm_aral_init();
-netdata_publish_shm_t *ebpf_shm_stat_get(void);
-void ebpf_shm_release(netdata_publish_shm_t *stat);
-
-// ARAL Section end
-
-// Threads integrated with apps
-extern ebpf_socket_publish_apps_t **socket_bandwidth_curr;
-// Threads integrated with apps
-
-#include "libnetdata/threads/threads.h"
-
-// ARAL variables
-extern ARAL *ebpf_aral_apps_pid_stat;
-extern ARAL *ebpf_aral_process_stat;
-#define NETDATA_EBPF_PROC_ARAL_NAME "ebpf_proc_stat"
-
-#endif /* NETDATA_EBPF_APPS_H */
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c
deleted file mode 100644
index d9f8f7b06..000000000
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ /dev/null
@@ -1,1591 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_cachestat.h"
-
-static char *cachestat_counter_dimension_name[NETDATA_CACHESTAT_END] = { "ratio", "dirty", "hit",
- "miss" };
-static netdata_syscall_stat_t cachestat_counter_aggregated_data[NETDATA_CACHESTAT_END];
-static netdata_publish_syscall_t cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_END];
-
-netdata_cachestat_pid_t *cachestat_vector = NULL;
-
-static netdata_idx_t cachestat_hash_values[NETDATA_CACHESTAT_END];
-static netdata_idx_t *cachestat_values = NULL;
-
-ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input = NETDATA_CACHESTAT_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "cstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = "cstat_ctrl", .internal_input = NETDATA_CONTROLLER_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
-#endif
- }};
-
-struct config cachestat_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-netdata_ebpf_targets_t cachestat_targets[] = { {.name = "add_to_page_cache_lru", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "mark_page_accessed", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "mark_buffer_dirty", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-
-static char *account_page[NETDATA_CACHESTAT_ACCOUNT_DIRTY_END] = { "account_page_dirtied",
- "__set_page_dirty", "__folio_mark_dirty" };
-
-#ifdef NETDATA_DEV_MODE
-int cachestat_disable_priority;
-#endif
-
-#ifdef LIBBPF_MAJOR_VERSION
-/**
- * Disable probe
- *
- * Disable all probes to use another method exclusively.
- *
- * @param obj is the main structure for bpf objects
- */
-static void ebpf_cachestat_disable_probe(struct cachestat_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_add_to_page_cache_lru_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_mark_page_accessed_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_mark_buffer_dirty_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_kprobe, false);
-}
-
-/**
- * Disable specific probe
- *
- * Disable probes according to the kernel version.
- *
- * @param obj is the main structure for bpf objects
- */
-static void ebpf_cachestat_disable_specific_probe(struct cachestat_bpf *obj)
-{
- if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
- account_page[NETDATA_CACHESTAT_FOLIO_DIRTY])) {
- bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_kprobe, false);
- } else if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
- account_page[NETDATA_CACHESTAT_SET_PAGE_DIRTY])) {
- bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false);
- } else {
- bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_kprobe, false);
- }
-}
-
-/**
- * Disable trampoline
- *
- * Disable all trampolines to use another method exclusively.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_cachestat_disable_trampoline(struct cachestat_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_add_to_page_cache_lru_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_mark_page_accessed_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_mark_buffer_dirty_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false);
-}
-
-/**
- * Disable specific trampoline
- *
- * Disable trampolines according to the kernel version.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_cachestat_disable_specific_trampoline(struct cachestat_bpf *obj)
-{
- if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
- account_page[NETDATA_CACHESTAT_FOLIO_DIRTY])) {
- bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_fentry, false);
- } else if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
- account_page[NETDATA_CACHESTAT_SET_PAGE_DIRTY])) {
- bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false);
- } else {
- bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_fentry, false);
- }
-}
-
-/**
- * Set trampoline target
- *
- * Set the targets we will monitor.
- *
- * @param obj is the main structure for bpf objects.
- */
-static inline void netdata_set_trampoline_target(struct cachestat_bpf *obj)
-{
- bpf_program__set_attach_target(obj->progs.netdata_add_to_page_cache_lru_fentry, 0,
- cachestat_targets[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_mark_page_accessed_fentry, 0,
- cachestat_targets[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED].name);
-
- if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
- account_page[NETDATA_CACHESTAT_FOLIO_DIRTY])) {
- bpf_program__set_attach_target(obj->progs.netdata_folio_mark_dirty_fentry, 0,
- cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
- } else if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
- account_page[NETDATA_CACHESTAT_SET_PAGE_DIRTY])) {
- bpf_program__set_attach_target(obj->progs.netdata_set_page_dirty_fentry, 0,
- cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
- } else {
- bpf_program__set_attach_target(obj->progs.netdata_account_page_dirtied_fentry, 0,
- cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
- }
-
- bpf_program__set_attach_target(obj->progs.netdata_mark_buffer_dirty_fentry, 0,
- cachestat_targets[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_release_task_fentry, 0,
- EBPF_COMMON_FNCT_CLEAN_UP);
-}
-
-/**
- * Cachestat Attach Probe
- *
- * Attach probes to targets.
- *
- * @param obj is the main structure for bpf objects.
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_cachestat_attach_probe(struct cachestat_bpf *obj)
-{
- obj->links.netdata_add_to_page_cache_lru_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_add_to_page_cache_lru_kprobe,
- false,
- cachestat_targets[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].name);
- int ret = libbpf_get_error(obj->links.netdata_add_to_page_cache_lru_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_mark_page_accessed_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_mark_page_accessed_kprobe,
- false,
- cachestat_targets[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED].name);
- ret = libbpf_get_error(obj->links.netdata_mark_page_accessed_kprobe);
- if (ret)
- return -1;
-
- if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
- account_page[NETDATA_CACHESTAT_FOLIO_DIRTY])) {
- obj->links.netdata_folio_mark_dirty_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_folio_mark_dirty_kprobe,
- false,
- cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
- ret = libbpf_get_error(obj->links.netdata_folio_mark_dirty_kprobe);
- } else if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
- account_page[NETDATA_CACHESTAT_SET_PAGE_DIRTY])) {
- obj->links.netdata_set_page_dirty_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_set_page_dirty_kprobe,
- false,
- cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
- ret = libbpf_get_error(obj->links.netdata_set_page_dirty_kprobe);
- } else {
- obj->links.netdata_account_page_dirtied_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_account_page_dirtied_kprobe,
- false,
- cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
- ret = libbpf_get_error(obj->links.netdata_account_page_dirtied_kprobe);
- }
-
- if (ret)
- return -1;
-
- obj->links.netdata_mark_buffer_dirty_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_mark_buffer_dirty_kprobe,
- false,
- cachestat_targets[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY].name);
- ret = libbpf_get_error(obj->links.netdata_mark_buffer_dirty_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_release_task_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_release_task_kprobe,
- false,
- EBPF_COMMON_FNCT_CLEAN_UP);
- ret = libbpf_get_error(obj->links.netdata_release_task_kprobe);
- if (ret)
- return -1;
-
- return 0;
-}
-
-/**
- * Adjust Map Size
- *
- * Resize maps according to input from users.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- */
-static void ebpf_cachestat_adjust_map(struct cachestat_bpf *obj, ebpf_module_t *em)
-{
- ebpf_update_map_size(obj->maps.cstat_pid, &cachestat_maps[NETDATA_CACHESTAT_PID_STATS],
- em, bpf_map__name(obj->maps.cstat_pid));
-
- ebpf_update_map_type(obj->maps.cstat_global, &cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS]);
- ebpf_update_map_type(obj->maps.cstat_pid, &cachestat_maps[NETDATA_CACHESTAT_PID_STATS]);
- ebpf_update_map_type(obj->maps.cstat_ctrl, &cachestat_maps[NETDATA_CACHESTAT_CTRL]);
-}
-
-/**
- * Set hash tables
- *
- * Set the values for maps according to the values given by the kernel.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_cachestat_set_hash_tables(struct cachestat_bpf *obj)
-{
- cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd = bpf_map__fd(obj->maps.cstat_global);
- cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd = bpf_map__fd(obj->maps.cstat_pid);
- cachestat_maps[NETDATA_CACHESTAT_CTRL].map_fd = bpf_map__fd(obj->maps.cstat_ctrl);
-}
-
-/**
- * Disable Release Task
- *
- * Disable release task when neither apps nor cgroup charts are enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_cachestat_disable_release_task(struct cachestat_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_release_task_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false);
-}
-
-/**
- * Load and attach
- *
- * Load and attach the eBPF code in kernel.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- *
- * @return it returns 0 on success and -1 otherwise
- */
-static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf_module_t *em)
-{
- netdata_ebpf_targets_t *mt = em->targets;
- netdata_ebpf_program_loaded_t test = mt[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].mode;
-
- if (test == EBPF_LOAD_TRAMPOLINE) {
- ebpf_cachestat_disable_probe(obj);
- ebpf_cachestat_disable_specific_trampoline(obj);
-
- netdata_set_trampoline_target(obj);
- } else {
- ebpf_cachestat_disable_trampoline(obj);
- ebpf_cachestat_disable_specific_probe(obj);
- }
-
- ebpf_cachestat_adjust_map(obj, em);
-
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_cachestat_disable_release_task(obj);
-
- int ret = cachestat_bpf__load(obj);
- if (ret) {
- return ret;
- }
-
- ret = (test == EBPF_LOAD_TRAMPOLINE) ? cachestat_bpf__attach(obj) : ebpf_cachestat_attach_probe(obj);
- if (!ret) {
- ebpf_cachestat_set_hash_tables(obj);
-
- ebpf_update_controller(cachestat_maps[NETDATA_CACHESTAT_CTRL].map_fd, em);
- }
-
- return ret;
-}
-#endif
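The selection above follows the usual libbpf pattern: programs for the unused attach method must have autoload disabled before cachestat_bpf__load(), otherwise the verifier would try to validate fentry programs whose attach targets may not exist on the running kernel. Condensed, the flow is (a hedged sketch; `mode` stands for the selected netdata_ebpf_program_loaded_t, other names come from this file, error handling elided):

    if (mode == EBPF_LOAD_TRAMPOLINE) {
        ebpf_cachestat_disable_probe(obj);      /* drop the *_kprobe programs */
        netdata_set_trampoline_target(obj);     /* bind the fentry targets    */
    } else {
        ebpf_cachestat_disable_trampoline(obj); /* drop the *_fentry programs */
    }
    if (cachestat_bpf__load(obj))               /* the verifier runs here     */
        return -1;
    return (mode == EBPF_LOAD_TRAMPOLINE) ? cachestat_bpf__attach(obj)
                                          : ebpf_cachestat_attach_probe(obj);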
-/*****************************************************************
- *
- * FUNCTIONS TO CLOSE THE THREAD
- *
- *****************************************************************/
-
-static void ebpf_obsolete_specific_cachestat_charts(char *type, int update_every);
-
-/**
- * Obsolete services
- *
- * Obsolete all service charts created
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_services(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_CACHESTAT_HIT_RATIO_CHART,
- "",
- "Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_SYSTEMD_CACHESTAT_HIT_RATIO_CONTEXT,
- 21100,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_CACHESTAT_DIRTY_CHART,
- "",
- "Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_SYSTEMD_CACHESTAT_MODIFIED_CACHE_CONTEXT,
- 21101,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_CACHESTAT_HIT_CHART,
- "",
- "Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT,
- 21102,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_CACHESTAT_MISSES_CHART,
- "",
- "Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT,
- 21103,
- em->update_every);
-}
-
-/**
- * Obsolete cgroup chart
- *
- * Send obsolete for all charts created before to close.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static inline void ebpf_obsolete_cachestat_cgroup_charts(ebpf_module_t *em) {
- pthread_mutex_lock(&mutex_cgroup_shm);
-
- ebpf_obsolete_services(em);
-
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- ebpf_obsolete_specific_cachestat_charts(ect->name, em->update_every);
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_cachestat_global(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_CACHESTAT_HIT_RATIO_CHART,
- "",
- "Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21100,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_CACHESTAT_DIRTY_CHART,
- "",
- "Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21101,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_CACHESTAT_HIT_CHART,
- "",
- "Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21102,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_CACHESTAT_MISSES_CHART,
- "",
- "Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21103,
- em->update_every);
-}
-
-/**
- * Obsolete apps charts
- *
- * Obsolete apps charts.
- *
- * @param em a pointer to the structure with the default values.
- */
-void ebpf_obsolete_cachestat_apps_charts(struct ebpf_module *em)
-{
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = apps_groups_root_target; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_CACHESTAT_IDX))))
- continue;
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_cachestat_hit_ratio",
- "Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- "app.ebpf_cachestat_hit_ratio",
- 20260,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_cachestat_dirty_pages",
- "Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_cachestat_dirty_pages",
- 20261,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_cachestat_access",
- "Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_cachestat_access",
- 20262,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_cachestat_misses",
- "Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_cachestat_misses",
- 20263,
- update_every);
- w->charts_created &= ~(1<<EBPF_MODULE_CACHESTAT_IDX);
- }
-}
-
-/**
- * Cachestat exit.
- *
- * Cancel child and exit.
- *
- * @param ptr thread data.
- */
-static void ebpf_cachestat_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
- if (em->cgroup_charts) {
- ebpf_obsolete_cachestat_cgroup_charts(em);
- fflush(stdout);
- }
-
- if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
- ebpf_obsolete_cachestat_apps_charts(em);
- }
-
- ebpf_obsolete_cachestat_global(em);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_cachestat_pid)
- ebpf_statistic_obsolete_aral_chart(em, cachestat_disable_priority);
-#endif
-
- fflush(stdout);
- pthread_mutex_unlock(&lock);
- }
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
-
-#ifdef LIBBPF_MAJOR_VERSION
- if (cachestat_bpf_obj) {
- cachestat_bpf__destroy(cachestat_bpf_obj);
- cachestat_bpf_obj = NULL;
- }
-#endif
-
- if (em->objects) {
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/*****************************************************************
- *
- * COMMON FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * Update publish
- *
- * Update publish values before writing dimensions.
- *
- * @param out structure that will receive data.
- * @param mpa calls for mark_page_accessed during the last second.
- * @param mbd calls for mark_buffer_dirty during the last second.
- * @param apcl calls for add_to_page_cache_lru during the last second.
- * @param apd calls for account_page_dirtied during the last second.
- */
-void cachestat_update_publish(netdata_publish_cachestat_t *out, uint64_t mpa, uint64_t mbd,
- uint64_t apcl, uint64_t apd)
-{
- // Adapted algorithm from https://github.com/iovisor/bcc/blob/master/tools/cachestat.py#L126-L138
- NETDATA_DOUBLE total = (NETDATA_DOUBLE) (((long long)mpa) - ((long long)mbd));
- if (total < 0)
- total = 0;
-
- NETDATA_DOUBLE misses = (NETDATA_DOUBLE) ( ((long long) apcl) - ((long long) apd) );
- if (misses < 0)
- misses = 0;
-
- // If hits are < 0, it is possible that misses are overestimated because page cache read-ahead added
- // more pages than needed. In this case, assume misses equal total and reset hits.
- NETDATA_DOUBLE hits = total - misses;
- if (hits < 0 ) {
- misses = total;
- hits = 0;
- }
-
- NETDATA_DOUBLE ratio = (total > 0) ? hits/total : 1;
-
- out->ratio = (long long )(ratio*100);
- out->hit = (long long)hits;
- out->miss = (long long)misses;
-}
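A worked example of the arithmetic above: with mpa = 1000, mbd = 200, apcl = 300 and apd = 100, total = 1000 - 200 = 800 and misses = 300 - 100 = 200, so hits = 800 - 200 = 600 and the published ratio is 600/800 = 0.75, stored as 75.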
-
-/**
- * Save previous values
- *
- * Save values used this time.
- *
- * @param publish the structure where the current hash table values are stored.
- */
-static void save_previous_values(netdata_publish_cachestat_t *publish) {
- publish->prev.mark_page_accessed = cachestat_hash_values[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED];
- publish->prev.account_page_dirtied = cachestat_hash_values[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED];
- publish->prev.add_to_page_cache_lru = cachestat_hash_values[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU];
- publish->prev.mark_buffer_dirty = cachestat_hash_values[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY];
-}
-
-/**
- * Calculate statistics
- *
- * @param publish the structure where we will store the data.
- */
-static void calculate_stats(netdata_publish_cachestat_t *publish) {
- if (!publish->prev.mark_page_accessed) {
- save_previous_values(publish);
- return;
- }
-
- uint64_t mpa = cachestat_hash_values[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED] - publish->prev.mark_page_accessed;
- uint64_t mbd = cachestat_hash_values[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY] - publish->prev.mark_buffer_dirty;
- uint64_t apcl = cachestat_hash_values[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU] - publish->prev.add_to_page_cache_lru;
- uint64_t apd = cachestat_hash_values[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED] - publish->prev.account_page_dirtied;
-
- save_previous_values(publish);
-
- // We are changing the original algorithm to have a smooth ratio.
- cachestat_update_publish(publish, mpa, mbd, apcl, apd);
-}
-
-
-/*****************************************************************
- *
- * APPS
- *
- *****************************************************************/
-
-/**
- * Apps Accumulator
- *
- * Sum all values read from kernel and store in the first address.
- *
- * @param out the vector with read values.
- * @param maps_per_core do I need to read all cores?
- */
-static void cachestat_apps_accumulator(netdata_cachestat_pid_t *out, int maps_per_core)
-{
- int i, end = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_cachestat_pid_t *total = &out[0];
- for (i = 1; i < end; i++) {
- netdata_cachestat_pid_t *w = &out[i];
- total->account_page_dirtied += w->account_page_dirtied;
- total->add_to_page_cache_lru += w->add_to_page_cache_lru;
- total->mark_buffer_dirty += w->mark_buffer_dirty;
- total->mark_page_accessed += w->mark_page_accessed;
- }
-}
-
-/**
- * Save Pid values
- *
- * Save the current values inside the structure
- *
- * @param out vector used to plot charts
- * @param publish vector with values read from hash tables.
- */
-static inline void cachestat_save_pid_values(netdata_publish_cachestat_t *out, netdata_cachestat_pid_t *publish)
-{
- if (!out->current.mark_page_accessed) {
- memcpy(&out->current, &publish[0], sizeof(netdata_cachestat_pid_t));
- return;
- }
-
- memcpy(&out->prev, &out->current, sizeof(netdata_cachestat_pid_t));
- memcpy(&out->current, &publish[0], sizeof(netdata_cachestat_pid_t));
-}
-
-/**
- * Fill PID
- *
- * Fill PID structures
- *
- * @param current_pid the pid for which we are collecting data
- * @param publish values read from hash tables.
- */
-static void cachestat_fill_pid(uint32_t current_pid, netdata_cachestat_pid_t *publish)
-{
- netdata_publish_cachestat_t *curr = cachestat_pid[current_pid];
- if (!curr) {
- curr = ebpf_publish_cachestat_get();
- cachestat_pid[current_pid] = curr;
-
- cachestat_save_pid_values(curr, publish);
- return;
- }
-
- cachestat_save_pid_values(curr, publish);
-}
-
-/**
- * Read APPS table
- *
- * Read the apps table and store data inside the structure.
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void ebpf_read_cachestat_apps_table(int maps_per_core)
-{
- netdata_cachestat_pid_t *cv = cachestat_vector;
- uint32_t key;
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
- int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
- size_t length = sizeof(netdata_cachestat_pid_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- while (pids) {
- key = pids->pid;
-
- if (bpf_map_lookup_elem(fd, &key, cv)) {
- pids = pids->next;
- continue;
- }
-
- cachestat_apps_accumulator(cv, maps_per_core);
-
- cachestat_fill_pid(key, cv);
-
- // We are cleaning to avoid passing data read from one process to other.
- memset(cv, 0, length);
-
- pids = pids->next;
- }
-}
-
-/**
- * Update cgroup
- *
- * Update cgroup data based on the PID values collected from the hash table.
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void ebpf_update_cachestat_cgroup(int maps_per_core)
-{
- netdata_cachestat_pid_t *cv = cachestat_vector;
- int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
- size_t length = sizeof(netdata_cachestat_pid_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- ebpf_cgroup_target_t *ect;
- pthread_mutex_lock(&mutex_cgroup_shm);
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- struct pid_on_target2 *pids;
- for (pids = ect->pids; pids; pids = pids->next) {
- int pid = pids->pid;
- netdata_cachestat_pid_t *out = &pids->cachestat;
- if (likely(cachestat_pid) && cachestat_pid[pid]) {
- netdata_publish_cachestat_t *in = cachestat_pid[pid];
-
- memcpy(out, &in->current, sizeof(netdata_cachestat_pid_t));
- } else {
- memset(cv, 0, length);
- if (bpf_map_lookup_elem(fd, &pid, cv)) {
- continue;
- }
-
- cachestat_apps_accumulator(cv, maps_per_core);
-
- memcpy(out, cv, sizeof(netdata_cachestat_pid_t));
- }
- }
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Create apps charts
- *
- * Call ebpf_create_chart to create the charts on apps submenu.
- *
- * @param em a pointer to the structure with the default values.
- * @param ptr a pointer to the targets list.
- */
-void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr)
-{
- struct ebpf_target *root = ptr;
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = root; w; w = w->next) {
- if (unlikely(!w->exposed))
- continue;
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_cachestat_hit_ratio",
- "Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- "app.ebpf_cachestat_hit_ratio",
- 20260,
- update_every,
- NETDATA_EBPF_MODULE_NAME_CACHESTAT);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION ratio '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_cachestat_dirty_pages",
- "Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- "app.ebpf_cachestat_dirty_pages",
- 20261,
- update_every,
- NETDATA_EBPF_MODULE_NAME_CACHESTAT);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION pages '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_cachestat_access",
- "Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_cachestat_access",
- 20262,
- update_every,
- NETDATA_EBPF_MODULE_NAME_CACHESTAT);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION hits '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_cachestat_misses",
- "Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES,
- NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_cachestat_misses",
- 20263,
- update_every,
- NETDATA_EBPF_MODULE_NAME_CACHESTAT);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION misses '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
- w->charts_created |= 1<<EBPF_MODULE_CACHESTAT_IDX;
- }
-
- em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED;
-}
-
-/*****************************************************************
- *
- * MAIN LOOP
- *
- *****************************************************************/
-
-/**
- * Read global counter
- *
- * Read the table with the number of calls for all functions.
- *
- * @param stats vector used to read data from control table.
- * @param maps_per_core do I need to read all cores?
- */
-static void ebpf_cachestat_read_global_tables(netdata_idx_t *stats, int maps_per_core)
-{
- ebpf_read_global_table_stats(cachestat_hash_values,
- cachestat_values,
- cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd,
- maps_per_core,
- NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU,
- NETDATA_CACHESTAT_END);
-
- ebpf_read_global_table_stats(stats,
- cachestat_values,
- cachestat_maps[NETDATA_CACHESTAT_CTRL].map_fd,
- maps_per_core,
- NETDATA_CONTROLLER_PID_TABLE_ADD,
- NETDATA_CONTROLLER_END);
-}
-
-/**
- * Send global
- *
- * Send global charts to Netdata
- */
-static void cachestat_send_global(netdata_publish_cachestat_t *publish)
-{
- calculate_stats(publish);
-
- netdata_publish_syscall_t *ptr = cachestat_counter_publish_aggregated;
- ebpf_one_dimension_write_charts(
- NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_RATIO_CHART, ptr[NETDATA_CACHESTAT_IDX_RATIO].dimension,
- publish->ratio);
-
- ebpf_one_dimension_write_charts(
- NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_DIRTY_CHART, ptr[NETDATA_CACHESTAT_IDX_DIRTY].dimension,
- cachestat_hash_values[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY]);
-
- ebpf_one_dimension_write_charts(
- NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_CHART, ptr[NETDATA_CACHESTAT_IDX_HIT].dimension, publish->hit);
-
- ebpf_one_dimension_write_charts(
- NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_MISSES_CHART, ptr[NETDATA_CACHESTAT_IDX_MISS].dimension,
- publish->miss);
-}
-
-/**
- * Cachestat sum PIDs
- *
- * Sum values for all PIDs associated with a group.
- *
- * @param publish output structure.
- * @param root structure with the listed PIDs.
- */
-void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct ebpf_pid_on_target *root)
-{
- memcpy(&publish->prev, &publish->current,sizeof(publish->current));
- memset(&publish->current, 0, sizeof(publish->current));
-
- netdata_cachestat_pid_t *dst = &publish->current;
- while (root) {
- int32_t pid = root->pid;
- netdata_publish_cachestat_t *w = cachestat_pid[pid];
- if (w) {
- netdata_cachestat_pid_t *src = &w->current;
- dst->account_page_dirtied += src->account_page_dirtied;
- dst->add_to_page_cache_lru += src->add_to_page_cache_lru;
- dst->mark_buffer_dirty += src->mark_buffer_dirty;
- dst->mark_page_accessed += src->mark_page_accessed;
- }
-
- root = root->next;
- }
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param root the target list.
-*/
-void ebpf_cache_send_apps_data(struct ebpf_target *root)
-{
- struct ebpf_target *w;
- collected_number value;
-
- for (w = root; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_CACHESTAT_IDX))))
- continue;
-
- ebpf_cachestat_sum_pids(&w->cachestat, w->root_pid);
- netdata_cachestat_pid_t *current = &w->cachestat.current;
- netdata_cachestat_pid_t *prev = &w->cachestat.prev;
-
- uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed;
- uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty;
- w->cachestat.dirty = mbd;
- uint64_t apcl = current->add_to_page_cache_lru - prev->add_to_page_cache_lru;
- uint64_t apd = current->account_page_dirtied - prev->account_page_dirtied;
-
- cachestat_update_publish(&w->cachestat, mpa, mbd, apcl, apd);
-
- value = (collected_number) w->cachestat.ratio;
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_cachestat_hit_ratio");
- write_chart_dimension("ratio", value);
- ebpf_write_end_chart();
-
- value = (collected_number) w->cachestat.dirty;
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_cachestat_dirty_pages");
- write_chart_dimension("pages", value);
- ebpf_write_end_chart();
-
- value = (collected_number) w->cachestat.hit;
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_cachestat_access");
- write_chart_dimension("hits", value);
- ebpf_write_end_chart();
-
- value = (collected_number) w->cachestat.miss;
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_cachestat_misses");
- write_chart_dimension("misses", value);
- ebpf_write_end_chart();
- }
-}
-
-/**
- * Cachestat sum PIDs
- *
- * Sum values for all PIDs associated with a group.
- *
- * @param publish output structure.
- * @param root structure with the listed PIDs.
- */
-void ebpf_cachestat_sum_cgroup_pids(netdata_publish_cachestat_t *publish, struct pid_on_target2 *root)
-{
- memcpy(&publish->prev, &publish->current,sizeof(publish->current));
- memset(&publish->current, 0, sizeof(publish->current));
-
- netdata_cachestat_pid_t *dst = &publish->current;
- while (root) {
- netdata_cachestat_pid_t *src = &root->cachestat;
-
- dst->account_page_dirtied += src->account_page_dirtied;
- dst->add_to_page_cache_lru += src->add_to_page_cache_lru;
- dst->mark_buffer_dirty += src->mark_buffer_dirty;
- dst->mark_page_accessed += src->mark_page_accessed;
-
- root = root->next;
- }
-}
-
-/**
- * Calc chart values
- *
- * Do necessary math to plot charts.
- */
-void ebpf_cachestat_calc_chart_values()
-{
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- ebpf_cachestat_sum_cgroup_pids(&ect->publish_cachestat, ect->pids);
-
- netdata_cachestat_pid_t *current = &ect->publish_cachestat.current;
- netdata_cachestat_pid_t *prev = &ect->publish_cachestat.prev;
-
- uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed;
- uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty;
- ect->publish_cachestat.dirty = mbd;
- uint64_t apcl = current->add_to_page_cache_lru - prev->add_to_page_cache_lru;
- uint64_t apd = current->account_page_dirtied - prev->account_page_dirtied;
-
- cachestat_update_publish(&ect->publish_cachestat, mpa, mbd, apcl, apd);
- }
-}
-
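cachestat_update_publish is defined elsewhere, but the four deltas computed above (mpa, mbd, apcl, apd) are the inputs of the classic cachestat arithmetic popularized by the bcc `cachestat` tool. A sketch of that math; the exact clamping rules are stated here as assumptions, not as the plugin's verified behavior:

#include <stdint.h>

/* total accesses ~= mark_page_accessed - mark_buffer_dirty
 * misses         ~= add_to_page_cache_lru - account_page_dirtied
 * hits            = total - misses, then ratio = hits / (hits + misses) */
static void cachestat_math(uint64_t mpa, uint64_t mbd, uint64_t apcl, uint64_t apd,
                           int64_t *hits, int64_t *misses, double *ratio)
{
    int64_t total = (int64_t)mpa - (int64_t)mbd;
    int64_t miss = (int64_t)apcl - (int64_t)apd;
    if (miss < 0)
        miss = 0;                       /* counters can race; clamp */

    int64_t hit = total - miss;
    if (hit < 0) {                      /* more misses than accesses were seen */
        miss += hit;
        hit = 0;
        if (miss < 0)
            miss = 0;
    }

    *hits = hit;
    *misses = miss;
    *ratio = (hit + miss) ? 100.0 * (double)hit / (double)(hit + miss) : 0.0;
}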
-/**
- * Create Systemd cachestat Charts
- *
- * Create charts when systemd is enabled
- *
- * @param update_every value to overwrite the update frequency set by the server.
- **/
-static void ebpf_create_systemd_cachestat_charts(int update_every)
-{
- ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_HIT_RATIO_CHART,
- "Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, 21100,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_CACHESTAT_HIT_RATIO_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT,
- update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_DIRTY_CHART,
- "Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, 21101,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_CACHESTAT_MODIFIED_CACHE_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT,
- update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_HIT_CHART, "Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, 21102,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT,
- update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_MISSES_CHART, "Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, 21103,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT,
- update_every);
-}
-
-/**
- * Send Cache Stat charts
- *
- * Send collected data to Netdata.
- */
-static void ebpf_send_systemd_cachestat_charts()
-{
- ebpf_cgroup_target_t *ect;
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_RATIO_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_cachestat.ratio);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_DIRTY_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_cachestat.dirty);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_cachestat.hit);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_MISSES_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_cachestat.miss);
- }
- }
- ebpf_write_end_chart();
-}
-
-/**
- * Send specific cache stat charts
- *
- * Send collected data to Netdata.
- */
-static void ebpf_send_specific_cachestat_data(char *type, netdata_publish_cachestat_t *npc)
-{
- ebpf_write_begin_chart(type, NETDATA_CACHESTAT_HIT_RATIO_CHART, "");
- write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_RATIO].name, (long long)npc->ratio);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_CACHESTAT_DIRTY_CHART, "");
- write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY].name, (long long)npc->dirty);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_CACHESTAT_HIT_CHART, "");
- write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT].name, (long long)npc->hit);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_CACHESTAT_MISSES_CHART, "");
- write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS].name, (long long)npc->miss);
- ebpf_write_end_chart();
-}
-
-/**
- * Create specific cache Stat charts
- *
- * Create charts for cgroup/application.
- *
- * @param type the chart type.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_specific_cachestat_charts(char *type, int update_every)
-{
- ebpf_create_chart(type, NETDATA_CACHESTAT_HIT_RATIO_CHART,
- "Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_CGROUP_SUBMENU,
- NETDATA_CGROUP_CACHESTAT_HIT_RATIO_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5200,
- ebpf_create_global_dimension,
- cachestat_counter_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
-
- ebpf_create_chart(type, NETDATA_CACHESTAT_DIRTY_CHART,
- "Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_CGROUP_SUBMENU,
- NETDATA_CGROUP_CACHESTAT_MODIFIED_CACHE_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5201,
- ebpf_create_global_dimension,
- &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
-
- ebpf_create_chart(type, NETDATA_CACHESTAT_HIT_CHART,
- "Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_CGROUP_SUBMENU,
- NETDATA_CGROUP_CACHESTAT_HIT_FILES_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5202,
- ebpf_create_global_dimension,
- &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
-
- ebpf_create_chart(type, NETDATA_CACHESTAT_MISSES_CHART,
- "Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_CGROUP_SUBMENU,
- NETDATA_CGROUP_CACHESTAT_MISS_FILES_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5203,
- ebpf_create_global_dimension,
- &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
-}
-
-/**
- * Obsolete specific cache stat charts
- *
- * Obsolete charts for cgroup/application.
- *
- * @param type the chart type.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_obsolete_specific_cachestat_charts(char *type, int update_every)
-{
- ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_HIT_RATIO_CHART,
- "",
- "Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_HIT_RATIO_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5200, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_DIRTY_CHART,
- "",
- "Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_MODIFIED_CACHE_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5201, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_HIT_CHART,
- "",
- "Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_HIT_FILES_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5202, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_MISSES_CHART,
- "",
- "Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_MISS_FILES_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5203, update_every);
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param update_every value to overwrite the update frequency set by the server.
-*/
-void ebpf_cachestat_send_cgroup_data(int update_every)
-{
- if (!ebpf_cgroup_pids)
- return;
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_cgroup_target_t *ect;
- ebpf_cachestat_calc_chart_values();
-
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
- if (send_cgroup_chart) {
- ebpf_create_systemd_cachestat_charts(update_every);
- }
-
- ebpf_send_systemd_cachestat_charts();
- }
-
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART) && ect->updated) {
- ebpf_create_specific_cachestat_charts(ect->name, update_every);
- ect->flags |= NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART;
- }
-
- if (ect->flags & NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART) {
- if (ect->updated) {
- ebpf_send_specific_cachestat_data(ect->name, &ect->publish_cachestat);
- } else {
- ebpf_obsolete_specific_cachestat_charts(ect->name, update_every);
- ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART;
- }
- }
- }
-
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
-* Main loop for this collector.
-*/
-static void cachestat_collector(ebpf_module_t *em)
-{
- netdata_publish_cachestat_t publish;
- memset(&publish, 0, sizeof(publish));
- int cgroups = em->cgroup_charts;
- int update_every = em->update_every;
- int maps_per_core = em->maps_per_core;
- heartbeat_t hb;
- heartbeat_init(&hb);
- int counter = update_every - 1;
- // This will be cancelled by its parent
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- netdata_idx_t *stats = em->hash_table_stats;
- memset(stats, 0, sizeof(em->hash_table_stats));
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
-
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_cachestat_read_global_tables(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps)
- ebpf_read_cachestat_apps_table(maps_per_core);
-
- if (cgroups)
- ebpf_update_cachestat_cgroup(maps_per_core);
-
- pthread_mutex_lock(&lock);
-
- cachestat_send_global(&publish);
-
- if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
- ebpf_cache_send_apps_data(apps_groups_root_target);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_cachestat_pid)
- ebpf_send_data_aral_chart(ebpf_aral_cachestat_pid, em);
-#endif
-
- if (cgroups)
- ebpf_cachestat_send_cgroup_data(update_every);
-
- pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
-
-/*****************************************************************
- *
- * INITIALIZE THREAD
- *
- *****************************************************************/
-
-/**
- * Create global charts
- *
- * Call ebpf_create_chart to create the charts for the collector.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_create_memory_charts(ebpf_module_t *em)
-{
- ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_RATIO_CHART,
- "Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21100,
- ebpf_create_global_dimension,
- cachestat_counter_publish_aggregated, 1, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
-
- ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_DIRTY_CHART,
- "Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21101,
- ebpf_create_global_dimension,
- &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY], 1,
- em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
-
- ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_CHART,
- "Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21102,
- ebpf_create_global_dimension,
- &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT], 1,
- em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
-
- ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_MISSES_CHART,
- "Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21103,
- ebpf_create_global_dimension,
- &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS], 1,
- em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
-
- fflush(stdout);
-}
-
-/**
- * Allocate vectors used with this thread.
- *
- * We are not testing the return value, because callocz already does this and shuts
- * down the software when the allocation is not possible.
- *
- * @param apps is apps enabled?
- */
-static void ebpf_cachestat_allocate_global_vectors(int apps)
-{
- if (apps) {
- cachestat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_cachestat_t *));
- ebpf_cachestat_aral_init();
- cachestat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_cachestat_pid_t));
- }
-
- cachestat_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
-
- memset(cachestat_hash_values, 0, NETDATA_CACHESTAT_END * sizeof(netdata_idx_t));
- memset(cachestat_counter_aggregated_data, 0, NETDATA_CACHESTAT_END * sizeof(netdata_syscall_stat_t));
- memset(cachestat_counter_publish_aggregated, 0, NETDATA_CACHESTAT_END * sizeof(netdata_publish_syscall_t));
-}
-
-/*****************************************************************
- *
- * MAIN THREAD
- *
- *****************************************************************/
-
-/**
- * Update Internal value
- *
- * Update values used during runtime.
- *
- * @return It returns 0 when one of the functions is present and -1 otherwise.
- */
-static int ebpf_cachestat_set_internal_value()
-{
- ebpf_addresses_t address = {.function = NULL, .hash = 0, .addr = 0};
- int i;
- for (i = 0; i < NETDATA_CACHESTAT_ACCOUNT_DIRTY_END ; i++) {
- address.function = account_page[i];
- ebpf_load_addresses(&address, -1);
- if (address.addr)
- break;
- }
-
- if (!address.addr) {
- netdata_log_error("%s cachestat.", NETDATA_EBPF_DEFAULT_FNT_NOT_FOUND);
- return -1;
- }
-
- cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name = address.function;
-
- return 0;
-}
-
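ebpf_load_addresses resolves each candidate in the account_page list because kernels renamed this function over time (account_page_dirtied, later __set_page_dirty, then __folio_mark_dirty in newer trees). As an illustration only (not the plugin's resolver), symbol availability can be checked by scanning /proc/kallsyms:

#include <stdio.h>
#include <string.h>

/* Return 1 when `symbol` appears in /proc/kallsyms, 0 otherwise. */
static int kernel_symbol_exists(const char *symbol)
{
    FILE *fp = fopen("/proc/kallsyms", "r");
    if (!fp)
        return 0;

    char line[512], name[256];
    int found = 0;
    while (fgets(line, sizeof(line), fp)) {
        /* each line: "<address> <type> <name> [module]" */
        if (sscanf(line, "%*s %*s %255s", name) == 1 && !strcmp(name, symbol)) {
            found = 1;
            break;
        }
    }
    fclose(fp);
    return found;
}

/* Usage: pick the first symbol the running kernel actually provides. */
static const char *pick_dirty_symbol(void)
{
    static const char *candidates[] = {
        "account_page_dirtied", "__set_page_dirty", "__folio_mark_dirty", NULL
    };
    for (int i = 0; candidates[i]; i++)
        if (kernel_symbol_exists(candidates[i]))
            return candidates[i];
    return NULL;
}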
-/**
- * Load BPF
- *
- * Load BPF files.
- *
- * @param em the structure with configuration
- */
-static int ebpf_cachestat_load_bpf(ebpf_module_t *em)
-{
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(cachestat_maps, em->maps_per_core, running_on_kernel);
-#endif
-
- int ret = 0;
- ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].mode);
- if (em->load & EBPF_LOAD_LEGACY) {
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- ret = -1;
- }
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- cachestat_bpf_obj = cachestat_bpf__open();
- if (!cachestat_bpf_obj)
- ret = -1;
- else
- ret = ebpf_cachestat_load_and_attach(cachestat_bpf_obj, em);
- }
-#endif
-
- if (ret)
- netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name);
-
- return ret;
-}
-
-/**
- * Cachestat thread
- *
- * Thread responsible for running the cachestat collector
- *
- * @param ptr a pointer to `struct ebpf_module`
- *
- * @return It always returns NULL
- */
-void *ebpf_cachestat_thread(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_cachestat_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = cachestat_maps;
-
- ebpf_update_pid_table(&cachestat_maps[NETDATA_CACHESTAT_PID_STATS], em);
-
- if (ebpf_cachestat_set_internal_value()) {
- goto endcachestat;
- }
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_cachestat_load_bpf(em)) {
- goto endcachestat;
- }
-
- ebpf_cachestat_allocate_global_vectors(em->apps_charts);
-
- int algorithms[NETDATA_CACHESTAT_END] = {
- NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
- };
-
- ebpf_global_labels(cachestat_counter_aggregated_data, cachestat_counter_publish_aggregated,
- cachestat_counter_dimension_name, cachestat_counter_dimension_name,
- algorithms, NETDATA_CACHESTAT_END);
-
- pthread_mutex_lock(&lock);
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
- ebpf_create_memory_charts(em);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_cachestat_pid)
- cachestat_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_CACHESTAT_ARAL_NAME, em);
-#endif
-
- pthread_mutex_unlock(&lock);
-
- cachestat_collector(em);
-
-endcachestat:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.h b/collectors/ebpf.plugin/ebpf_cachestat.h
deleted file mode 100644
index ba2b12833..000000000
--- a/collectors/ebpf.plugin/ebpf_cachestat.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_CACHESTAT_H
-#define NETDATA_EBPF_CACHESTAT_H 1
-
-// Module name & description
-#define NETDATA_EBPF_MODULE_NAME_CACHESTAT "cachestat"
-#define NETDATA_EBPF_CACHESTAT_MODULE_DESC "Monitor Linux page cache internal functions. This thread is integrated with apps and cgroup."
-
-// charts
-#define NETDATA_CACHESTAT_HIT_RATIO_CHART "cachestat_ratio"
-#define NETDATA_CACHESTAT_DIRTY_CHART "cachestat_dirties"
-#define NETDATA_CACHESTAT_HIT_CHART "cachestat_hits"
-#define NETDATA_CACHESTAT_MISSES_CHART "cachestat_misses"
-
-#define NETDATA_CACHESTAT_SUBMENU "page_cache"
-#define NETDATA_CACHESTAT_CGROUP_SUBMENU "page cache (eBPF)"
-
-#define EBPF_CACHESTAT_DIMENSION_PAGE "pages/s"
-#define EBPF_CACHESTAT_DIMENSION_HITS "hits/s"
-#define EBPF_CACHESTAT_DIMENSION_MISSES "misses/s"
-
-// configuration file
-#define NETDATA_CACHESTAT_CONFIG_FILE "cachestat.conf"
-
-// Contexts
-#define NETDATA_CGROUP_CACHESTAT_HIT_RATIO_CONTEXT "cgroup.cachestat_ratio"
-#define NETDATA_CGROUP_CACHESTAT_MODIFIED_CACHE_CONTEXT "cgroup.cachestat_dirties"
-#define NETDATA_CGROUP_CACHESTAT_HIT_FILES_CONTEXT "cgroup.cachestat_hits"
-#define NETDATA_CGROUP_CACHESTAT_MISS_FILES_CONTEXT "cgroup.cachestat_misses"
-
-#define NETDATA_SYSTEMD_CACHESTAT_HIT_RATIO_CONTEXT "services.cachestat_ratio"
-#define NETDATA_SYSTEMD_CACHESTAT_MODIFIED_CACHE_CONTEXT "services.cachestat_dirties"
-#define NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT "services.cachestat_hits"
-#define NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT "services.cachestat_misses"
-
-// ARAL Name
-#define NETDATA_EBPF_CACHESTAT_ARAL_NAME "ebpf_cachestat"
-
-// variables
-enum cachestat_counters {
- NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU,
- NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED,
- NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED,
- NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY,
-
- NETDATA_CACHESTAT_END
-};
-
-enum cachestat_account_dirty_pages {
- NETDATA_CACHESTAT_ACCOUNT_PAGE_DIRTY,
- NETDATA_CACHESTAT_SET_PAGE_DIRTY,
- NETDATA_CACHESTAT_FOLIO_DIRTY,
-
- NETDATA_CACHESTAT_ACCOUNT_DIRTY_END
-};
-
-enum cachestat_indexes {
- NETDATA_CACHESTAT_IDX_RATIO,
- NETDATA_CACHESTAT_IDX_DIRTY,
- NETDATA_CACHESTAT_IDX_HIT,
- NETDATA_CACHESTAT_IDX_MISS
-};
-
-enum cachestat_tables {
- NETDATA_CACHESTAT_GLOBAL_STATS,
- NETDATA_CACHESTAT_PID_STATS,
- NETDATA_CACHESTAT_CTRL
-};
-
-typedef struct netdata_publish_cachestat_pid {
- uint64_t add_to_page_cache_lru;
- uint64_t mark_page_accessed;
- uint64_t account_page_dirtied;
- uint64_t mark_buffer_dirty;
-} netdata_cachestat_pid_t;
-
-typedef struct netdata_publish_cachestat {
- long long ratio;
- long long dirty;
- long long hit;
- long long miss;
-
- netdata_cachestat_pid_t current;
- netdata_cachestat_pid_t prev;
-} netdata_publish_cachestat_t;
-
-void *ebpf_cachestat_thread(void *ptr);
-void ebpf_cachestat_release(netdata_publish_cachestat_t *stat);
-
-extern struct config cachestat_config;
-extern netdata_ebpf_targets_t cachestat_targets[];
-extern ebpf_local_maps_t cachestat_maps[];
-
-#endif // NETDATA_EBPF_CACHESTAT_H
diff --git a/collectors/ebpf.plugin/ebpf_cgroup.c b/collectors/ebpf.plugin/ebpf_cgroup.c
deleted file mode 100644
index 1aadfbaf8..000000000
--- a/collectors/ebpf.plugin/ebpf_cgroup.c
+++ /dev/null
@@ -1,392 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include <sys/resource.h>
-
-#include "ebpf.h"
-#include "ebpf_cgroup.h"
-
-ebpf_cgroup_target_t *ebpf_cgroup_pids = NULL;
-static void *ebpf_mapped_memory = NULL;
-int send_cgroup_chart = 0;
-
-// --------------------------------------------------------------------------------------------------------------------
-// Map shared memory
-
-/**
- * Map Shared Memory locally
- *
- * Map the shared memory for current process
- *
- * @param fd file descriptor returned after shm_open was called.
- * @param length length of the shared memory
- *
- * @return It returns a pointer to the region mapped on success and MAP_FAILED otherwise.
- */
-static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length)
-{
- void *value;
-
- value = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- if (value == MAP_FAILED) {
- netdata_log_error("Cannot map shared memory used between eBPF and cgroup, integration between processes won't happen");
- close(shm_fd_ebpf_cgroup);
- shm_fd_ebpf_cgroup = -1;
- shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
- }
-
- return value;
-}
-
-/**
- * Unmap Shared Memory
- *
- * Unmap shared memory used to integrate eBPF and cgroup plugin
- */
-void ebpf_unmap_cgroup_shared_memory()
-{
- munmap(ebpf_mapped_memory, shm_ebpf_cgroup.header->body_length);
-}
-
-/**
- * Map cgroup shared memory
- *
- * Map cgroup shared memory from cgroup to plugin
- */
-void ebpf_map_cgroup_shared_memory()
-{
- static int limit_try = 0;
- static time_t next_try = 0;
-
- if (shm_ebpf_cgroup.header || limit_try > NETDATA_EBPF_CGROUP_MAX_TRIES)
- return;
-
- time_t curr_time = time(NULL);
- if (curr_time < next_try)
- return;
-
- limit_try++;
- next_try = curr_time + NETDATA_EBPF_CGROUP_NEXT_TRY_SEC;
-
- if (shm_fd_ebpf_cgroup < 0) {
- shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660);
- if (shm_fd_ebpf_cgroup < 0) {
- if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES)
- netdata_log_error("Shared memory was not initialized, integration between processes won't happen.");
-
- return;
- }
- }
-
- // Map only header
- void *mapped = ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup,
- sizeof(netdata_ebpf_cgroup_shm_header_t));
- if (unlikely(mapped == MAP_FAILED)) {
- return;
- }
- netdata_ebpf_cgroup_shm_header_t *header = mapped;
-
- size_t length = header->body_length;
-
- munmap(header, sizeof(netdata_ebpf_cgroup_shm_header_t));
-
- if (length <= ((sizeof(netdata_ebpf_cgroup_shm_header_t) + sizeof(netdata_ebpf_cgroup_shm_body_t)))) {
- return;
- }
-
- ebpf_mapped_memory = (void *)ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, length);
- if (unlikely(ebpf_mapped_memory == MAP_FAILED)) {
- return;
- }
- shm_ebpf_cgroup.header = ebpf_mapped_memory;
- shm_ebpf_cgroup.body = ebpf_mapped_memory + sizeof(netdata_ebpf_cgroup_shm_header_t);
-
- shm_sem_ebpf_cgroup = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, O_CREAT, 0660, 1);
-
- if (shm_sem_ebpf_cgroup == SEM_FAILED) {
- netdata_log_error("Cannot create semaphore, integration between eBPF and cgroup won't happen");
- limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
- munmap(ebpf_mapped_memory, length);
- shm_ebpf_cgroup.header = NULL;
- shm_ebpf_cgroup.body = NULL;
- close(shm_fd_ebpf_cgroup);
- shm_fd_ebpf_cgroup = -1;
- shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
- }
-}
-
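The function above maps the region in two steps: first only the fixed-size header to learn body_length, then the full segment, so the reader never has to guess the writer's size. A compact standalone sketch of that handshake; the header type and names below are hypothetical:

#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

struct demo_shm_header { uint32_t body_length; };  /* hypothetical layout */

static void *map_shared_region(const char *name, size_t *out_len)
{
    int fd = shm_open(name, O_RDWR, 0660);
    if (fd < 0)
        return NULL;

    /* Step 1: map only the header to learn the full length. */
    struct demo_shm_header *hdr = mmap(NULL, sizeof(*hdr), PROT_READ, MAP_SHARED, fd, 0);
    if (hdr == MAP_FAILED) {
        close(fd);
        return NULL;
    }

    size_t length = hdr->body_length;
    munmap(hdr, sizeof(*hdr));

    /* Step 2: map the whole region now that the size is known. */
    void *mem = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    close(fd);  /* an established mapping outlives the descriptor */
    if (mem == MAP_FAILED)
        return NULL;

    *out_len = length;
    return mem;
}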
-// --------------------------------------------------------------------------------------------------------------------
-// Close and Cleanup
-
-/**
- * Clean Specific cgroup pid
- *
- * Clean all PIDs associated with cgroup.
- *
- * @param pt the pid_on_target structure whose PIDs will be removed
- */
-static inline void ebpf_clean_specific_cgroup_pids(struct pid_on_target2 *pt)
-{
- while (pt) {
- struct pid_on_target2 *next_pid = pt->next;
-
- freez(pt);
- pt = next_pid;
- }
-}
-
-/**
- * Remove cgroup target from update list
- *
- * Remove targets that were not updated and relink the remaining list
- */
-static void ebpf_remove_cgroup_target_update_list()
-{
- ebpf_cgroup_target_t *next, *ect = ebpf_cgroup_pids;
- ebpf_cgroup_target_t *prev = ebpf_cgroup_pids;
- while (ect) {
- next = ect->next;
- if (!ect->updated) {
- if (ect == ebpf_cgroup_pids) {
- ebpf_cgroup_pids = next;
- prev = next;
- } else {
- prev->next = next;
- }
-
- ebpf_clean_specific_cgroup_pids(ect->pids);
- freez(ect);
- } else {
- prev = ect;
- }
-
- ect = next;
- }
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// Fill variables
-
-/**
- * Set Target Data
- *
- * Set local variable values according to shared memory information.
- *
- * @param out local output variable.
- * @param ptr input from shared memory.
- */
-static inline void ebpf_cgroup_set_target_data(ebpf_cgroup_target_t *out, netdata_ebpf_cgroup_shm_body_t *ptr)
-{
- out->hash = ptr->hash;
- snprintfz(out->name, 255, "%s", ptr->name);
- out->systemd = ptr->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE;
- out->updated = 1;
-}
-
-/**
- * Find or create
- *
- * Find the structure inside the link list or allocate and link when it is not present.
- *
- * @param ptr Input from shared memory.
- *
- * @return It returns a pointer for the structure associated with the input.
- */
-static ebpf_cgroup_target_t * ebpf_cgroup_find_or_create(netdata_ebpf_cgroup_shm_body_t *ptr)
-{
- ebpf_cgroup_target_t *ect, *prev;
- for (ect = ebpf_cgroup_pids, prev = ebpf_cgroup_pids; ect; prev = ect, ect = ect->next) {
- if (ect->hash == ptr->hash && !strcmp(ect->name, ptr->name)) {
- ect->updated = 1;
- return ect;
- }
- }
-
- ebpf_cgroup_target_t *new_ect = callocz(1, sizeof(ebpf_cgroup_target_t));
-
- ebpf_cgroup_set_target_data(new_ect, ptr);
- if (!ebpf_cgroup_pids) {
- ebpf_cgroup_pids = new_ect;
- } else {
- prev->next = new_ect;
- }
-
- return new_ect;
-}
-
-/**
- * Update pid link list
- *
- * Update PIDs list associated with specific cgroup.
- *
- * @param ect cgroup structure where pids will be stored
- * @param path file with PIDs associated to cgroup.
- */
-static void ebpf_update_pid_link_list(ebpf_cgroup_target_t *ect, char *path)
-{
- procfile *ff = procfile_open_no_log(path, " \t:", PROCFILE_FLAG_DEFAULT);
- if (!ff)
- return;
-
- ff = procfile_readall(ff);
- if (!ff)
- return;
-
- size_t lines = procfile_lines(ff), l;
- for (l = 0; l < lines ;l++) {
- int pid = (int)str2l(procfile_lineword(ff, l, 0));
- if (pid) {
- struct pid_on_target2 *pt, *prev;
- for (pt = ect->pids, prev = ect->pids; pt; prev = pt, pt = pt->next) {
- if (pt->pid == pid)
- break;
- }
-
- if (!pt) {
- struct pid_on_target2 *w = callocz(1, sizeof(struct pid_on_target2));
- w->pid = pid;
- if (!ect->pids)
- ect->pids = w;
- else
- prev->next = w;
- }
- }
- }
-
- procfile_close(ff);
-}
-
-/**
- * Reset updated var
- *
- * Reset the `updated` variable. If it is not set again, the structure will be removed from the link list.
- */
-void ebpf_reset_updated_var()
-{
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- ect->updated = 0;
- }
-}
-
-/**
- * Parse cgroup shared memory
- *
- * This function is responsible for copying the necessary data from shared memory to local memory.
- */
-void ebpf_parse_cgroup_shm_data()
-{
- static int previous = 0;
- if (!shm_ebpf_cgroup.header || shm_sem_ebpf_cgroup == SEM_FAILED)
- return;
-
- sem_wait(shm_sem_ebpf_cgroup);
- int i, end = shm_ebpf_cgroup.header->cgroup_root_count;
- if (end <= 0) {
- sem_post(shm_sem_ebpf_cgroup);
- return;
- }
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_remove_cgroup_target_update_list();
-
- ebpf_reset_updated_var();
-
- for (i = 0; i < end; i++) {
- netdata_ebpf_cgroup_shm_body_t *ptr = &shm_ebpf_cgroup.body[i];
- if (ptr->enabled) {
- ebpf_cgroup_target_t *ect = ebpf_cgroup_find_or_create(ptr);
- ebpf_update_pid_link_list(ect, ptr->path);
- }
- }
- send_cgroup_chart = previous != shm_ebpf_cgroup.header->cgroup_root_count;
- previous = shm_ebpf_cgroup.header->cgroup_root_count;
- sem_post(shm_sem_ebpf_cgroup);
- pthread_mutex_unlock(&mutex_cgroup_shm);
-#ifdef NETDATA_DEV_MODE
- netdata_log_info("Updating cgroup %d (Previous: %d, Current: %d)",
- send_cgroup_chart, previous, shm_ebpf_cgroup.header->cgroup_root_count);
-#endif
-}
-
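Every access to the shared region above is bracketed by sem_wait/sem_post on a named semaphore, because cgroup.plugin writes the same memory. The invariant worth preserving is exactly one post per successful wait; the guard pattern in isolation, with hypothetical names:

#include <semaphore.h>

/* Copy a value out of shared memory under a named semaphore. */
static int read_guarded(sem_t *sem, const volatile int *shared_value)
{
    int copy = -1;
    if (sem_wait(sem))      /* block until the writer releases the region */
        return copy;

    copy = *shared_value;   /* critical section: take a consistent snapshot */

    sem_post(sem);          /* exactly one post for the one wait above */
    return copy;
}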
-// --------------------------------------------------------------------------------------------------------------------
-// Create charts
-
-/**
- * Create charts on systemd submenu
- *
- * @param id the chart id
- * @param title the chart title.
- * @param units the value displayed on the vertical axis.
- * @param family Submenu that the chart will be attached on dashboard.
- * @param charttype chart type
- * @param order the chart order
- * @param algorithm the algorithm used by dimension
- * @param context add context for chart
- * @param module chart module name, this is the eBPF thread.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order,
- char *algorithm, char *context, char *module, int update_every)
-{
- ebpf_cgroup_target_t *w;
- ebpf_write_chart_cmd(NETDATA_SERVICE_FAMILY, id, "", title, units, family, charttype, context,
- order, update_every, module);
-
- for (w = ebpf_cgroup_pids; w; w = w->next) {
- if (unlikely(w->systemd) && unlikely(w->updated))
- fprintf(stdout, "DIMENSION %s '' %s 1 1\n", w->name, algorithm);
- }
-}
-
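ebpf_write_chart_cmd and the DIMENSION line printed above belong to Netdata's external-plugin text protocol on stdout: a chart is declared once with CHART/DIMENSION and then updated with BEGIN/SET/END. A minimal emitter in that spirit; the chart and dimension ids below are made-up examples, not the plugin's real ones:

#include <stdio.h>

/* Declare one chart and push a single data point. */
static void demo_emit(long long value)
{
    printf("CHART services.demo_ratio '' 'Hit ratio' '%%' demo_family "
           "services.demo_ratio line 21100 1\n");
    printf("DIMENSION myservice '' absolute 1 1\n");

    printf("BEGIN services.demo_ratio\n");
    printf("SET myservice = %lld\n", value);
    printf("END\n");
    fflush(stdout);
}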
-// --------------------------------------------------------------------------------------------------------------------
-// Cgroup main thread
-
-/**
- * CGROUP exit
- *
- * Clean up the main thread.
- *
- * @param ptr thread data.
- */
-static void ebpf_cgroup_exit(void *ptr)
-{
- UNUSED(ptr);
-}
-
-/**
- * Cgroup integration
- *
- * Thread responsible for calling the functions that sync data between plugins.
- *
- * @param ptr It is a NULL value for this thread.
- *
- * @return It always returns NULL.
- */
-void *ebpf_cgroup_integration(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_cgroup_exit, ptr);
-
- usec_t step = USEC_PER_SEC;
- int counter = NETDATA_EBPF_CGROUP_UPDATE - 1;
- heartbeat_t hb;
- heartbeat_init(&hb);
- //Plugin will be killed when it receives a signal
- while (!ebpf_plugin_exit) {
- (void)heartbeat_next(&hb, step);
-
- // We use a small heartbeat interval to wake the thread up quickly,
- // but the shared memory data should not be updated that frequently
- if (++counter >= NETDATA_EBPF_CGROUP_UPDATE) {
- counter = 0;
- if (!shm_ebpf_cgroup.header)
- ebpf_map_cgroup_shared_memory();
- else
- ebpf_parse_cgroup_shm_data();
- }
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_cgroup.h b/collectors/ebpf.plugin/ebpf_cgroup.h
deleted file mode 100644
index ba8346934..000000000
--- a/collectors/ebpf.plugin/ebpf_cgroup.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_CGROUP_H
-#define NETDATA_EBPF_CGROUP_H 1
-
-#define NETDATA_EBPF_CGROUP_MAX_TRIES 3
-#define NETDATA_EBPF_CGROUP_NEXT_TRY_SEC 30
-
-#include "ebpf.h"
-#include "ebpf_apps.h"
-
-#define NETDATA_SERVICE_FAMILY "services"
-
-struct pid_on_target2 {
- int32_t pid;
- int updated;
-
- netdata_publish_swap_t swap;
- netdata_fd_stat_t fd;
- netdata_publish_vfs_t vfs;
- ebpf_process_stat_t ps;
- netdata_dcstat_pid_t dc;
- netdata_publish_shm_t shm;
- netdata_socket_t socket;
- netdata_cachestat_pid_t cachestat;
-
- struct pid_on_target2 *next;
-};
-
-enum ebpf_cgroup_flags {
- NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART = 1,
- NETDATA_EBPF_CGROUP_HAS_SWAP_CHART = 1<<2,
- NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART = 1<<3,
- NETDATA_EBPF_CGROUP_HAS_FD_CHART = 1<<4,
- NETDATA_EBPF_CGROUP_HAS_VFS_CHART = 1<<5,
- NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART = 1<<6,
- NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART = 1<<7,
- NETDATA_EBPF_CGROUP_HAS_DC_CHART = 1<<8,
- NETDATA_EBPF_CGROUP_HAS_SHM_CHART = 1<<9
-};
-
-typedef struct ebpf_cgroup_target {
- char name[256]; // title
- uint32_t hash;
- uint32_t flags;
- uint32_t systemd;
- uint32_t updated;
-
- netdata_publish_swap_t publish_systemd_swap;
- netdata_fd_stat_t publish_systemd_fd;
- netdata_publish_vfs_t publish_systemd_vfs;
- ebpf_process_stat_t publish_systemd_ps;
- netdata_publish_dcstat_t publish_dc;
- int oomkill;
- netdata_publish_shm_t publish_shm;
- ebpf_socket_publish_apps_t publish_socket;
- netdata_publish_cachestat_t publish_cachestat;
-
- struct pid_on_target2 *pids;
- struct ebpf_cgroup_target *next;
-} ebpf_cgroup_target_t;
-
-void ebpf_map_cgroup_shared_memory();
-void ebpf_parse_cgroup_shm_data();
-void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order,
- char *algorithm, char *context, char *module, int update_every);
-void *ebpf_cgroup_integration(void *ptr);
-void ebpf_unmap_cgroup_shared_memory();
-extern int send_cgroup_chart;
-
-#endif /* NETDATA_EBPF_CGROUP_H */
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c
deleted file mode 100644
index 4ff6c82ab..000000000
--- a/collectors/ebpf.plugin/ebpf_dcstat.c
+++ /dev/null
@@ -1,1420 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_dcstat.h"
-
-static char *dcstat_counter_dimension_name[NETDATA_DCSTAT_IDX_END] = { "ratio", "reference", "slow", "miss" };
-static netdata_syscall_stat_t dcstat_counter_aggregated_data[NETDATA_DCSTAT_IDX_END];
-static netdata_publish_syscall_t dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_END];
-
-netdata_dcstat_pid_t *dcstat_vector = NULL;
-
-static netdata_idx_t dcstat_hash_values[NETDATA_DCSTAT_IDX_END];
-static netdata_idx_t *dcstat_values = NULL;
-
-struct config dcstat_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_global", .internal_input = NETDATA_DIRECTORY_CACHE_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "dcstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = "dcstat_ctrl", .internal_input = NETDATA_CONTROLLER_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-static ebpf_specify_name_t dc_optional_name[] = { {.program_name = "netdata_lookup_fast",
- .function_to_attach = "lookup_fast",
- .optional = NULL,
- .retprobe = CONFIG_BOOLEAN_NO},
- {.program_name = NULL}};
-
-netdata_ebpf_targets_t dc_targets[] = { {.name = "lookup_fast", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "d_lookup", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-
-#ifdef NETDATA_DEV_MODE
-int dcstat_disable_priority;
-#endif
-
-#ifdef LIBBPF_MAJOR_VERSION
-/**
- * Disable probe
- *
- * Disable all probes to use exclusively another method.
- *
- * @param obj is the main structure for bpf objects
- */
-static inline void ebpf_dc_disable_probes(struct dc_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_lookup_fast_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_d_lookup_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_dcstat_release_task_kprobe, false);
-}
-
-/**
- * Disable trampoline
- *
- * Disable all trampoline to use exclusively another method.
- *
- * @param obj is the main structure for bpf objects.
- */
-static inline void ebpf_dc_disable_trampoline(struct dc_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_lookup_fast_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_d_lookup_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_dcstat_release_task_fentry, false);
-}
-
-/**
- * Set trampoline target
- *
- * Set the targets we will monitor.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_dc_set_trampoline_target(struct dc_bpf *obj)
-{
- bpf_program__set_attach_target(obj->progs.netdata_lookup_fast_fentry, 0,
- dc_targets[NETDATA_DC_TARGET_LOOKUP_FAST].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_d_lookup_fexit, 0,
- dc_targets[NETDATA_DC_TARGET_D_LOOKUP].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_dcstat_release_task_fentry, 0,
- EBPF_COMMON_FNCT_CLEAN_UP);
-}
-
-/**
- * Attach probes
- *
- * Attach probes to target
- *
- * @param obj is the main structure for bpf objects.
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_dc_attach_probes(struct dc_bpf *obj)
-{
- obj->links.netdata_d_lookup_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_d_lookup_kretprobe,
- true,
- dc_targets[NETDATA_DC_TARGET_D_LOOKUP].name);
- int ret = libbpf_get_error(obj->links.netdata_d_lookup_kretprobe);
- if (ret)
- return -1;
-
- char *lookup_name = (dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].optional) ?
- dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].optional :
- dc_targets[NETDATA_DC_TARGET_LOOKUP_FAST].name ;
-
- obj->links.netdata_lookup_fast_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_lookup_fast_kprobe,
- false,
- lookup_name);
- ret = libbpf_get_error(obj->links.netdata_lookup_fast_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_dcstat_release_task_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_dcstat_release_task_kprobe,
- false,
- EBPF_COMMON_FNCT_CLEAN_UP);
- ret = libbpf_get_error(obj->links.netdata_dcstat_release_task_kprobe);
- if (ret)
- return -1;
-
- return 0;
-}
-
-/**
- * Adjust Map Size
- *
- * Resize maps according to input from users.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- */
-static void ebpf_dc_adjust_map(struct dc_bpf *obj, ebpf_module_t *em)
-{
- ebpf_update_map_size(obj->maps.dcstat_pid, &dcstat_maps[NETDATA_DCSTAT_PID_STATS],
- em, bpf_map__name(obj->maps.dcstat_pid));
-
- ebpf_update_map_type(obj->maps.dcstat_global, &dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS]);
- ebpf_update_map_type(obj->maps.dcstat_pid, &dcstat_maps[NETDATA_DCSTAT_PID_STATS]);
- ebpf_update_map_type(obj->maps.dcstat_ctrl, &dcstat_maps[NETDATA_DCSTAT_CTRL]);
-}
-
-/**
- * Set hash tables
- *
- * Set the values for maps according to the values given by the kernel.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_dc_set_hash_tables(struct dc_bpf *obj)
-{
- dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd = bpf_map__fd(obj->maps.dcstat_global);
- dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd = bpf_map__fd(obj->maps.dcstat_pid);
- dcstat_maps[NETDATA_DCSTAT_CTRL].map_fd = bpf_map__fd(obj->maps.dcstat_ctrl);
-}
-
-/**
- * Update Load
- *
- * For the directory cache, some distributions change the function name, so we cannot unconditionally
- * use TRAMPOLINE as we do for other functions.
- *
- * @param em structure with configuration
- *
- * @return When the symbols were not modified, it returns TRAMPOLINE; otherwise it returns RETPROBE.
- */
-netdata_ebpf_program_loaded_t ebpf_dc_update_load(ebpf_module_t *em)
-{
- if (!strcmp(dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].optional,
- dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].function_to_attach))
- return EBPF_LOAD_TRAMPOLINE;
-
- if (em->targets[NETDATA_DC_TARGET_LOOKUP_FAST].mode != EBPF_LOAD_RETPROBE)
- netdata_log_info("When your kernel was compiled the symbol %s was modified, instead to use `trampoline`, the plugin will use `probes`.",
- dc_optional_name[NETDATA_DC_TARGET_LOOKUP_FAST].function_to_attach);
-
- return EBPF_LOAD_RETPROBE;
-}
-
-/**
- * Disable Release Task
- *
- * Disable release task when apps is not enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_dc_disable_release_task(struct dc_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_dcstat_release_task_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_dcstat_release_task_fentry, false);
-}
-
-/**
- * Load and attach
- *
- * Load and attach the eBPF code in kernel.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- *
- * @return it returns 0 on success and -1 otherwise
- */
-static inline int ebpf_dc_load_and_attach(struct dc_bpf *obj, ebpf_module_t *em)
-{
- netdata_ebpf_program_loaded_t test = ebpf_dc_update_load(em);
- if (test == EBPF_LOAD_TRAMPOLINE) {
- ebpf_dc_disable_probes(obj);
-
- ebpf_dc_set_trampoline_target(obj);
- } else {
- ebpf_dc_disable_trampoline(obj);
- }
-
- ebpf_dc_adjust_map(obj, em);
-
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_dc_disable_release_task(obj);
-
- int ret = dc_bpf__load(obj);
- if (ret) {
- return ret;
- }
-
- ret = (test == EBPF_LOAD_TRAMPOLINE) ? dc_bpf__attach(obj) : ebpf_dc_attach_probes(obj);
- if (!ret) {
- ebpf_dc_set_hash_tables(obj);
-
- ebpf_update_controller(dcstat_maps[NETDATA_DCSTAT_CTRL].map_fd, em);
- }
-
- return ret;
-}
-#endif
-
-/*****************************************************************
- *
- * COMMON FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * Update publish
- *
- * Update publish values before writing dimensions.
- *
- * @param out structure that will receive data.
- * @param cache_access number of access to directory cache.
- * @param not_found number of files not found on the file system
- */
-void dcstat_update_publish(netdata_publish_dcstat_t *out, uint64_t cache_access, uint64_t not_found)
-{
- NETDATA_DOUBLE successful_access = (NETDATA_DOUBLE) (((long long)cache_access) - ((long long)not_found));
- NETDATA_DOUBLE ratio = (cache_access) ? successful_access/(NETDATA_DOUBLE)cache_access : 0;
-
- out->ratio = (long long )(ratio*100);
-}
-
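A worked instance of the arithmetic above: 1000 cache accesses with 50 not found gives 950 successful accesses and a published ratio of 95. Restated as a standalone function for clarity:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as dcstat_update_publish, isolated for illustration. */
static long long dc_hit_ratio(uint64_t cache_access, uint64_t not_found)
{
    double successful = (double)((long long)cache_access - (long long)not_found);
    double ratio = cache_access ? successful / (double)cache_access : 0.0;
    return (long long)(ratio * 100.0);
}

int main(void)
{
    printf("%lld\n", dc_hit_ratio(1000, 50));  /* prints 95 */
    return 0;
}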
-/*****************************************************************
- *
- * FUNCTIONS TO CLOSE THE THREAD
- *
- *****************************************************************/
-
-static void ebpf_obsolete_specific_dc_charts(char *type, int update_every);
-
-/**
- * Obsolete services
- *
- * Obsolete all service charts created
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_dc_services(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_DC_HIT_CHART,
- "",
- "Percentage of files inside directory cache",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_SYSTEMD_DC_HIT_RATIO_CONTEXT,
- 21200,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_DC_REFERENCE_CHART,
- "",
- "Count file access",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_SYSTEMD_DC_REFERENCE_CONTEXT,
- 21201,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_DC_REQUEST_NOT_CACHE_CHART,
- "",
- "Files not present inside directory cache",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT,
- 21202,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_DC_REQUEST_NOT_FOUND_CHART,
- "",
- "Files not found",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT,
- 21202,
- em->update_every);
-}
-
-/**
- * Obsolete cgroup chart
- *
- * Send obsolete for all charts created before to close.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static inline void ebpf_obsolete_dc_cgroup_charts(ebpf_module_t *em) {
- pthread_mutex_lock(&mutex_cgroup_shm);
-
- ebpf_obsolete_dc_services(em);
-
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- ebpf_obsolete_specific_dc_charts(ect->name, em->update_every);
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Obsolete apps charts
- *
- * Obsolete apps charts.
- *
- * @param em a pointer to the structure with the default values.
- */
-void ebpf_obsolete_dc_apps_charts(struct ebpf_module *em)
-{
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = apps_groups_root_target; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_DCSTAT_IDX))))
- continue;
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_dc_hit",
- "Percentage of files inside directory cache.",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- "app.ebpf_dc_hit",
- 20265,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_dc_reference",
- "Count file access.",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_dc_reference",
- 20266,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_not_cache",
- "Files not present inside directory cache.",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_dc_not_cache",
- 20267,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_not_found",
- "Files not found.",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_dc_not_found",
- 20268,
- update_every);
-
- w->charts_created &= ~(1<<EBPF_MODULE_DCSTAT_IDX);
- }
-}
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_dc_global(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_DC_HIT_CHART,
- "",
- "Percentage of files inside directory cache",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21200,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_DC_REFERENCE_CHART,
- "",
- "Variables used to calculate hit ratio.",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21201,
- em->update_every);
-}
-
-/**
- * DCstat exit
- *
- * Cancel child and exit.
- *
- * @param ptr thread data.
- */
-static void ebpf_dcstat_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
- if (em->cgroup_charts) {
- ebpf_obsolete_dc_cgroup_charts(em);
- fflush(stdout);
- }
-
- if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
- ebpf_obsolete_dc_apps_charts(em);
- }
-
- ebpf_obsolete_dc_global(em);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_dcstat_pid)
- ebpf_statistic_obsolete_aral_chart(em, dcstat_disable_priority);
-#endif
-
- fflush(stdout);
- pthread_mutex_unlock(&lock);
- }
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
-
-#ifdef LIBBPF_MAJOR_VERSION
- if (dc_bpf_obj) {
- dc_bpf__destroy(dc_bpf_obj);
- dc_bpf_obj = NULL;
- }
-#endif
-
- if (em->objects){
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/*****************************************************************
- *
- * APPS
- *
- *****************************************************************/
-
-/**
- * Create apps charts
- *
- * Call ebpf_create_chart to create the charts on apps submenu.
- *
- * @param em a pointer to the structure with the default values.
- */
-void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr)
-{
- struct ebpf_target *root = ptr;
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = root; w; w = w->next) {
- if (unlikely(!w->exposed))
- continue;
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_dc_hit",
- "Percentage of files inside directory cache.",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- "app.ebpf_dc_hit",
- 20265,
- update_every,
- NETDATA_EBPF_MODULE_NAME_DCSTAT);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION ratio '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_dc_reference",
- "Count file access.",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_dc_reference",
- 20266,
- update_every,
- NETDATA_EBPF_MODULE_NAME_DCSTAT);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION files '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_not_cache",
- "Files not present inside directory cache.",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_dc_not_cache",
- 20267,
- update_every,
- NETDATA_EBPF_MODULE_NAME_DCSTAT);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION files '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_not_found",
- "Files not found.",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_dc_not_found",
- 20268,
- update_every,
- NETDATA_EBPF_MODULE_NAME_DCSTAT);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION files '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- w->charts_created |= 1<<EBPF_MODULE_DCSTAT_IDX;
- }
-
- em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED;
-}
-
-/*****************************************************************
- *
- * MAIN LOOP
- *
- *****************************************************************/
-
-/**
- * Apps Accumulator
- *
- * Sum all values read from the kernel and store them in the first address.
- *
- * @param out the vector with read values.
- * @param maps_per_core do I need to read all cores?
- */
-static void dcstat_apps_accumulator(netdata_dcstat_pid_t *out, int maps_per_core)
-{
- int i, end = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_dcstat_pid_t *total = &out[0];
- for (i = 1; i < end; i++) {
- netdata_dcstat_pid_t *w = &out[i];
- total->cache_access += w->cache_access;
- total->file_system += w->file_system;
- total->not_found += w->not_found;
- }
-}
-
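The accumulator above exists because per-CPU BPF maps return one value slot per possible CPU for a single key, so the plugin must fold the slots before it has a per-PID total. A sketch of the read-and-fold step using libbpf's bpf_map_lookup_elem; the map fd, value layout, and nprocs are assumptions here:

#include <bpf/bpf.h>    /* bpf_map_lookup_elem() */
#include <stdint.h>
#include <string.h>

struct demo_stat { uint64_t cache_access; uint64_t not_found; };

/* Look up `pid` in a BPF_MAP_TYPE_PERCPU_HASH map and fold the
 * per-CPU slots into slot 0. `values` must hold `nprocs` entries. */
static int lookup_and_fold(int map_fd, uint32_t pid, struct demo_stat *values, int nprocs)
{
    memset(values, 0, sizeof(*values) * (size_t)nprocs);
    if (bpf_map_lookup_elem(map_fd, &pid, values))
        return -1;      /* PID not present in the map */

    for (int cpu = 1; cpu < nprocs; cpu++) {
        values[0].cache_access += values[cpu].cache_access;
        values[0].not_found += values[cpu].not_found;
    }
    return 0;
}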
-/**
- * Save PID values
- *
- * Save the current values inside the structure
- *
- * @param out vector used to plot charts
- * @param publish vector with values read from hash tables.
- */
-static inline void dcstat_save_pid_values(netdata_publish_dcstat_t *out, netdata_dcstat_pid_t *publish)
-{
- memcpy(&out->curr, &publish[0], sizeof(netdata_dcstat_pid_t));
-}
-
-/**
- * Fill PID
- *
- * Fill PID structures
- *
- * @param current_pid the PID for which we are collecting data
- * @param publish values read from hash tables.
- */
-static void dcstat_fill_pid(uint32_t current_pid, netdata_dcstat_pid_t *publish)
-{
- netdata_publish_dcstat_t *curr = dcstat_pid[current_pid];
- if (!curr) {
- curr = ebpf_publish_dcstat_get();
- dcstat_pid[current_pid] = curr;
- }
-
- dcstat_save_pid_values(curr, publish);
-}
-
-/**
- * Read Directory Cache APPS table
- *
- * Read the apps table and store data inside the structure.
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void read_dc_apps_table(int maps_per_core)
-{
- netdata_dcstat_pid_t *cv = dcstat_vector;
- uint32_t key;
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
- int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;
- size_t length = sizeof(netdata_dcstat_pid_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- while (pids) {
- key = pids->pid;
-
- if (bpf_map_lookup_elem(fd, &key, cv)) {
- pids = pids->next;
- continue;
- }
-
- dcstat_apps_accumulator(cv, maps_per_core);
-
- dcstat_fill_pid(key, cv);
-
- // We clean the buffer to avoid passing data read from one process to another.
- memset(cv, 0, length);
-
- pids = pids->next;
- }
-}
-
-/**
- * Update cgroup
- *
- * Update cgroup data based on collected PIDs.
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void ebpf_update_dc_cgroup(int maps_per_core)
-{
- netdata_dcstat_pid_t *cv = dcstat_vector;
- int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;
- size_t length = sizeof(netdata_dcstat_pid_t)*ebpf_nprocs;
-
- ebpf_cgroup_target_t *ect;
- pthread_mutex_lock(&mutex_cgroup_shm);
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- struct pid_on_target2 *pids;
- for (pids = ect->pids; pids; pids = pids->next) {
- int pid = pids->pid;
- netdata_dcstat_pid_t *out = &pids->dc;
- if (likely(dcstat_pid) && dcstat_pid[pid]) {
- netdata_publish_dcstat_t *in = dcstat_pid[pid];
-
- memcpy(out, &in->curr, sizeof(netdata_dcstat_pid_t));
- } else {
- memset(cv, 0, length);
- if (bpf_map_lookup_elem(fd, &pid, cv)) {
- continue;
- }
-
- dcstat_apps_accumulator(cv, maps_per_core);
-
- memcpy(out, cv, sizeof(netdata_dcstat_pid_t));
- }
- }
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Read global table
- *
- * Read the table with number of calls for all functions
- *
- * @param stats vector used to read data from control table.
- * @param maps_per_core do I need to read all cores?
- */
-static void ebpf_dc_read_global_tables(netdata_idx_t *stats, int maps_per_core)
-{
- ebpf_read_global_table_stats(dcstat_hash_values,
- dcstat_values,
- dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd,
- maps_per_core,
- NETDATA_KEY_DC_REFERENCE,
- NETDATA_DIRECTORY_CACHE_END);
-
- ebpf_read_global_table_stats(stats,
- dcstat_values,
- dcstat_maps[NETDATA_DCSTAT_CTRL].map_fd,
- maps_per_core,
- NETDATA_CONTROLLER_PID_TABLE_ADD,
- NETDATA_CONTROLLER_END);
-}
-
-/**
- * Dcstat sum PIDs
- *
- * Sum values for all PIDs associated with a group
- *
- * @param publish output structure.
- * @param root structure with the listed PIDs
- */
-void ebpf_dcstat_sum_pids(netdata_publish_dcstat_t *publish, struct ebpf_pid_on_target *root)
-{
- memset(&publish->curr, 0, sizeof(netdata_dcstat_pid_t));
- netdata_dcstat_pid_t *dst = &publish->curr;
- while (root) {
- int32_t pid = root->pid;
- netdata_publish_dcstat_t *w = dcstat_pid[pid];
- if (w) {
- netdata_dcstat_pid_t *src = &w->curr;
- dst->cache_access += src->cache_access;
- dst->file_system += src->file_system;
- dst->not_found += src->not_found;
- }
-
- root = root->next;
- }
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param root the target list.
-*/
-void ebpf_dcache_send_apps_data(struct ebpf_target *root)
-{
- struct ebpf_target *w;
- collected_number value;
-
- for (w = root; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_DCSTAT_IDX))))
- continue;
-
- ebpf_dcstat_sum_pids(&w->dcstat, w->root_pid);
-
- uint64_t cache = w->dcstat.curr.cache_access;
- uint64_t not_found = w->dcstat.curr.not_found;
-
- dcstat_update_publish(&w->dcstat, cache, not_found);
-
- value = (collected_number) w->dcstat.ratio;
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_dc_hit");
- write_chart_dimension("ratio", value);
- ebpf_write_end_chart();
-
- if (w->dcstat.curr.cache_access < w->dcstat.prev.cache_access) {
- w->dcstat.prev.cache_access = 0;
- }
- w->dcstat.cache_access = (long long)w->dcstat.curr.cache_access - (long long)w->dcstat.prev.cache_access;
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_dc_reference");
- value = (collected_number) w->dcstat.cache_access;
- write_chart_dimension("files", value);
- ebpf_write_end_chart();
- w->dcstat.prev.cache_access = w->dcstat.curr.cache_access;
-
- if (w->dcstat.curr.file_system < w->dcstat.prev.file_system) {
- w->dcstat.prev.file_system = 0;
- }
-        value = (!w->dcstat.cache_access) ? 0 :
-                (collected_number)((long long)w->dcstat.curr.file_system - (long long)w->dcstat.prev.file_system);
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_not_cache");
- write_chart_dimension("files", value);
- ebpf_write_end_chart();
- w->dcstat.prev.file_system = w->dcstat.curr.file_system;
-
- if (w->dcstat.curr.not_found < w->dcstat.prev.not_found) {
- w->dcstat.prev.not_found = 0;
- }
-        value = (!w->dcstat.cache_access) ? 0 :
-                (collected_number)((long long)w->dcstat.curr.not_found - (long long)w->dcstat.prev.not_found);
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_not_found");
- write_chart_dimension("files", value);
- ebpf_write_end_chart();
- w->dcstat.prev.not_found = w->dcstat.curr.not_found;
- }
-}
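-
-/*
- * The prev/curr bookkeeping above is a plain monotonic-counter delta: when the
- * kernel-side counter restarts (curr < prev), prev is reset to zero so the next
- * delta cannot go negative. The pattern in isolation:
- *
- *     static long long counter_delta(uint64_t curr, uint64_t *prev)
- *     {
- *         if (curr < *prev)                  // counter restarted
- *             *prev = 0;
- *         long long delta = (long long)curr - (long long)*prev;
- *         *prev = curr;
- *         return delta;
- *     }
- */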
-
-/**
- * Send global
- *
- * Send global charts to Netdata.
- *
- * @param publish structure with the computed hit ratio.
- */
-static void dcstat_send_global(netdata_publish_dcstat_t *publish)
-{
- dcstat_update_publish(publish, dcstat_hash_values[NETDATA_KEY_DC_REFERENCE],
- dcstat_hash_values[NETDATA_KEY_DC_MISS]);
-
- netdata_publish_syscall_t *ptr = dcstat_counter_publish_aggregated;
- netdata_idx_t value = dcstat_hash_values[NETDATA_KEY_DC_REFERENCE];
- if (value != ptr[NETDATA_DCSTAT_IDX_REFERENCE].pcall) {
- ptr[NETDATA_DCSTAT_IDX_REFERENCE].ncall = value - ptr[NETDATA_DCSTAT_IDX_REFERENCE].pcall;
- ptr[NETDATA_DCSTAT_IDX_REFERENCE].pcall = value;
-
- value = dcstat_hash_values[NETDATA_KEY_DC_SLOW];
- ptr[NETDATA_DCSTAT_IDX_SLOW].ncall = value - ptr[NETDATA_DCSTAT_IDX_SLOW].pcall;
- ptr[NETDATA_DCSTAT_IDX_SLOW].pcall = value;
-
- value = dcstat_hash_values[NETDATA_KEY_DC_MISS];
- ptr[NETDATA_DCSTAT_IDX_MISS].ncall = value - ptr[NETDATA_DCSTAT_IDX_MISS].pcall;
- ptr[NETDATA_DCSTAT_IDX_MISS].pcall = value;
- } else {
- ptr[NETDATA_DCSTAT_IDX_REFERENCE].ncall = 0;
- ptr[NETDATA_DCSTAT_IDX_SLOW].ncall = 0;
- ptr[NETDATA_DCSTAT_IDX_MISS].ncall = 0;
- }
-
- ebpf_one_dimension_write_charts(NETDATA_FILESYSTEM_FAMILY, NETDATA_DC_HIT_CHART,
- ptr[NETDATA_DCSTAT_IDX_RATIO].dimension, publish->ratio);
-
- write_count_chart(
- NETDATA_DC_REFERENCE_CHART, NETDATA_FILESYSTEM_FAMILY,
- &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE], 3);
-}
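-
-/*
- * dcstat_update_publish() (defined earlier in this module) turns the two
- * counters passed above into a percentage. A sketch of the expected math,
- * assuming the conventional definition hits = references - misses:
- *
- *     // hypothetical simplification of dcstat_update_publish()
- *     static void update_ratio(netdata_publish_dcstat_t *out, uint64_t references, uint64_t misses)
- *     {
- *         out->ratio = (references) ?
- *             (long long)(100.0 * (double)(references - misses) / (double)references) : 0;
- *     }
- */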
-
-/**
- * Create specific directory cache charts
- *
- * Create charts for cgroup/application.
- *
- * @param type the chart type.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_specific_dc_charts(char *type, int update_every)
-{
- ebpf_create_chart(type, NETDATA_DC_HIT_CHART, "Percentage of files inside directory cache",
- EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_CGROUP_DC_HIT_RATIO_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5700,
- ebpf_create_global_dimension,
- dcstat_counter_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
-
- ebpf_create_chart(type, NETDATA_DC_REFERENCE_CHART, "Count file access",
- EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_CGROUP_DC_REFERENCE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5701,
- ebpf_create_global_dimension,
- &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
-
- ebpf_create_chart(type, NETDATA_DC_REQUEST_NOT_CACHE_CHART,
- "Files not present inside directory cache",
- EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_CGROUP_DC_NOT_CACHE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5702,
- ebpf_create_global_dimension,
- &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_SLOW], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
-
- ebpf_create_chart(type, NETDATA_DC_REQUEST_NOT_FOUND_CHART,
- "Files not found",
- EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_CGROUP_DC_NOT_FOUND_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5703,
- ebpf_create_global_dimension,
- &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_MISS], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
-}
-
-/**
- * Obsolete specific directory cache charts
- *
- * Obsolete charts for cgroup/application.
- *
- * @param type the chart type.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_obsolete_specific_dc_charts(char *type, int update_every)
-{
- ebpf_write_chart_obsolete(type, NETDATA_DC_HIT_CHART,
- "",
- "Percentage of files inside directory cache",
- EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_HIT_RATIO_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5700, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_DC_REFERENCE_CHART,
- "",
- "Count file access",
- EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_REFERENCE_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5701, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_DC_REQUEST_NOT_CACHE_CHART,
- "",
- "Files not present inside directory cache",
- EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_NOT_CACHE_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5702, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_DC_REQUEST_NOT_FOUND_CHART,
- "",
- "Files not found",
- EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_NOT_FOUND_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5703, update_every);
-}
-
-/**
- * Directory Cache sum cgroup PIDs
- *
- * Sum values for all PIDs associated with a cgroup.
- *
- * @param publish output structure.
- * @param root structure with the listed PIDs
- */
-void ebpf_dc_sum_cgroup_pids(netdata_publish_dcstat_t *publish, struct pid_on_target2 *root)
-{
- memset(&publish->curr, 0, sizeof(netdata_dcstat_pid_t));
- netdata_dcstat_pid_t *dst = &publish->curr;
- while (root) {
- netdata_dcstat_pid_t *src = &root->dc;
-
- dst->cache_access += src->cache_access;
- dst->file_system += src->file_system;
- dst->not_found += src->not_found;
-
- root = root->next;
- }
-}
-
-/**
- * Calc chart values
- *
- * Do necessary math to plot charts.
- */
-void ebpf_dc_calc_chart_values()
-{
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- ebpf_dc_sum_cgroup_pids(&ect->publish_dc, ect->pids);
- uint64_t cache = ect->publish_dc.curr.cache_access;
- uint64_t not_found = ect->publish_dc.curr.not_found;
-
- dcstat_update_publish(&ect->publish_dc, cache, not_found);
-
- ect->publish_dc.cache_access = (long long)ect->publish_dc.curr.cache_access -
- (long long)ect->publish_dc.prev.cache_access;
- ect->publish_dc.prev.cache_access = ect->publish_dc.curr.cache_access;
-
- if (ect->publish_dc.curr.not_found < ect->publish_dc.prev.not_found) {
- ect->publish_dc.prev.not_found = 0;
- }
- }
-}
-
-/**
- * Create Systemd directory cache Charts
- *
- * Create charts when systemd is enabled
- *
- * @param update_every value to overwrite the update frequency set by the server.
- **/
-static void ebpf_create_systemd_dc_charts(int update_every)
-{
- ebpf_create_charts_on_systemd(NETDATA_DC_HIT_CHART,
- "Percentage of files inside directory cache",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21200,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_DC_HIT_RATIO_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT,
- update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_DC_REFERENCE_CHART,
- "Count file access",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21201,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_DC_REFERENCE_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT,
- update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_DC_REQUEST_NOT_CACHE_CHART,
- "Files not present inside directory cache",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21202,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT,
- update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_DC_REQUEST_NOT_FOUND_CHART,
- "Files not found",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
-                                  21203,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT,
- update_every);
-}
-
-/**
- * Send systemd Directory Cache charts
- *
- * Send collected data to Netdata.
- */
-static void ebpf_send_systemd_dc_charts()
-{
- collected_number value;
- ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_HIT_CHART, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long) ect->publish_dc.ratio);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REFERENCE_CHART, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long) ect->publish_dc.cache_access);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_CACHE_CHART, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
-            value = (!ect->publish_dc.cache_access) ? 0 :
-                (collected_number)((long long)ect->publish_dc.curr.file_system - (long long)ect->publish_dc.prev.file_system);
- ect->publish_dc.prev.file_system = ect->publish_dc.curr.file_system;
-
- write_chart_dimension(ect->name, (long long) value);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_FOUND_CHART, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
-            value = (!ect->publish_dc.cache_access) ? 0 :
-                (collected_number)((long long)ect->publish_dc.curr.not_found - (long long)ect->publish_dc.prev.not_found);
-
- ect->publish_dc.prev.not_found = ect->publish_dc.curr.not_found;
-
- write_chart_dimension(ect->name, (long long) value);
- }
- }
- ebpf_write_end_chart();
-}
-
-/**
- * Send specific Directory Cache data
- *
- * Send collected data to Netdata.
- *
- * @param type the chart type (cgroup name).
- * @param pdc  structure with the values to publish.
- */
-static void ebpf_send_specific_dc_data(char *type, netdata_publish_dcstat_t *pdc)
-{
- collected_number value;
- ebpf_write_begin_chart(type, NETDATA_DC_HIT_CHART, "");
- write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_RATIO].name,
- (long long) pdc->ratio);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_DC_REFERENCE_CHART, "");
- write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE].name,
- (long long) pdc->cache_access);
- ebpf_write_end_chart();
-
-    value = (!pdc->cache_access) ? 0 :
-            (collected_number)((long long)pdc->curr.file_system - (long long)pdc->prev.file_system);
- pdc->prev.file_system = pdc->curr.file_system;
-
- ebpf_write_begin_chart(type, NETDATA_DC_REQUEST_NOT_CACHE_CHART, "");
- write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_SLOW].name, (long long) value);
- ebpf_write_end_chart();
-
-    value = (!pdc->cache_access) ? 0 :
-            (collected_number)((long long)pdc->curr.not_found - (long long)pdc->prev.not_found);
- pdc->prev.not_found = pdc->curr.not_found;
-
- ebpf_write_begin_chart(type, NETDATA_DC_REQUEST_NOT_FOUND_CHART, "");
- write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_MISS].name, (long long) value);
- ebpf_write_end_chart();
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param update_every value to overwrite the update frequency set by the server.
-*/
-void ebpf_dc_send_cgroup_data(int update_every)
-{
- if (!ebpf_cgroup_pids)
- return;
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_cgroup_target_t *ect;
- ebpf_dc_calc_chart_values();
-
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
- if (send_cgroup_chart) {
- ebpf_create_systemd_dc_charts(update_every);
- }
-
- ebpf_send_systemd_dc_charts();
- }
-
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_DC_CHART) && ect->updated) {
- ebpf_create_specific_dc_charts(ect->name, update_every);
- ect->flags |= NETDATA_EBPF_CGROUP_HAS_DC_CHART;
- }
-
- if (ect->flags & NETDATA_EBPF_CGROUP_HAS_DC_CHART) {
- if (ect->updated) {
- ebpf_send_specific_dc_data(ect->name, &ect->publish_dc);
- } else {
- ebpf_obsolete_specific_dc_charts(ect->name, update_every);
- ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_DC_CHART;
- }
- }
- }
-
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
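-
-/*
- * The flag handling above is the chart life cycle shared by the cgroup
- * threads: create the chart once for an updated cgroup, keep sending data
- * while it stays updated, and mark it obsolete as soon as it stops updating:
- *
- *     if (!(flags & HAS_CHART) && updated) { create(); flags |= HAS_CHART; }
- *     if (flags & HAS_CHART) {
- *         if (updated) send();
- *         else        { obsolete(); flags &= ~HAS_CHART; }
- *     }
- */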
-
-/**
-* Main loop for this collector.
-*/
-static void dcstat_collector(ebpf_module_t *em)
-{
- netdata_publish_dcstat_t publish;
- memset(&publish, 0, sizeof(publish));
- int cgroups = em->cgroup_charts;
- int update_every = em->update_every;
- heartbeat_t hb;
- heartbeat_init(&hb);
- int counter = update_every - 1;
- int maps_per_core = em->maps_per_core;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- netdata_idx_t *stats = em->hash_table_stats;
- memset(stats, 0, sizeof(em->hash_table_stats));
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
-
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_dc_read_global_tables(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps)
- read_dc_apps_table(maps_per_core);
-
- if (cgroups)
- ebpf_update_dc_cgroup(maps_per_core);
-
- pthread_mutex_lock(&lock);
-
- dcstat_send_global(&publish);
-
- if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
- ebpf_dcache_send_apps_data(apps_groups_root_target);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_dcstat_pid)
- ebpf_send_data_aral_chart(ebpf_aral_dcstat_pid, em);
-#endif
-
- if (cgroups)
- ebpf_dc_send_cgroup_data(update_every);
-
- pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
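-
-/*
- * The heartbeat above wakes roughly once per second and only collects every
- * `update_every` ticks. The loop skeleton, with the work factored into a
- * hypothetical collect_and_publish():
- *
- *     heartbeat_t hb;
- *     heartbeat_init(&hb);
- *     int counter = update_every - 1;
- *     while (!ebpf_plugin_exit) {
- *         (void)heartbeat_next(&hb, USEC_PER_SEC);   // sleep ~1 second
- *         if (++counter != update_every)
- *             continue;                              // not our tick yet
- *         counter = 0;
- *         collect_and_publish();
- *     }
- */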
-
-/*****************************************************************
- *
- * INITIALIZE THREAD
- *
- *****************************************************************/
-
-/**
- * Create directory cache global charts
- *
- * Call ebpf_create_chart to create the charts for the collector.
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_dc_global_charts(int update_every)
-{
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, NETDATA_DC_HIT_CHART,
- "Percentage of files inside directory cache",
- EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_CACHE_SUBMENU,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21200,
- ebpf_create_global_dimension,
- dcstat_counter_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
-
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, NETDATA_DC_REFERENCE_CHART,
- "Variables used to calculate hit ratio.",
- EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21201,
- ebpf_create_global_dimension,
- &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE], 3,
- update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
-
- fflush(stdout);
-}
-
-/**
- * Allocate vectors used with this thread.
- *
- * We do not test the return value, because callocz already does this and shuts
- * down the software when an allocation is not possible.
- *
- * @param apps is apps enabled?
- */
-static void ebpf_dcstat_allocate_global_vectors(int apps)
-{
- if (apps) {
- ebpf_dcstat_aral_init();
- dcstat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_dcstat_t *));
- dcstat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_dcstat_pid_t));
- }
-
- dcstat_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
-
- memset(dcstat_counter_aggregated_data, 0, NETDATA_DCSTAT_IDX_END * sizeof(netdata_syscall_stat_t));
- memset(dcstat_counter_publish_aggregated, 0, NETDATA_DCSTAT_IDX_END * sizeof(netdata_publish_syscall_t));
-}
-
-/*****************************************************************
- *
- * MAIN THREAD
- *
- *****************************************************************/
-
-/*
- * Load BPF
- *
- * Load BPF files.
- *
- * @param em the structure with configuration
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_dcstat_load_bpf(ebpf_module_t *em)
-{
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(dcstat_maps, em->maps_per_core, running_on_kernel);
-#endif
-
- int ret = 0;
- ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_DC_TARGET_LOOKUP_FAST].mode);
- if (em->load & EBPF_LOAD_LEGACY) {
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- ret = -1;
- }
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- dc_bpf_obj = dc_bpf__open();
- if (!dc_bpf_obj)
- ret = -1;
- else
- ret = ebpf_dc_load_and_attach(dc_bpf_obj, em);
- }
-#endif
-
- if (ret)
- netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name);
-
- return ret;
-}
-
-/**
- * Directory Cache thread
- *
- * Thread used to run the dcstat collector.
- *
- * @param ptr a pointer to `struct ebpf_module`
- *
- * @return It always returns NULL
- */
-void *ebpf_dcstat_thread(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_dcstat_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = dcstat_maps;
-
- ebpf_update_pid_table(&dcstat_maps[NETDATA_DCSTAT_PID_STATS], em);
-
- ebpf_update_names(dc_optional_name, em);
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_dcstat_load_bpf(em)) {
- goto enddcstat;
- }
-
- ebpf_dcstat_allocate_global_vectors(em->apps_charts);
-
- int algorithms[NETDATA_DCSTAT_IDX_END] = {
- NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
- NETDATA_EBPF_ABSOLUTE_IDX
- };
-
- ebpf_global_labels(dcstat_counter_aggregated_data, dcstat_counter_publish_aggregated,
- dcstat_counter_dimension_name, dcstat_counter_dimension_name,
- algorithms, NETDATA_DCSTAT_IDX_END);
-
- pthread_mutex_lock(&lock);
- ebpf_create_dc_global_charts(em->update_every);
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_dcstat_pid)
- dcstat_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_DCSTAT_ARAL_NAME, em);
-#endif
-
- pthread_mutex_unlock(&lock);
-
- dcstat_collector(em);
-
-enddcstat:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.h b/collectors/ebpf.plugin/ebpf_dcstat.h
deleted file mode 100644
index 4d6aff12e..000000000
--- a/collectors/ebpf.plugin/ebpf_dcstat.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_DCSTAT_H
-#define NETDATA_EBPF_DCSTAT_H 1
-
-// Module name & description
-#define NETDATA_EBPF_MODULE_NAME_DCSTAT "dcstat"
-#define NETDATA_EBPF_DC_MODULE_DESC "Monitor file access using the directory cache. This thread is integrated with apps and cgroups."
-
-// charts
-#define NETDATA_DC_HIT_CHART "dc_hit_ratio"
-#define NETDATA_DC_REFERENCE_CHART "dc_reference"
-#define NETDATA_DC_REQUEST_NOT_CACHE_CHART "dc_not_cache"
-#define NETDATA_DC_REQUEST_NOT_FOUND_CHART "dc_not_found"
-
-#define NETDATA_DIRECTORY_CACHE_SUBMENU "directory cache"
-
-// configuration file
-#define NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE "dcstat.conf"
-
-// Contexts
-#define NETDATA_CGROUP_DC_HIT_RATIO_CONTEXT "cgroup.dc_ratio"
-#define NETDATA_CGROUP_DC_REFERENCE_CONTEXT "cgroup.dc_reference"
-#define NETDATA_CGROUP_DC_NOT_CACHE_CONTEXT "cgroup.dc_not_cache"
-#define NETDATA_CGROUP_DC_NOT_FOUND_CONTEXT "cgroup.dc_not_found"
-
-#define NETDATA_SYSTEMD_DC_HIT_RATIO_CONTEXT "services.dc_ratio"
-#define NETDATA_SYSTEMD_DC_REFERENCE_CONTEXT "services.dc_reference"
-#define NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT "services.dc_not_cache"
-#define NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT "services.dc_not_found"
-
-// ARAL name
-#define NETDATA_EBPF_DCSTAT_ARAL_NAME "ebpf_dcstat"
-
-enum directory_cache_indexes {
- NETDATA_DCSTAT_IDX_RATIO,
- NETDATA_DCSTAT_IDX_REFERENCE,
- NETDATA_DCSTAT_IDX_SLOW,
- NETDATA_DCSTAT_IDX_MISS,
-
- // Keep this as the last element and don't skip numbers, as it is used as an element counter
- NETDATA_DCSTAT_IDX_END
-};
-
-enum directory_cache_tables {
- NETDATA_DCSTAT_GLOBAL_STATS,
- NETDATA_DCSTAT_PID_STATS,
- NETDATA_DCSTAT_CTRL
-};
-
-// variables
-enum directory_cache_counters {
- NETDATA_KEY_DC_REFERENCE,
- NETDATA_KEY_DC_SLOW,
- NETDATA_KEY_DC_MISS,
-
- // Keep this as the last element and don't skip numbers, as it is used as an element counter
- NETDATA_DIRECTORY_CACHE_END
-};
-
-enum directory_cache_targets {
- NETDATA_DC_TARGET_LOOKUP_FAST,
- NETDATA_DC_TARGET_D_LOOKUP
-};
-
-typedef struct netdata_publish_dcstat_pid {
- uint64_t cache_access;
- uint64_t file_system;
- uint64_t not_found;
-} netdata_dcstat_pid_t;
-
-typedef struct netdata_publish_dcstat {
- long long ratio;
- long long cache_access;
-
- netdata_dcstat_pid_t curr;
- netdata_dcstat_pid_t prev;
-} netdata_publish_dcstat_t;
-
-void *ebpf_dcstat_thread(void *ptr);
-void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr);
-void ebpf_dcstat_release(netdata_publish_dcstat_t *stat);
-extern struct config dcstat_config;
-extern netdata_ebpf_targets_t dc_targets[];
-extern ebpf_local_maps_t dcstat_maps[];
-
-#endif // NETDATA_EBPF_DCSTAT_H
diff --git a/collectors/ebpf.plugin/ebpf_disk.c b/collectors/ebpf.plugin/ebpf_disk.c
deleted file mode 100644
index 466c2e3bb..000000000
--- a/collectors/ebpf.plugin/ebpf_disk.c
+++ /dev/null
@@ -1,940 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include <sys/resource.h>
-#include <stdlib.h>
-
-#include "ebpf.h"
-#include "ebpf_disk.h"
-
-struct config disk_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-static ebpf_local_maps_t disk_maps[] = {{.name = "tbl_disk_iocall", .internal_input = NETDATA_DISK_HISTOGRAM_LENGTH,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = "tmp_disk_tp_stat", .internal_input = 8192, .user_input = 8192,
- .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-static avl_tree_lock disk_tree;
-netdata_ebpf_disks_t *disk_list = NULL;
-
-char *tracepoint_block_type = "block";
-char *tracepoint_block_issue = "block_rq_issue";
-char *tracepoint_block_rq_complete = "block_rq_complete";
-
-static int was_block_issue_enabled = 0;
-static int was_block_rq_complete_enabled = 0;
-
-static char **dimensions = NULL;
-static netdata_syscall_stat_t disk_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS];
-static netdata_publish_syscall_t disk_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS];
-
-static netdata_idx_t *disk_hash_values = NULL;
-
-ebpf_publish_disk_t *plot_disks = NULL;
-pthread_mutex_t plot_mutex;
-
-#ifdef LIBBPF_MAJOR_VERSION
-/**
- * Set hash table
- *
- * Set the values for maps according to the values given by the kernel.
- *
- * @param obj is the main structure for bpf objects.
- */
-static inline void ebpf_disk_set_hash_table(struct disk_bpf *obj)
-{
-    disk_maps[NETDATA_DISK_IO].map_fd = bpf_map__fd(obj->maps.tbl_disk_iocall);
-}
-
-/**
- * Load and attach
- *
- * Load and attach the eBPF code in kernel.
- *
- * @param obj is the main structure for bpf objects.
- *
- * @return it returns 0 on success and -1 otherwise
- */
-static inline int ebpf_disk_load_and_attach(struct disk_bpf *obj)
-{
- int ret = disk_bpf__load(obj);
- if (ret) {
- return ret;
- }
-
- return disk_bpf__attach(obj);
-}
-#endif
-
-/*****************************************************************
- *
- * FUNCTIONS TO MANIPULATE HARD DISKS
- *
- *****************************************************************/
-
-/**
- * Parse start
- *
- * Parse start address of disk
- *
- * @param w structure where data is stored
- * @param filename path of the file to read
- *
- * @return It returns 0 on success and -1 otherwise
- */
-static inline int ebpf_disk_parse_start(netdata_ebpf_disks_t *w, char *filename)
-{
- char content[FILENAME_MAX + 1];
- int fd = open(filename, O_RDONLY, 0);
- if (fd < 0) {
- return -1;
- }
-
-    ssize_t file_length = read(fd, content, FILENAME_MAX);
- if (file_length > 0) {
- if (file_length > FILENAME_MAX)
- file_length = FILENAME_MAX;
-
- content[file_length] = '\0';
- w->start = strtoul(content, NULL, 10);
- }
- close(fd);
-
- return 0;
-}
-
-/**
- * Parse uevent
- *
- * Parse uevent file
- *
- * @param w structure where data is stored
- * @param filename path of the file to read
- *
- * @return It returns 0 on success and -1 otherwise
- */
-static inline int ebpf_parse_uevent(netdata_ebpf_disks_t *w, char *filename)
-{
- char content[FILENAME_MAX + 1];
- int fd = open(filename, O_RDONLY, 0);
- if (fd < 0) {
- return -1;
- }
-
- ssize_t file_length = read(fd, content, FILENAME_MAX);
- if (file_length > 0) {
- if (file_length > FILENAME_MAX)
- file_length = FILENAME_MAX;
-
- content[file_length] = '\0';
-
- char *s = strstr(content, "PARTNAME=EFI");
- if (s) {
- w->main->boot_partition = w;
- w->flags |= NETDATA_DISK_HAS_EFI;
- w->boot_chart = strdupz("disk_bootsector");
- }
- }
- close(fd);
-
- return 0;
-}
-
-/**
- * Parse Size
- *
- * @param w structure where data is stored
- * @param filename path of the file to read
- *
- * @return It returns 0 on success and -1 otherwise
- */
-static inline int ebpf_parse_size(netdata_ebpf_disks_t *w, char *filename)
-{
- char content[FILENAME_MAX + 1];
- int fd = open(filename, O_RDONLY, 0);
- if (fd < 0) {
- return -1;
- }
-
- ssize_t file_length = read(fd, content, FILENAME_MAX);
- if (file_length > 0) {
- if (file_length > FILENAME_MAX)
- file_length = FILENAME_MAX;
-
- content[file_length] = '\0';
-        w->end = w->start + strtoul(content, NULL, 10) - 1;
- }
- close(fd);
-
- return 0;
-}
-
-/**
- * Read Disk information
- *
- * Read disk information from /sys/block
- *
- * @param w structure where data is stored
- * @param name disk name
- */
-static void ebpf_read_disk_info(netdata_ebpf_disks_t *w, char *name)
-{
- static netdata_ebpf_disks_t *main_disk = NULL;
- static uint32_t key = 0;
- char *path = { "/sys/block" };
- char disk[NETDATA_DISK_NAME_LEN + 1];
- char filename[FILENAME_MAX + 1];
- snprintfz(disk, NETDATA_DISK_NAME_LEN, "%s", name);
- size_t length = strlen(disk);
- if (!length) {
- return;
- }
-
- length--;
- size_t curr = length;
- while (isdigit((int)disk[length])) {
- disk[length--] = '\0';
- }
-
-    // We are looking for partition information; a whole disk (no trailing partition digits) is recorded as the main disk instead.
- if (curr == length) {
- main_disk = w;
- key = MKDEV(w->major, w->minor);
- w->bootsector_key = key;
- return;
- }
- w->bootsector_key = key;
- w->main = main_disk;
-
- snprintfz(filename, FILENAME_MAX, "%s/%s/%s/uevent", path, disk, name);
- if (ebpf_parse_uevent(w, filename))
- return;
-
- snprintfz(filename, FILENAME_MAX, "%s/%s/%s/start", path, disk, name);
- if (ebpf_disk_parse_start(w, filename))
- return;
-
- snprintfz(filename, FILENAME_MAX, "%s/%s/%s/size", path, disk, name);
- ebpf_parse_size(w, filename);
-}
-
-/**
- * New encode dev
- *
- * New encode algorithm extracted from https://elixir.bootlin.com/linux/v5.10.8/source/include/linux/kdev_t.h#L39
- *
- * @param major driver major number
- * @param minor driver minor number
- *
- * @return It returns the encoded device id.
- */
-static inline uint32_t netdata_new_encode_dev(uint32_t major, uint32_t minor) {
- return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
-}
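-
-/*
- * Worked example: /dev/sda1 usually has major 8 and minor 1, so the encoding
- * above yields (1 & 0xff) | (8 << 8) | ((1 & ~0xff) << 12) = 0x801, the same
- * dev_t value the kernel reports for that partition.
- */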
-
-/**
- * Compare disks
- *
- * Compare major and minor values to add disks to tree.
- *
- * @param a pointer to netdata_ebpf_disks
- * @param b pointer to netdata_ebpf_disks
- *
- * @return It returns 0 when the values are equal, 1 when a is bigger than b, and -1 when a is smaller than b.
-*/
-static int ebpf_compare_disks(void *a, void *b)
-{
- netdata_ebpf_disks_t *ptr1 = a;
- netdata_ebpf_disks_t *ptr2 = b;
-
- if (ptr1->dev > ptr2->dev)
- return 1;
- if (ptr1->dev < ptr2->dev)
- return -1;
-
- return 0;
-}
-
-/**
- * Update disk table
- *
- * Update the linked list when necessary.
- *
- * @param name disk name
- * @param major major disk identifier
- * @param minor minor disk identifier
- * @param current_time current timestamp
- */
-static void update_disk_table(char *name, int major, int minor, time_t current_time)
-{
- netdata_ebpf_disks_t find;
- netdata_ebpf_disks_t *w;
- size_t length;
-
- uint32_t dev = netdata_new_encode_dev(major, minor);
- find.dev = dev;
- netdata_ebpf_disks_t *ret = (netdata_ebpf_disks_t *) avl_search_lock(&disk_tree, (avl_t *)&find);
- if (ret) { // Disk is already present
- ret->flags |= NETDATA_DISK_IS_HERE;
- ret->last_update = current_time;
- return;
- }
-
- netdata_ebpf_disks_t *update_next = disk_list;
- if (likely(disk_list)) {
- netdata_ebpf_disks_t *move = disk_list;
- while (move) {
- if (dev == move->dev)
- return;
-
- update_next = move;
- move = move->next;
- }
-
- w = callocz(1, sizeof(netdata_ebpf_disks_t));
- length = strlen(name);
- if (length >= NETDATA_DISK_NAME_LEN)
- length = NETDATA_DISK_NAME_LEN;
-
- memcpy(w->family, name, length);
- w->family[length] = '\0';
- w->major = major;
- w->minor = minor;
- w->dev = netdata_new_encode_dev(major, minor);
- update_next->next = w;
- } else {
- disk_list = callocz(1, sizeof(netdata_ebpf_disks_t));
- length = strlen(name);
- if (length >= NETDATA_DISK_NAME_LEN)
- length = NETDATA_DISK_NAME_LEN;
-
- memcpy(disk_list->family, name, length);
- disk_list->family[length] = '\0';
- disk_list->major = major;
- disk_list->minor = minor;
- disk_list->dev = netdata_new_encode_dev(major, minor);
-
- w = disk_list;
- }
-
- ebpf_read_disk_info(w, name);
-
- netdata_ebpf_disks_t *check;
- check = (netdata_ebpf_disks_t *) avl_insert_lock(&disk_tree, (avl_t *)w);
- if (check != w)
- netdata_log_error("Internal error, cannot insert the AVL tree.");
-
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("The Latency is monitoring the hard disk %s (Major = %d, Minor = %d, Device = %u)", name, major, minor,w->dev);
-#endif
-
- w->flags |= NETDATA_DISK_IS_HERE;
-}
-
-/**
- * Read Local Disks
- *
- * Parse /proc/partitions to get block disks used to measure latency.
- *
- * @return It returns 0 on success and -1 otherwise
- */
-static int read_local_disks()
-{
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, NETDATA_EBPF_PROC_PARTITIONS);
- procfile *ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if (!ff)
- return -1;
-
- ff = procfile_readall(ff);
- if (!ff)
- return -1;
-
- size_t lines = procfile_lines(ff), l;
- time_t current_time = now_realtime_sec();
- for(l = 2; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
-        // This is the header or the end of the file
- if (unlikely(words < 4))
- continue;
-
- int major = (int)strtol(procfile_lineword(ff, l, 0), NULL, 10);
-        // The main goal of this thread is to measure block devices, so any device whose major number
-        // is 7 or less according to /proc/devices (e.g. loop devices) is not "important".
- if (major > 7) {
- int minor = (int)strtol(procfile_lineword(ff, l, 1), NULL, 10);
- update_disk_table(procfile_lineword(ff, l, 3), major, minor, current_time);
- }
- }
-
- procfile_close(ff);
-
- return 0;
-}
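-
-/*
- * For reference, /proc/partitions is a header line, a blank line, and one row
- * per device or partition, which is why the parser skips the first two lines
- * and reads words 0 (major), 1 (minor) and 3 (name):
- *
- *     major minor  #blocks  name
- *
- *        8        0  488386584 sda
- *        8        1     524288 sda1
- */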
-
-/**
- * Update disks
- *
- * @param em main thread structure
- */
-void ebpf_update_disks(ebpf_module_t *em)
-{
- static time_t update_every = 0;
- time_t curr = now_realtime_sec();
- if (curr < update_every)
- return;
-
- update_every = curr + 5 * em->update_every;
-
- (void)read_local_disks();
-}
-
-/*****************************************************************
- *
- * FUNCTIONS TO CLOSE THE THREAD
- *
- *****************************************************************/
-
-/**
- * Disk disable tracepoints
- *
- * Disable the tracepoints that the plugin was responsible for enabling.
- */
-static void ebpf_disk_disable_tracepoints()
-{
-    char *default_message = "Cannot disable the tracepoint";
- if (!was_block_issue_enabled) {
- if (ebpf_disable_tracing_values(tracepoint_block_type, tracepoint_block_issue))
- netdata_log_error("%s %s/%s.", default_message, tracepoint_block_type, tracepoint_block_issue);
- }
-
- if (!was_block_rq_complete_enabled) {
- if (ebpf_disable_tracing_values(tracepoint_block_type, tracepoint_block_rq_complete))
- netdata_log_error("%s %s/%s.", default_message, tracepoint_block_type, tracepoint_block_rq_complete);
- }
-}
-
-/**
- * Cleanup plot disks
- *
- * Clean disk list
- */
-static void ebpf_cleanup_plot_disks()
-{
- ebpf_publish_disk_t *move = plot_disks, *next;
- while (move) {
- next = move->next;
-
- freez(move);
-
- move = next;
- }
- plot_disks = NULL;
-}
-
-/**
- * Cleanup Disk List
- */
-static void ebpf_cleanup_disk_list()
-{
- netdata_ebpf_disks_t *move = disk_list;
- while (move) {
- netdata_ebpf_disks_t *next = move->next;
-
- freez(move->histogram.name);
- freez(move->boot_chart);
- freez(move);
-
- move = next;
- }
- disk_list = NULL;
-}
-
-/**
- * Obsolete global
- *
- * Mark the global charts created by the thread as obsolete.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_disk_global(ebpf_module_t *em)
-{
- ebpf_publish_disk_t *move = plot_disks;
- while (move) {
- netdata_ebpf_disks_t *ned = move->plot;
- uint32_t flags = ned->flags;
- if (flags & NETDATA_DISK_CHART_CREATED) {
- ebpf_write_chart_obsolete(ned->histogram.name,
- ned->family,
- "",
- "Disk latency",
- EBPF_COMMON_DIMENSION_CALL,
- ned->family,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- ned->histogram.order,
- em->update_every);
- }
-
- move = move->next;
- }
-}
-
-/**
- * Disk exit.
- *
- * Cancel child and exit.
- *
- * @param ptr thread data.
- */
-static void ebpf_disk_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
-
- ebpf_obsolete_disk_global(em);
-
- pthread_mutex_unlock(&lock);
- fflush(stdout);
- }
- ebpf_disk_disable_tracepoints();
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, disk_maps, EBPF_ACTION_STAT_REMOVE);
-
- if (em->objects) {
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- if (dimensions)
- ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS);
-
- freez(disk_hash_values);
- disk_hash_values = NULL;
- pthread_mutex_destroy(&plot_mutex);
-
- ebpf_cleanup_plot_disks();
- ebpf_cleanup_disk_list();
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/*****************************************************************
- *
- * MAIN LOOP
- *
- *****************************************************************/
-
-/**
- * Fill Plot list
- *
- * @param ptr a pointer for current disk
- */
-static void ebpf_fill_plot_disks(netdata_ebpf_disks_t *ptr)
-{
- pthread_mutex_lock(&plot_mutex);
- ebpf_publish_disk_t *w;
- if (likely(plot_disks)) {
- ebpf_publish_disk_t *move = plot_disks, *store = plot_disks;
- while (move) {
- if (move->plot == ptr) {
- pthread_mutex_unlock(&plot_mutex);
- return;
- }
-
- store = move;
- move = move->next;
- }
-
- w = callocz(1, sizeof(ebpf_publish_disk_t));
- w->plot = ptr;
- store->next = w;
- } else {
- plot_disks = callocz(1, sizeof(ebpf_publish_disk_t));
- plot_disks->plot = ptr;
- }
- pthread_mutex_unlock(&plot_mutex);
-
- ptr->flags |= NETDATA_DISK_ADDED_TO_PLOT_LIST;
-}
-
-/**
- * Read hard disk table
- *
- * Read the table with the latency histogram for all monitored disks.
- *
- * @param table file descriptor for table
- * @param maps_per_core do I need to read all cores?
- */
-static void read_hard_disk_tables(int table, int maps_per_core)
-{
- netdata_idx_t *values = disk_hash_values;
- block_key_t key = {};
- block_key_t next_key = {};
-
- netdata_ebpf_disks_t *ret = NULL;
-
- while (bpf_map_get_next_key(table, &key, &next_key) == 0) {
- int test = bpf_map_lookup_elem(table, &key, values);
- if (test < 0) {
- key = next_key;
- continue;
- }
-
- netdata_ebpf_disks_t find;
- find.dev = key.dev;
-
- if (likely(ret)) {
- if (find.dev != ret->dev)
- ret = (netdata_ebpf_disks_t *)avl_search_lock(&disk_tree, (avl_t *)&find);
- } else
- ret = (netdata_ebpf_disks_t *)avl_search_lock(&disk_tree, (avl_t *)&find);
-
-        // Disk was inserted after we parsed /proc/partitions
- if (!ret) {
- if (read_local_disks()) {
- key = next_key;
- continue;
- }
-
- ret = (netdata_ebpf_disks_t *)avl_search_lock(&disk_tree, (avl_t *)&find);
- if (!ret) {
-                // We should never reach this point, but we add this check to keep the code safe
- key = next_key;
- continue;
- }
- }
-
- uint64_t total = 0;
- int i;
- int end = (maps_per_core) ? 1 : ebpf_nprocs;
- for (i = 0; i < end; i++) {
- total += values[i];
- }
-
- ret->histogram.histogram[key.bin] = total;
-
- if (!(ret->flags & NETDATA_DISK_ADDED_TO_PLOT_LIST))
- ebpf_fill_plot_disks(ret);
-
- key = next_key;
- }
-}
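-
-/*
- * The loop above is the canonical userspace walk over a BPF hash map:
- * bpf_map_get_next_key() enumerates the keys and bpf_map_lookup_elem() fetches
- * the per-CPU values for each one. In its generic form:
- *
- *     block_key_t key = {}, next_key = {};
- *     while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
- *         if (!bpf_map_lookup_elem(fd, &key, values))
- *             consume(&key, values);   // hypothetical per-entry callback
- *         key = next_key;
- *     }
- */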
-
-/**
- * Obsolete Hard Disk charts
- *
- * Mark hard disk charts as obsolete and clear the chart flags.
- *
- * @param w the structure with necessary information to create the chart
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_obsolete_hd_charts(netdata_ebpf_disks_t *w, int update_every)
-{
- ebpf_write_chart_obsolete(w->histogram.name, w->family, "", w->histogram.title, EBPF_COMMON_DIMENSION_CALL,
- w->family, NETDATA_EBPF_CHART_TYPE_STACKED, "disk.latency_io",
- w->histogram.order, update_every);
-
- w->flags = 0;
-}
-
-/**
- * Create Hard Disk charts
- *
- * Create hard disk charts and fill the chart name.
- *
- * @param w the structure with necessary information to create the chart
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_hd_charts(netdata_ebpf_disks_t *w, int update_every)
-{
- int order = NETDATA_CHART_PRIO_DISK_LATENCY;
- char *family = w->family;
-
- w->histogram.name = strdupz("disk_latency_io");
- w->histogram.title = NULL;
- w->histogram.order = order;
-
- ebpf_create_chart(w->histogram.name, family, "Disk latency", EBPF_COMMON_DIMENSION_CALL,
- family, "disk.latency_io", NETDATA_EBPF_CHART_TYPE_STACKED, order,
- ebpf_create_global_dimension, disk_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
- update_every, NETDATA_EBPF_MODULE_NAME_DISK);
- order++;
-
- w->flags |= NETDATA_DISK_CHART_CREATED;
-
- fflush(stdout);
-}
-
-/**
- * Remove pointer from plot
- *
- * Remove pointer from plot list when the disk is not present.
- */
-static void ebpf_remove_pointer_from_plot_disk(ebpf_module_t *em)
-{
- time_t current_time = now_realtime_sec();
- time_t limit = 10 * em->update_every;
- pthread_mutex_lock(&plot_mutex);
- ebpf_publish_disk_t *move = plot_disks, *prev = plot_disks;
- int update_every = em->update_every;
- while (move) {
- netdata_ebpf_disks_t *ned = move->plot;
- uint32_t flags = ned->flags;
-
- if (!(flags & NETDATA_DISK_IS_HERE) && ((current_time - ned->last_update) > limit)) {
- ebpf_obsolete_hd_charts(ned, update_every);
- avl_t *ret = (avl_t *)avl_remove_lock(&disk_tree, (avl_t *)ned);
- UNUSED(ret);
- if (move == plot_disks) {
- freez(move);
- plot_disks = NULL;
- break;
- } else {
- prev->next = move->next;
- ebpf_publish_disk_t *clean = move;
- move = move->next;
- freez(clean);
- continue;
- }
- }
-
- prev = move;
- move = move->next;
- }
- pthread_mutex_unlock(&plot_mutex);
-}
-
-/**
- * Send Hard disk data
- *
- * Send hard disk information to Netdata.
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_latency_send_hd_data(int update_every)
-{
- pthread_mutex_lock(&plot_mutex);
- if (!plot_disks) {
- pthread_mutex_unlock(&plot_mutex);
- return;
- }
-
- ebpf_publish_disk_t *move = plot_disks;
- while (move) {
- netdata_ebpf_disks_t *ned = move->plot;
- uint32_t flags = ned->flags;
- if (!(flags & NETDATA_DISK_CHART_CREATED)) {
- ebpf_create_hd_charts(ned, update_every);
- }
-
- if ((flags & NETDATA_DISK_CHART_CREATED)) {
- write_histogram_chart(ned->histogram.name, ned->family,
- ned->histogram.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS);
- }
-
- ned->flags &= ~NETDATA_DISK_IS_HERE;
-
- move = move->next;
- }
- pthread_mutex_unlock(&plot_mutex);
-}
-
-/**
-* Main loop for this collector.
-*/
-static void disk_collector(ebpf_module_t *em)
-{
- disk_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
-
- int update_every = em->update_every;
- heartbeat_t hb;
- heartbeat_init(&hb);
- int counter = update_every - 1;
- int maps_per_core = em->maps_per_core;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
-
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- read_hard_disk_tables(disk_maps[NETDATA_DISK_IO].map_fd, maps_per_core);
- pthread_mutex_lock(&lock);
- ebpf_remove_pointer_from_plot_disk(em);
- ebpf_latency_send_hd_data(update_every);
-
- pthread_mutex_unlock(&lock);
-
- ebpf_update_disks(em);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
-
-/*****************************************************************
- *
- * EBPF DISK THREAD
- *
- *****************************************************************/
-
-/**
- * Enable tracepoints
- *
- * Enable necessary tracepoints for thread.
- *
- * @return It returns 0 on success and -1 otherwise
- */
-static int ebpf_disk_enable_tracepoints()
-{
- int test = ebpf_is_tracepoint_enabled(tracepoint_block_type, tracepoint_block_issue);
- if (test == -1)
- return -1;
- else if (!test) {
- if (ebpf_enable_tracing_values(tracepoint_block_type, tracepoint_block_issue))
- return -1;
- }
- was_block_issue_enabled = test;
-
- test = ebpf_is_tracepoint_enabled(tracepoint_block_type, tracepoint_block_rq_complete);
- if (test == -1)
- return -1;
- else if (!test) {
- if (ebpf_enable_tracing_values(tracepoint_block_type, tracepoint_block_rq_complete))
- return -1;
- }
- was_block_rq_complete_enabled = test;
-
- return 0;
-}
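-
-/*
- * ebpf_enable_tracing_values()/ebpf_disable_tracing_values() toggle the
- * tracepoints through tracefs; conceptually this is equivalent to (the path
- * may vary by distribution and mount point):
- *
- *     echo 1 > /sys/kernel/debug/tracing/events/block/block_rq_issue/enable
- */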
-
-/*
- * Load BPF
- *
- * Load BPF files.
- *
- * @param em the structure with configuration
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_disk_load_bpf(ebpf_module_t *em)
-{
- int ret = 0;
- if (em->load & EBPF_LOAD_LEGACY) {
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- ret = -1;
- }
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- disk_bpf_obj = disk_bpf__open();
- if (!disk_bpf_obj)
- ret = -1;
- else {
- ret = ebpf_disk_load_and_attach(disk_bpf_obj);
- if (!ret)
- ebpf_disk_set_hash_table(disk_bpf_obj);
- }
- }
-#endif
-
- if (ret)
- netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name);
-
- return ret;
-}
-
-/**
- * Disk thread
- *
- * Thread used to generate disk charts.
- *
- * @param ptr a pointer to `struct ebpf_module`
- *
- * @return It always returns NULL
- */
-void *ebpf_disk_thread(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_disk_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = disk_maps;
-
- if (ebpf_disk_enable_tracepoints()) {
- goto enddisk;
- }
-
- avl_init_lock(&disk_tree, ebpf_compare_disks);
- if (read_local_disks()) {
- goto enddisk;
- }
-
- if (pthread_mutex_init(&plot_mutex, NULL)) {
- netdata_log_error("Cannot initialize local mutex");
- goto enddisk;
- }
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(disk_maps, em->maps_per_core, running_on_kernel);
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_disk_load_bpf(em)) {
- goto enddisk;
- }
-
- int algorithms[NETDATA_EBPF_HIST_MAX_BINS];
- ebpf_fill_algorithms(algorithms, NETDATA_EBPF_HIST_MAX_BINS, NETDATA_EBPF_INCREMENTAL_IDX);
- dimensions = ebpf_fill_histogram_dimension(NETDATA_EBPF_HIST_MAX_BINS);
-
- ebpf_global_labels(disk_aggregated_data, disk_publish_aggregated, dimensions, dimensions, algorithms,
- NETDATA_EBPF_HIST_MAX_BINS);
-
- pthread_mutex_lock(&lock);
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, disk_maps, EBPF_ACTION_STAT_ADD);
- pthread_mutex_unlock(&lock);
-
- disk_collector(em);
-
-enddisk:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
-
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_disk.h b/collectors/ebpf.plugin/ebpf_disk.h
deleted file mode 100644
index 487ed376d..000000000
--- a/collectors/ebpf.plugin/ebpf_disk.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_DISK_H
-#define NETDATA_EBPF_DISK_H 1
-
-// Module name & description
-#define NETDATA_EBPF_MODULE_NAME_DISK "disk"
-#define NETDATA_EBPF_DISK_MODULE_DESC "Monitor disk latency independent of filesystem."
-
-#include "libnetdata/avl/avl.h"
-#include "libnetdata/ebpf/ebpf.h"
-
-#define NETDATA_EBPF_PROC_PARTITIONS "/proc/partitions"
-
-// Process configuration name
-#define NETDATA_DISK_CONFIG_FILE "disk.conf"
-
-// Decode function extracted from: https://elixir.bootlin.com/linux/v5.10.8/source/include/linux/kdev_t.h#L7
-#define MINORBITS 20
-#define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi))
-
-enum netdata_latency_disks_flags {
- NETDATA_DISK_ADDED_TO_PLOT_LIST = 1,
- NETDATA_DISK_CHART_CREATED = 2,
- NETDATA_DISK_IS_HERE = 4,
- NETDATA_DISK_HAS_EFI = 8
-};
-
-/*
- * The definition (DISK_NAME_LEN) has been a stable value since kernel 3.0,
- * so we keep an internal definition to avoid including linux/genhd.h.
- */
-#define NETDATA_DISK_NAME_LEN 32
-typedef struct netdata_ebpf_disks {
- // Search
- avl_t avl;
- uint32_t dev;
- uint32_t major;
- uint32_t minor;
- uint32_t bootsector_key;
- uint64_t start; // start sector
- uint64_t end; // end sector
-
- // Print information
- char family[NETDATA_DISK_NAME_LEN + 1];
- char *boot_chart;
-
- netdata_ebpf_histogram_t histogram;
-
- uint32_t flags;
- time_t last_update;
-
- struct netdata_ebpf_disks *main;
- struct netdata_ebpf_disks *boot_partition;
- struct netdata_ebpf_disks *next;
-} netdata_ebpf_disks_t;
-
-enum ebpf_disk_tables { NETDATA_DISK_IO };
-
-typedef struct block_key {
- uint32_t bin;
- uint32_t dev;
-} block_key_t;
-
-typedef struct netdata_ebpf_publish_disk {
- netdata_ebpf_disks_t *plot;
- struct netdata_ebpf_publish_disk *next;
-} ebpf_publish_disk_t;
-
-extern struct config disk_config;
-
-void *ebpf_disk_thread(void *ptr);
-
-#endif /* NETDATA_EBPF_DISK_H */
-
diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c
deleted file mode 100644
index 3c8f30d3e..000000000
--- a/collectors/ebpf.plugin/ebpf_fd.c
+++ /dev/null
@@ -1,1431 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_fd.h"
-
-static char *fd_dimension_names[NETDATA_FD_SYSCALL_END] = { "open", "close" };
-static char *fd_id_names[NETDATA_FD_SYSCALL_END] = { "do_sys_open", "__close_fd" };
-
-static char *close_targets[NETDATA_EBPF_MAX_FD_TARGETS] = {"close_fd", "__close_fd"};
-static char *open_targets[NETDATA_EBPF_MAX_FD_TARGETS] = {"do_sys_openat2", "do_sys_open"};
-
-static netdata_syscall_stat_t fd_aggregated_data[NETDATA_FD_SYSCALL_END];
-static netdata_publish_syscall_t fd_publish_aggregated[NETDATA_FD_SYSCALL_END];
-
-static ebpf_local_maps_t fd_maps[] = {{.name = "tbl_fd_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = "tbl_fd_global", .internal_input = NETDATA_KEY_END_VECTOR,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "fd_ctrl", .internal_input = NETDATA_CONTROLLER_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-
-struct config fd_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER];
-static netdata_idx_t *fd_values = NULL;
-
-netdata_fd_stat_t *fd_vector = NULL;
-
-netdata_ebpf_targets_t fd_targets[] = { {.name = "open", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "close", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-
-#ifdef NETDATA_DEV_MODE
-int fd_disable_priority;
-#endif
-
-#ifdef LIBBPF_MAJOR_VERSION
-/**
- * Disable probe
- *
- * Disable all probes so that another attach method can be used exclusively.
- *
- * @param obj is the main structure for bpf objects
-*/
-static inline void ebpf_fd_disable_probes(struct fd_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_sys_open_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_sys_open_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_fd_kprobe, false);
- if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
- bpf_program__set_autoload(obj->progs.netdata___close_fd_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata___close_fd_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_close_fd_kprobe, false);
- } else {
- bpf_program__set_autoload(obj->progs.netdata___close_fd_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_close_fd_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_close_fd_kprobe, false);
- }
-}
-
-/*
- * Disable specific probe
- *
- * Disable probes according to the kernel version.
- *
- * @param obj is the main structure for bpf objects
- */
-static inline void ebpf_disable_specific_probes(struct fd_bpf *obj)
-{
- if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
- bpf_program__set_autoload(obj->progs.netdata___close_fd_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata___close_fd_kprobe, false);
- } else {
- bpf_program__set_autoload(obj->progs.netdata_close_fd_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_close_fd_kprobe, false);
- }
-}
-
-/*
- * Disable trampoline
- *
- * Disable all trampolines so that another attach method can be used exclusively.
- *
- * @param obj is the main structure for bpf objects.
- */
-static inline void ebpf_disable_trampoline(struct fd_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_sys_open_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_sys_open_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_close_fd_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_close_fd_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata___close_fd_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata___close_fd_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_fd_fentry, false);
-}
-
-/*
- * Disable specific trampoline
- *
- * Disable trampolines according to the kernel version.
- *
- * @param obj is the main structure for bpf objects.
- */
-static inline void ebpf_disable_specific_trampoline(struct fd_bpf *obj)
-{
- if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
- bpf_program__set_autoload(obj->progs.netdata___close_fd_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata___close_fd_fexit, false);
- } else {
- bpf_program__set_autoload(obj->progs.netdata_close_fd_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_close_fd_fexit, false);
- }
-}
-
-/**
- * Set trampoline target
- *
- * Set the targets we will monitor.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_set_trampoline_target(struct fd_bpf *obj)
-{
- bpf_program__set_attach_target(obj->progs.netdata_sys_open_fentry, 0, fd_targets[NETDATA_FD_SYSCALL_OPEN].name);
- bpf_program__set_attach_target(obj->progs.netdata_sys_open_fexit, 0, fd_targets[NETDATA_FD_SYSCALL_OPEN].name);
- bpf_program__set_attach_target(obj->progs.netdata_release_task_fd_fentry, 0, EBPF_COMMON_FNCT_CLEAN_UP);
-
- if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
- bpf_program__set_attach_target(
- obj->progs.netdata_close_fd_fentry, 0, fd_targets[NETDATA_FD_SYSCALL_CLOSE].name);
- bpf_program__set_attach_target(obj->progs.netdata_close_fd_fexit, 0, fd_targets[NETDATA_FD_SYSCALL_CLOSE].name);
- } else {
- bpf_program__set_attach_target(
- obj->progs.netdata___close_fd_fentry, 0, fd_targets[NETDATA_FD_SYSCALL_CLOSE].name);
- bpf_program__set_attach_target(
- obj->progs.netdata___close_fd_fexit, 0, fd_targets[NETDATA_FD_SYSCALL_CLOSE].name);
- }
-}
-
-/**
- * Attach probes
- *
- * Attach probes to the monitored targets.
- *
- * @param obj is the main structure for bpf objects.
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_fd_attach_probe(struct fd_bpf *obj)
-{
- obj->links.netdata_sys_open_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_sys_open_kprobe, false,
- fd_targets[NETDATA_FD_SYSCALL_OPEN].name);
- int ret = libbpf_get_error(obj->links.netdata_sys_open_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_sys_open_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_sys_open_kretprobe, true,
- fd_targets[NETDATA_FD_SYSCALL_OPEN].name);
- ret = libbpf_get_error(obj->links.netdata_sys_open_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_release_task_fd_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_release_task_fd_kprobe,
- false,
- EBPF_COMMON_FNCT_CLEAN_UP);
- ret = libbpf_get_error(obj->links.netdata_release_task_fd_kprobe);
- if (ret)
- return -1;
-
- if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
- obj->links.netdata_close_fd_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_close_fd_kretprobe, true,
- fd_targets[NETDATA_FD_SYSCALL_CLOSE].name);
- ret = libbpf_get_error(obj->links.netdata_close_fd_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_close_fd_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_close_fd_kprobe, false,
- fd_targets[NETDATA_FD_SYSCALL_CLOSE].name);
- ret = libbpf_get_error(obj->links.netdata_close_fd_kprobe);
- if (ret)
- return -1;
- } else {
- obj->links.netdata___close_fd_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata___close_fd_kretprobe,
- true,
- fd_targets[NETDATA_FD_SYSCALL_CLOSE].name);
- ret = libbpf_get_error(obj->links.netdata___close_fd_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata___close_fd_kprobe = bpf_program__attach_kprobe(obj->progs.netdata___close_fd_kprobe,
- false,
- fd_targets[NETDATA_FD_SYSCALL_CLOSE].name);
- ret = libbpf_get_error(obj->links.netdata___close_fd_kprobe);
- if (ret)
- return -1;
- }
-
- return 0;
-}
-
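-/*
- * A standalone sketch of the entry/exit attach pattern implemented by
- * ebpf_fd_attach_probe() above. `p_entry`, `p_exit` and `target` are
- * hypothetical placeholders. The boolean selects a kprobe (false, function
- * entry) or a kretprobe (true, function return), and every link is validated
- * with libbpf_get_error() before continuing.
- */
-static int example_attach_entry_exit(struct bpf_program *p_entry,
-                                     struct bpf_program *p_exit,
-                                     const char *target)
-{
-    struct bpf_link *entry = bpf_program__attach_kprobe(p_entry, false, target);
-    if (libbpf_get_error(entry))
-        return -1;
-
-    struct bpf_link *ret = bpf_program__attach_kprobe(p_exit, true, target);
-    if (libbpf_get_error(ret))
-        return -1;
-
-    return 0;
-}
-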
-/**
- * FD Fill Address
- *
- * Fill the address value used to load probes/trampolines.
- *
- * @param address address structure that will be filled.
- * @param targets vector with candidate function names.
- */
-static inline void ebpf_fd_fill_address(ebpf_addresses_t *address, char **targets)
-{
- int i;
- for (i = 0; i < NETDATA_EBPF_MAX_FD_TARGETS; i++) {
- address->function = targets[i];
- ebpf_load_addresses(address, -1);
- if (address->addr)
- break;
- }
-}
-
-/**
- * Set target values
- *
- * Set pointers used to load data.
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_fd_set_target_values()
-{
- ebpf_addresses_t address = {.function = NULL, .hash = 0, .addr = 0};
- ebpf_fd_fill_address(&address, close_targets);
-
- if (!address.addr)
- return -1;
-
- fd_targets[NETDATA_FD_SYSCALL_CLOSE].name = address.function;
-
- address.addr = 0;
- ebpf_fd_fill_address(&address, open_targets);
-
- if (!address.addr)
- return -1;
-
- fd_targets[NETDATA_FD_SYSCALL_OPEN].name = address.function;
-
- return 0;
-}
-
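-/*
- * Why a candidate list is probed above: the kernel helper behind close() was
- * renamed across releases (__close_fd in older kernels, close_fd from around
- * Linux 5.11), so the loader keeps the first symbol that actually exists.
- * The sketch below is a hypothetical stand-in for ebpf_load_addresses(),
- * scanning /proc/kallsyms with a loose substring match; it is not the
- * module's implementation.
- */
-#include <stdio.h>
-#include <string.h>
-
-static const char *example_pick_symbol(const char **candidates, size_t n)
-{
-    FILE *fp = fopen("/proc/kallsyms", "r");
-    if (!fp)
-        return NULL;
-
-    const char *found = NULL;
-    char line[256];
-    for (size_t i = 0; i < n && !found; i++) {
-        rewind(fp);
-        while (fgets(line, sizeof(line), fp)) {
-            if (strstr(line, candidates[i])) {   // loose match, good enough here
-                found = candidates[i];
-                break;
-            }
-        }
-    }
-
-    fclose(fp);
-    return found;
-}
-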
-/**
- * Set hash tables
- *
- * Set the map values according to the values given by the kernel.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_fd_set_hash_tables(struct fd_bpf *obj)
-{
- fd_maps[NETDATA_FD_GLOBAL_STATS].map_fd = bpf_map__fd(obj->maps.tbl_fd_global);
- fd_maps[NETDATA_FD_PID_STATS].map_fd = bpf_map__fd(obj->maps.tbl_fd_pid);
- fd_maps[NETDATA_FD_CONTROLLER].map_fd = bpf_map__fd(obj->maps.fd_ctrl);
-}
-
-/**
- * Adjust Map Size
- *
- * Resize maps according to input from users.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- */
-static void ebpf_fd_adjust_map(struct fd_bpf *obj, ebpf_module_t *em)
-{
- ebpf_update_map_size(obj->maps.tbl_fd_pid, &fd_maps[NETDATA_FD_PID_STATS],
- em, bpf_map__name(obj->maps.tbl_fd_pid));
-
- ebpf_update_map_type(obj->maps.tbl_fd_global, &fd_maps[NETDATA_FD_GLOBAL_STATS]);
- ebpf_update_map_type(obj->maps.tbl_fd_pid, &fd_maps[NETDATA_FD_PID_STATS]);
- ebpf_update_map_type(obj->maps.fd_ctrl, &fd_maps[NETDATA_FD_CONTROLLER]);
-}
-
-/**
- * Disable Release Task
- *
- * Disable the release_task programs when neither apps nor cgroup charts are enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_fd_disable_release_task(struct fd_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_release_task_fd_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_fd_fentry, false);
-}
-
-/**
- * Load and attach
- *
- * Load and attach the eBPF code in the kernel.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- *
- * @return it returns 0 on success and -1 otherwise
- */
-static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em)
-{
- netdata_ebpf_targets_t *mt = em->targets;
- netdata_ebpf_program_loaded_t test = mt[NETDATA_FD_SYSCALL_OPEN].mode;
-
- if (ebpf_fd_set_target_values()) {
- netdata_log_error("%s file descriptor.", NETDATA_EBPF_DEFAULT_FNT_NOT_FOUND);
- return -1;
- }
-
- if (test == EBPF_LOAD_TRAMPOLINE) {
- ebpf_fd_disable_probes(obj);
- ebpf_disable_specific_trampoline(obj);
-
- ebpf_set_trampoline_target(obj);
- // TODO: Remove this in next PR, because this specific trampoline has an error.
- bpf_program__set_autoload(obj->progs.netdata_release_task_fd_fentry, false);
- } else {
- ebpf_disable_trampoline(obj);
- ebpf_disable_specific_probes(obj);
- }
-
- ebpf_fd_adjust_map(obj, em);
-
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_fd_disable_release_task(obj);
-
- int ret = fd_bpf__load(obj);
- if (ret) {
- return ret;
- }
-
- ret = (test == EBPF_LOAD_TRAMPOLINE) ? fd_bpf__attach(obj) : ebpf_fd_attach_probe(obj);
- if (!ret) {
- ebpf_fd_set_hash_tables(obj);
-
- ebpf_update_controller(fd_maps[NETDATA_FD_CONTROLLER].map_fd, em);
- }
-
- return ret;
-}
-#endif
-
-/*****************************************************************
- *
- * FUNCTIONS TO CLOSE THE THREAD
- *
- *****************************************************************/
-
-static void ebpf_obsolete_specific_fd_charts(char *type, ebpf_module_t *em);
-
-/**
- * Obsolete services
- *
- * Obsolete all service charts created
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_fd_services(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_FILE_OPEN,
- "",
- "Number of open files",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_CGROUP_FD_OPEN_CONTEXT,
- 20270,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR,
- "",
- "Fails to open files",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT,
- 20271,
- em->update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_FILE_CLOSED,
- "",
- "Files closed",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_CGROUP_FD_CLOSE_CONTEXT,
- 20272,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR,
- "",
- "Fails to close files",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT,
- 20273,
- em->update_every);
- }
-}
-
-/**
- * Obsolete cgroup chart
- *
- * Mark as obsolete all cgroup charts created, before closing the thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static inline void ebpf_obsolete_fd_cgroup_charts(ebpf_module_t *em) {
- pthread_mutex_lock(&mutex_cgroup_shm);
-
- ebpf_obsolete_fd_services(em);
-
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- ebpf_obsolete_specific_fd_charts(ect->name, em);
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Obsolete apps charts
- *
- * Mark the apps charts as obsolete.
- *
- * @param em a pointer to the structure with the default values.
- */
-void ebpf_obsolete_fd_apps_charts(struct ebpf_module *em)
-{
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = apps_groups_root_target; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_FD_IDX))))
- continue;
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_file_open",
- "Number of open files",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_FDS,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_file_open",
- 20220,
- update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_file_open_error",
- "Fails to open files.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_FDS,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_file_open_error",
- 20221,
- update_every);
- }
-
-        ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_file_closed",
- "Files closed.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_FDS,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_file_closed",
- 20222,
- update_every);
-
- if (em->mode < MODE_ENTRY) {
-            ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_file_close_error",
- "Fails to close files.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_FDS,
- NETDATA_EBPF_CHART_TYPE_STACKED,
-                                      "app.ebpf_file_close_error",
- 20223,
- update_every);
- }
- w->charts_created &= ~(1<<EBPF_MODULE_FD_IDX);
- }
-}
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by the thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_fd_global(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_FILE_OPEN_CLOSE_COUNT,
- "",
- "Open and close calls",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_FILE_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_EBPF_FD_CHARTS,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_FILE_OPEN_ERR_COUNT,
- "",
- "Open fails",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_FILE_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_EBPF_FD_CHARTS + 1,
- em->update_every);
- }
-}
-
-/**
- * FD Exit
- *
- * Cancel child thread and exit.
- *
- * @param ptr thread data.
- */
-static void ebpf_fd_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
- if (em->cgroup_charts) {
- ebpf_obsolete_fd_cgroup_charts(em);
- fflush(stdout);
- }
-
- if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
- ebpf_obsolete_fd_apps_charts(em);
- }
-
- ebpf_obsolete_fd_global(em);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_fd_pid)
- ebpf_statistic_obsolete_aral_chart(em, fd_disable_priority);
-#endif
-
- fflush(stdout);
- pthread_mutex_unlock(&lock);
- }
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
-
-#ifdef LIBBPF_MAJOR_VERSION
- if (fd_bpf_obj) {
- fd_bpf__destroy(fd_bpf_obj);
- fd_bpf_obj = NULL;
- }
-#endif
- if (em->objects) {
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/*****************************************************************
- *
- * MAIN LOOP
- *
- *****************************************************************/
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param em the structure with thread information
- */
-static void ebpf_fd_send_data(ebpf_module_t *em)
-{
- fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].ncall = fd_hash_values[NETDATA_KEY_CALLS_DO_SYS_OPEN];
- fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].nerr = fd_hash_values[NETDATA_KEY_ERROR_DO_SYS_OPEN];
-
- fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].ncall = fd_hash_values[NETDATA_KEY_CALLS_CLOSE_FD];
- fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].nerr = fd_hash_values[NETDATA_KEY_ERROR_CLOSE_FD];
-
- write_count_chart(NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_FILESYSTEM_FAMILY, fd_publish_aggregated,
- NETDATA_FD_SYSCALL_END);
-
- if (em->mode < MODE_ENTRY) {
- write_err_chart(NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_FILESYSTEM_FAMILY,
- fd_publish_aggregated, NETDATA_FD_SYSCALL_END);
- }
-}
-
-/**
- * Read global counter
- *
- * Read the table with the number of calls for all functions.
- *
- * @param stats vector used to read data from control table.
- * @param maps_per_core do I need to read all cores?
- */
-static void ebpf_fd_read_global_tables(netdata_idx_t *stats, int maps_per_core)
-{
- ebpf_read_global_table_stats(fd_hash_values,
- fd_values,
- fd_maps[NETDATA_FD_GLOBAL_STATS].map_fd,
- maps_per_core,
- NETDATA_KEY_CALLS_DO_SYS_OPEN,
- NETDATA_FD_COUNTER);
-
- ebpf_read_global_table_stats(stats,
- fd_values,
- fd_maps[NETDATA_FD_CONTROLLER].map_fd,
- maps_per_core,
- NETDATA_CONTROLLER_PID_TABLE_ADD,
- NETDATA_CONTROLLER_END);
-}
-
-/**
- * Apps Accumulator
- *
- * Sum all values read from kernel and store in the first address.
- *
- * @param out the vector with read values.
- * @param maps_per_core do I need to read all cores?
- */
-static void fd_apps_accumulator(netdata_fd_stat_t *out, int maps_per_core)
-{
- int i, end = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_fd_stat_t *total = &out[0];
- for (i = 1; i < end; i++) {
- netdata_fd_stat_t *w = &out[i];
- total->open_call += w->open_call;
- total->close_call += w->close_call;
- total->open_err += w->open_err;
- total->close_err += w->close_err;
- }
-}
-
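-/*
- * A self-contained illustration of why the accumulator above exists: looking
- * up an entry of a per-CPU map returns one slot per possible CPU, so user
- * space must allocate that many slots and sum them. `map_fd` and `key` are
- * hypothetical, and the real module stores netdata_fd_stat_t, not long long.
- */
-static long long example_sum_percpu(int map_fd, uint32_t key)
-{
-    int ncpus = libbpf_num_possible_cpus();
-    if (ncpus < 1)
-        return -1;
-
-    long long values[ncpus];                 // one slot per possible CPU
-    if (bpf_map_lookup_elem(map_fd, &key, values))
-        return -1;
-
-    long long total = 0;
-    for (int i = 0; i < ncpus; i++)
-        total += values[i];
-
-    return total;
-}
-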
-/**
- * Fill PID
- *
- * Fill PID structures
- *
- * @param current_pid pid for which we are collecting data
- * @param publish values read from hash tables.
- */
-static void fd_fill_pid(uint32_t current_pid, netdata_fd_stat_t *publish)
-{
- netdata_fd_stat_t *curr = fd_pid[current_pid];
- if (!curr) {
- curr = ebpf_fd_stat_get();
- fd_pid[current_pid] = curr;
- }
-
- memcpy(curr, &publish[0], sizeof(netdata_fd_stat_t));
-}
-
-/**
- * Read APPS table
- *
- * Read the apps table and store data inside the structure.
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void read_fd_apps_table(int maps_per_core)
-{
- netdata_fd_stat_t *fv = fd_vector;
- uint32_t key;
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
- int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd;
- size_t length = sizeof(netdata_fd_stat_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- while (pids) {
- key = pids->pid;
-
- if (bpf_map_lookup_elem(fd, &key, fv)) {
- pids = pids->next;
- continue;
- }
-
- fd_apps_accumulator(fv, maps_per_core);
-
- fd_fill_pid(key, fv);
-
-        // We clean the vector to avoid passing data read from one process to another.
- memset(fv, 0, length);
-
- pids = pids->next;
- }
-}
-
-/**
- * Update cgroup
- *
- * Update cgroup data collected per PID.
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void ebpf_update_fd_cgroup(int maps_per_core)
-{
-    ebpf_cgroup_target_t *ect;
- netdata_fd_stat_t *fv = fd_vector;
- int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd;
- size_t length = sizeof(netdata_fd_stat_t) * ebpf_nprocs;
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- struct pid_on_target2 *pids;
- for (pids = ect->pids; pids; pids = pids->next) {
- int pid = pids->pid;
- netdata_fd_stat_t *out = &pids->fd;
- if (likely(fd_pid) && fd_pid[pid]) {
- netdata_fd_stat_t *in = fd_pid[pid];
-
- memcpy(out, in, sizeof(netdata_fd_stat_t));
- } else {
- memset(fv, 0, length);
- if (!bpf_map_lookup_elem(fd, &pid, fv)) {
- fd_apps_accumulator(fv, maps_per_core);
-
- memcpy(out, fv, sizeof(netdata_fd_stat_t));
- }
- }
- }
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Sum PIDs
- *
- * Sum values for all targets.
- *
- * @param fd the output
- * @param root list of pids
- */
-static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct ebpf_pid_on_target *root)
-{
- uint32_t open_call = 0;
- uint32_t close_call = 0;
- uint32_t open_err = 0;
- uint32_t close_err = 0;
-
- while (root) {
- int32_t pid = root->pid;
- netdata_fd_stat_t *w = fd_pid[pid];
- if (w) {
- open_call += w->open_call;
- close_call += w->close_call;
- open_err += w->open_err;
- close_err += w->close_err;
- }
-
- root = root->next;
- }
-
-    // These conditions exist because we use an incremental algorithm: values must never move backwards.
- fd->open_call = (open_call >= fd->open_call) ? open_call : fd->open_call;
- fd->close_call = (close_call >= fd->close_call) ? close_call : fd->close_call;
- fd->open_err = (open_err >= fd->open_err) ? open_err : fd->open_err;
- fd->close_err = (close_err >= fd->close_err) ? close_err : fd->close_err;
-}
-
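-/*
- * The clamp pattern above, in isolation: Netdata's incremental algorithm
- * charts the difference between consecutive submissions, so a dimension that
- * moved backwards (for example, after a PID exits and its counters vanish)
- * would be rendered as a counter reset. Keeping the running maximum avoids
- * that. Hypothetical helper:
- */
-static inline uint32_t example_monotonic(uint32_t previous, uint32_t current)
-{
-    // never let an incremental dimension go backwards
-    return (current >= previous) ? current : previous;
-}
-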
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param em the structure with thread information
- * @param root the target list.
- */
-void ebpf_fd_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
-{
- struct ebpf_target *w;
- for (w = root; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_FD_IDX))))
- continue;
-
- ebpf_fd_sum_pids(&w->fd, w->root_pid);
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_file_open");
- write_chart_dimension("calls", w->fd.open_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_file_open_error");
- write_chart_dimension("calls", w->fd.open_err);
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_file_closed");
- write_chart_dimension("calls", w->fd.close_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_file_close_error");
- write_chart_dimension("calls", w->fd.close_err);
- ebpf_write_end_chart();
- }
- }
-}
-
-/**
- * Sum PIDs
- *
- * Sum values for all targets.
- *
- * @param fd structure used to store data
- * @param pids input data
- */
-static void ebpf_fd_sum_cgroup_pids(netdata_fd_stat_t *fd, struct pid_on_target2 *pids)
-{
- netdata_fd_stat_t accumulator;
- memset(&accumulator, 0, sizeof(accumulator));
-
- while (pids) {
- netdata_fd_stat_t *w = &pids->fd;
-
- accumulator.open_err += w->open_err;
- accumulator.open_call += w->open_call;
- accumulator.close_call += w->close_call;
- accumulator.close_err += w->close_err;
-
- pids = pids->next;
- }
-
- fd->open_call = (accumulator.open_call >= fd->open_call) ? accumulator.open_call : fd->open_call;
- fd->open_err = (accumulator.open_err >= fd->open_err) ? accumulator.open_err : fd->open_err;
- fd->close_call = (accumulator.close_call >= fd->close_call) ? accumulator.close_call : fd->close_call;
- fd->close_err = (accumulator.close_err >= fd->close_err) ? accumulator.close_err : fd->close_err;
-}
-
-/**
- * Create specific file descriptor charts
- *
- * Create charts for cgroup/application.
- *
- * @param type the chart type.
- * @param em the main thread structure.
- */
-static void ebpf_create_specific_fd_charts(char *type, ebpf_module_t *em)
-{
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN, "Number of open files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_CGROUP_FD_OPEN_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5400,
- ebpf_create_global_dimension,
- &fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_FD);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5401,
- ebpf_create_global_dimension,
- &fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN],
- 1, em->update_every,
- NETDATA_EBPF_MODULE_NAME_FD);
- }
-
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_CGROUP_FD_CLOSE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5402,
- ebpf_create_global_dimension,
- &fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_FD);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5403,
- ebpf_create_global_dimension,
- &fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE],
- 1, em->update_every,
- NETDATA_EBPF_MODULE_NAME_FD);
- }
-}
-
-/**
- * Obsolete specific file descriptor charts
- *
- * Obsolete charts for cgroup/application.
- *
- * @param type the chart type.
- * @param em the main thread structure.
- */
-static void ebpf_obsolete_specific_fd_charts(char *type, ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_OPEN, "", "Number of open files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_OPEN_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5400, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "", "Fails to open files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5401, em->update_every);
- }
-
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_CLOSED, "", "Files closed",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_CLOSE_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5402, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "", "Fails to close files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5403, em->update_every);
- }
-}
-
-/**
- * Send specific file descriptor data
- *
- * Send data for a specific cgroup/app.
- *
- * @param type   chart type
- * @param values structure with values that will be sent to netdata
- * @param em     the main thread structure.
- */
-static void ebpf_send_specific_fd_data(char *type, netdata_fd_stat_t *values, ebpf_module_t *em)
-{
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN, "");
- write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].name, (long long)values->open_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "");
- write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].name, (long long)values->open_err);
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSED, "");
- write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].name, (long long)values->close_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "");
- write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].name, (long long)values->close_err);
- ebpf_write_end_chart();
- }
-}
-
-/**
- * Create systemd file descriptor charts
- *
- * Create charts when systemd is enabled
- *
- * @param em the main collector structure
- **/
-static void ebpf_create_systemd_fd_charts(ebpf_module_t *em)
-{
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_OPEN, "Number of open files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20061,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_OPEN_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_FD, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20062,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_OPEN_ERR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_FD, em->update_every);
- }
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20063,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_CLOSE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_FD, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20064,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_FD, em->update_every);
- }
-}
-
-/**
- * Send Systemd charts
- *
- * Send collected data to Netdata.
- *
- * @param em the main collector structure
- */
-static void ebpf_send_systemd_fd_charts(ebpf_module_t *em)
-{
- ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_fd.open_call);
- }
- }
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_fd.open_err);
- }
- }
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_fd.close_call);
- }
- }
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_fd.close_err);
- }
- }
- ebpf_write_end_chart();
- }
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param em the main collector structure
- */
-static void ebpf_fd_send_cgroup_data(ebpf_module_t *em)
-{
- if (!ebpf_cgroup_pids)
- return;
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- ebpf_fd_sum_cgroup_pids(&ect->publish_systemd_fd, ect->pids);
- }
-
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
- if (send_cgroup_chart) {
- ebpf_create_systemd_fd_charts(em);
- }
-
- ebpf_send_systemd_fd_charts(em);
- }
-
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_FD_CHART) && ect->updated) {
- ebpf_create_specific_fd_charts(ect->name, em);
- ect->flags |= NETDATA_EBPF_CGROUP_HAS_FD_CHART;
- }
-
-        if (ect->flags & NETDATA_EBPF_CGROUP_HAS_FD_CHART) {
- if (ect->updated) {
- ebpf_send_specific_fd_data(ect->name, &ect->publish_systemd_fd, em);
- } else {
- ebpf_obsolete_specific_fd_charts(ect->name, em);
- ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_FD_CHART;
- }
- }
- }
-
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Main loop for this collector.
- */
-static void fd_collector(ebpf_module_t *em)
-{
- int cgroups = em->cgroup_charts;
- heartbeat_t hb;
- heartbeat_init(&hb);
- int update_every = em->update_every;
- int counter = update_every - 1;
- int maps_per_core = em->maps_per_core;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- netdata_idx_t *stats = em->hash_table_stats;
- memset(stats, 0, sizeof(em->hash_table_stats));
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
-
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_fd_read_global_tables(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps)
- read_fd_apps_table(maps_per_core);
-
- if (cgroups)
- ebpf_update_fd_cgroup(maps_per_core);
-
- pthread_mutex_lock(&lock);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_fd_pid)
- ebpf_send_data_aral_chart(ebpf_aral_fd_pid, em);
-#endif
-
- ebpf_fd_send_data(em);
-
- if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
- ebpf_fd_send_apps_data(em, apps_groups_root_target);
-
- if (cgroups)
- ebpf_fd_send_cgroup_data(em);
-
- pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
-
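-/*
- * The collection cadence used by fd_collector() above, reduced to a
- * hypothetical helper: the heartbeat fires once per second, but work happens
- * only every `update_every` ticks, and seeding the counter with
- * update_every - 1 makes the very first tick collect immediately.
- */
-static inline int example_should_collect(int *counter, int update_every)
-{
-    if (++(*counter) != update_every)
-        return 0;                            // not our tick yet
-
-    *counter = 0;                            // collect and restart the window
-    return 1;
-}
-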
-/*****************************************************************
- *
- * CREATE CHARTS
- *
- *****************************************************************/
-
-/**
- * Create apps charts
- *
- * Call ebpf_create_chart to create the charts on apps submenu.
- *
- * @param em  a pointer to the structure with the default values.
- * @param ptr a pointer to the apps targets list.
- */
-void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr)
-{
- struct ebpf_target *root = ptr;
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = root; w; w = w->next) {
- if (unlikely(!w->exposed))
- continue;
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_file_open",
- "Number of open files",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_FDS,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_file_open",
- 20220,
- update_every,
- NETDATA_EBPF_MODULE_NAME_FD);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_file_open_error",
- "Fails to open files.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_FDS,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_file_open_error",
- 20221,
- update_every,
- NETDATA_EBPF_MODULE_NAME_FD);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
- }
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_file_closed",
- "Files closed.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_FDS,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_file_closed",
- 20222,
- update_every,
- NETDATA_EBPF_MODULE_NAME_FD);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_file_close_error",
- "Fails to close files.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_FDS,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_file_close_error",
- 20223,
- update_every,
- NETDATA_EBPF_MODULE_NAME_FD);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
- }
-
- w->charts_created |= 1<<EBPF_MODULE_FD_IDX;
- }
-
- em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED;
-}
-
-/**
- * Create global charts
- *
- * Call ebpf_create_chart to create the charts for the collector.
- *
- * @param em a pointer to the structure with the default values.
- */
-static void ebpf_create_fd_global_charts(ebpf_module_t *em)
-{
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_FILE_OPEN_CLOSE_COUNT,
- "Open and close calls",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_FILE_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_EBPF_FD_CHARTS,
- ebpf_create_global_dimension,
- fd_publish_aggregated,
- NETDATA_FD_SYSCALL_END,
- em->update_every, NETDATA_EBPF_MODULE_NAME_FD);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_FILE_OPEN_ERR_COUNT,
- "Open fails",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_FILE_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_EBPF_FD_CHARTS + 1,
- ebpf_create_global_dimension,
- fd_publish_aggregated,
- NETDATA_FD_SYSCALL_END,
- em->update_every, NETDATA_EBPF_MODULE_NAME_FD);
- }
-
- fflush(stdout);
-}
-
-/*****************************************************************
- *
- * MAIN THREAD
- *
- *****************************************************************/
-
-/**
- * Allocate vectors used with this thread.
- *
- * We are not testing the return, because callocz does this and shuts down the software
- * in case it was not possible to allocate.
- *
- * @param apps is apps enabled?
- */
-static void ebpf_fd_allocate_global_vectors(int apps)
-{
- if (apps) {
- ebpf_fd_aral_init();
- fd_pid = callocz((size_t)pid_max, sizeof(netdata_fd_stat_t *));
- fd_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_fd_stat_t));
- }
-
- fd_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
-}
-
-/**
- * Load BPF
- *
- * Load BPF files.
- *
- * @param em the structure with configuration
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_fd_load_bpf(ebpf_module_t *em)
-{
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(fd_maps, em->maps_per_core, running_on_kernel);
-#endif
-
- int ret = 0;
- ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_FD_SYSCALL_OPEN].mode);
- if (em->load & EBPF_LOAD_LEGACY) {
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- ret = -1;
- }
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- fd_bpf_obj = fd_bpf__open();
- if (!fd_bpf_obj)
- ret = -1;
- else
- ret = ebpf_fd_load_and_attach(fd_bpf_obj, em);
- }
-#endif
-
- if (ret)
- netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name);
-
- return ret;
-}
-
-/**
- * File descriptor thread
- *
- * Thread used to collect file descriptor metrics.
- *
- * @param ptr a pointer to `struct ebpf_module`
- *
- * @return It always returns NULL
- */
-void *ebpf_fd_thread(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_fd_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = fd_maps;
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_fd_load_bpf(em)) {
- goto endfd;
- }
-
- ebpf_fd_allocate_global_vectors(em->apps_charts);
-
- int algorithms[NETDATA_FD_SYSCALL_END] = {
- NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX
- };
-
- ebpf_global_labels(fd_aggregated_data, fd_publish_aggregated, fd_dimension_names, fd_id_names,
- algorithms, NETDATA_FD_SYSCALL_END);
-
- pthread_mutex_lock(&lock);
- ebpf_create_fd_global_charts(em);
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_fd_pid)
- fd_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_FD_ARAL_NAME, em);
-#endif
-
- pthread_mutex_unlock(&lock);
-
- fd_collector(em);
-
-endfd:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_fd.h b/collectors/ebpf.plugin/ebpf_fd.h
deleted file mode 100644
index 00986673e..000000000
--- a/collectors/ebpf.plugin/ebpf_fd.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_FD_H
-#define NETDATA_EBPF_FD_H 1
-
-// Module name & File description
-#define NETDATA_EBPF_MODULE_NAME_FD "filedescriptor"
-#define NETDATA_EBPF_FD_MODULE_DESC "Monitor when files are opened and closed. This thread is integrated with apps and cgroup."
-
-// Menu group
-#define NETDATA_FILE_GROUP "file_access"
-
-// Global chart name
-#define NETDATA_FILE_OPEN_CLOSE_COUNT "file_descriptor"
-#define NETDATA_FILE_OPEN_ERR_COUNT "file_error"
-
-// Charts created on Apps submenu
-#define NETDATA_SYSCALL_APPS_FILE_OPEN "file_open"
-#define NETDATA_SYSCALL_APPS_FILE_CLOSED "file_closed"
-#define NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR "file_open_error"
-#define NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR "file_close_error"
-
-// Process configuration name
-#define NETDATA_FD_CONFIG_FILE "fd.conf"
-
-// Contexts
-#define NETDATA_CGROUP_FD_OPEN_CONTEXT "cgroup.fd_open"
-#define NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT "cgroup.fd_open_error"
-#define NETDATA_CGROUP_FD_CLOSE_CONTEXT "cgroup.fd_close"
-#define NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT "cgroup.fd_close_error"
-
-#define NETDATA_SYSTEMD_FD_OPEN_CONTEXT "services.fd_open"
-#define NETDATA_SYSTEMD_FD_OPEN_ERR_CONTEXT "services.fd_open_error"
-#define NETDATA_SYSTEMD_FD_CLOSE_CONTEXT "services.fd_close"
-#define NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT "services.fd_close_error"
-
-// ARAL name
-#define NETDATA_EBPF_FD_ARAL_NAME "ebpf_fd"
-
-typedef struct netdata_fd_stat {
- uint32_t open_call; // Open syscalls (open and openat)
- uint32_t close_call; // Close syscall (close)
-
- // Errors
- uint32_t open_err;
- uint32_t close_err;
-} netdata_fd_stat_t;
-
-enum fd_tables {
- NETDATA_FD_PID_STATS,
- NETDATA_FD_GLOBAL_STATS,
-
- // Keep this as last and don't skip numbers as it is used as element counter
- NETDATA_FD_CONTROLLER
-};
-
-enum fd_counters {
- NETDATA_KEY_CALLS_DO_SYS_OPEN,
- NETDATA_KEY_ERROR_DO_SYS_OPEN,
-
- NETDATA_KEY_CALLS_CLOSE_FD,
- NETDATA_KEY_ERROR_CLOSE_FD,
-
- // Keep this as last and don't skip numbers as it is used as element counter
- NETDATA_FD_COUNTER
-};
-
-enum fd_syscalls {
- NETDATA_FD_SYSCALL_OPEN,
- NETDATA_FD_SYSCALL_CLOSE,
-
-    // Do not insert anything after this value
- NETDATA_FD_SYSCALL_END
-};
-
-enum fd_close_syscall {
- NETDATA_FD_CLOSE_FD,
- NETDATA_FD___CLOSE_FD,
-
- NETDATA_FD_CLOSE_END
-};
-
-#define NETDATA_EBPF_MAX_FD_TARGETS 2
-
-void *ebpf_fd_thread(void *ptr);
-void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr);
-void ebpf_fd_release(netdata_fd_stat_t *stat);
-extern struct config fd_config;
-extern netdata_ebpf_targets_t fd_targets[];
-
-#endif /* NETDATA_EBPF_FD_H */
-
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.c b/collectors/ebpf.plugin/ebpf_filesystem.c
deleted file mode 100644
index b78e65532..000000000
--- a/collectors/ebpf.plugin/ebpf_filesystem.c
+++ /dev/null
@@ -1,1029 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf_filesystem.h"
-
-struct config fs_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-ebpf_local_maps_t ext4_maps[] = {{.name = "tbl_ext4", .internal_input = NETDATA_KEY_CALLS_SYNC,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "tmp_ext4", .internal_input = 4192, .user_input = 4192,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- }};
-
-ebpf_local_maps_t xfs_maps[] = {{.name = "tbl_xfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "tmp_xfs", .internal_input = 4192, .user_input = 4192,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- }};
-
-ebpf_local_maps_t nfs_maps[] = {{.name = "tbl_nfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "tmp_nfs", .internal_input = 4192, .user_input = 4192,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- }};
-
-ebpf_local_maps_t zfs_maps[] = {{.name = "tbl_zfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "tmp_zfs", .internal_input = 4192, .user_input = 4192,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- }};
-
-ebpf_local_maps_t btrfs_maps[] = {{.name = "tbl_btrfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "tbl_ext_addr", .internal_input = 1, .user_input = 1,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = "tmp_btrfs", .internal_input = 4192, .user_input = 4192,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- }};
-
-static netdata_syscall_stat_t filesystem_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS];
-static netdata_publish_syscall_t filesystem_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS];
-
-char **dimensions = NULL;
-static netdata_idx_t *filesystem_hash_values = NULL;
-
-#ifdef LIBBPF_MAJOR_VERSION
-/**
- * FS disable kprobe
- *
- * Disable kprobes when the system will use trampolines.
- * This function is not being called for now, because we are prioritizing kprobes. We took this road because
- * distributions still do not deliver the BTF files needed per filesystem.
- *
- * @param obj FS object loaded.
- */
-static void ebpf_fs_disable_kprobe(struct filesystem_bpf *obj)
-{
-    // kprobe
-    bpf_program__set_autoload(obj->progs.netdata_fs_file_read_probe, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_file_write_probe, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_file_open_probe, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_2nd_file_open_probe, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_getattr_probe, false);
-    // kretprobe
-    bpf_program__set_autoload(obj->progs.netdata_fs_file_read_retprobe, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_file_write_retprobe, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_file_open_retprobe, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_2nd_file_open_retprobe, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_getattr_retprobe, false);
-}
-
-/**
- * Disable trampoline
- *
- * Disable trampolines so that kprobes are used instead.
- *
- * @param obj FS object loaded.
- */
-static void ebpf_fs_disable_trampoline(struct filesystem_bpf *obj)
-{
-    // entry
-    bpf_program__set_autoload(obj->progs.netdata_fs_file_read_entry, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_file_write_entry, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_file_open_entry, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_getattr_entry, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_2nd_file_open_entry, false);
-
-    // exit
-    bpf_program__set_autoload(obj->progs.netdata_fs_file_read_exit, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_file_write_exit, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_file_open_exit, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_getattr_exit, false);
-    bpf_program__set_autoload(obj->progs.netdata_fs_2nd_file_open_exit, false);
-}
-
-/**
- * Set targets
- *
- * Set targets for each object.
- *
- * @param obj FS object loaded.
- * @param functions array with function names.
- */
-static void ebpf_fs_set_target(struct filesystem_bpf *obj, const char **functions)
-{
- // entry
- bpf_program__set_attach_target(obj->progs.netdata_fs_file_read_entry, 0,
- functions[NETDATA_KEY_BTF_READ]);
- bpf_program__set_attach_target(obj->progs.netdata_fs_file_write_entry, 0,
- functions[NETDATA_KEY_BTF_WRITE]);
- bpf_program__set_attach_target(obj->progs.netdata_fs_file_open_entry, 0,
- functions[NETDATA_KEY_BTF_OPEN]);
- bpf_program__set_attach_target(obj->progs.netdata_fs_getattr_entry, 0,
- functions[NETDATA_KEY_BTF_SYNC_ATTR]);
-
- // exit
- bpf_program__set_attach_target(obj->progs.netdata_fs_file_read_exit, 0,
- functions[NETDATA_KEY_BTF_READ]);
- bpf_program__set_attach_target(obj->progs.netdata_fs_file_write_exit, 0,
- functions[NETDATA_KEY_BTF_WRITE]);
- bpf_program__set_attach_target(obj->progs.netdata_fs_file_open_exit, 0,
- functions[NETDATA_KEY_BTF_OPEN]);
- bpf_program__set_attach_target(obj->progs.netdata_fs_getattr_exit, 0,
- functions[NETDATA_KEY_BTF_SYNC_ATTR]);
-
- if (functions[NETDATA_KEY_BTF_OPEN2]) {
- bpf_program__set_attach_target(obj->progs.netdata_fs_2nd_file_open_entry, 0,
- functions[NETDATA_KEY_BTF_OPEN2]);
- bpf_program__set_attach_target(obj->progs.netdata_fs_2nd_file_open_exit, 0,
- functions[NETDATA_KEY_BTF_OPEN2]);
- } else {
- bpf_program__set_autoload(obj->progs.netdata_fs_2nd_file_open_entry, false);
- bpf_program__set_autoload(obj->progs.netdata_fs_2nd_file_open_exit, false);
- }
-}
-
-/**
- * Attach Kprobe
- *
- * Attach kprobes to targets.
- *
- * @param obj FS object loaded.
- * @param functions array with function names.
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_fs_attach_kprobe(struct filesystem_bpf *obj, const char **functions)
-{
- // kprobe
- obj->links.netdata_fs_file_read_probe = bpf_program__attach_kprobe(obj->progs.netdata_fs_file_read_probe,
- false, functions[NETDATA_KEY_BTF_READ]);
- if (libbpf_get_error(obj->links.netdata_fs_file_read_probe))
- return -1;
-
- obj->links.netdata_fs_file_write_probe = bpf_program__attach_kprobe(obj->progs.netdata_fs_file_write_probe,
- false, functions[NETDATA_KEY_BTF_WRITE]);
- if (libbpf_get_error(obj->links.netdata_fs_file_write_probe))
- return -1;
-
- obj->links.netdata_fs_file_open_probe = bpf_program__attach_kprobe(obj->progs.netdata_fs_file_open_probe,
- false, functions[NETDATA_KEY_BTF_OPEN]);
- if (libbpf_get_error(obj->links.netdata_fs_file_open_probe))
- return -1;
-
- obj->links.netdata_fs_getattr_probe = bpf_program__attach_kprobe(obj->progs.netdata_fs_getattr_probe,
- false, functions[NETDATA_KEY_BTF_SYNC_ATTR]);
- if (libbpf_get_error(obj->links.netdata_fs_getattr_probe))
- return -1;
-
- // kretprobe
- obj->links.netdata_fs_file_read_retprobe = bpf_program__attach_kprobe(obj->progs.netdata_fs_file_read_retprobe,
- false, functions[NETDATA_KEY_BTF_READ]);
- if (libbpf_get_error(obj->links.netdata_fs_file_read_retprobe))
- return -1;
-
- obj->links.netdata_fs_file_write_retprobe = bpf_program__attach_kprobe(obj->progs.netdata_fs_file_write_retprobe,
- false, functions[NETDATA_KEY_BTF_WRITE]);
- if (libbpf_get_error(obj->links.netdata_fs_file_write_retprobe))
- return -1;
-
- obj->links.netdata_fs_file_open_retprobe = bpf_program__attach_kprobe(obj->progs.netdata_fs_file_open_retprobe,
- false, functions[NETDATA_KEY_BTF_OPEN]);
- if (libbpf_get_error(obj->links.netdata_fs_file_open_retprobe))
- return -1;
-
- obj->links.netdata_fs_getattr_retprobe = bpf_program__attach_kprobe(obj->progs.netdata_fs_getattr_retprobe,
- false, functions[NETDATA_KEY_BTF_SYNC_ATTR]);
- if (libbpf_get_error(obj->links.netdata_fs_getattr_retprobe))
- return -1;
-
- if (functions[NETDATA_KEY_BTF_OPEN2]) {
- obj->links.netdata_fs_2nd_file_open_probe = bpf_program__attach_kprobe(obj->progs.netdata_fs_2nd_file_open_probe,
- false, functions[NETDATA_KEY_BTF_OPEN2]);
- if (libbpf_get_error(obj->links.netdata_fs_2nd_file_open_probe))
- return -1;
-
- obj->links.netdata_fs_2nd_file_open_retprobe = bpf_program__attach_kprobe(obj->progs.netdata_fs_2nd_file_open_retprobe,
- false, functions[NETDATA_KEY_BTF_OPEN2]);
- if (libbpf_get_error(obj->links.netdata_fs_2nd_file_open_retprobe))
- return -1;
- }
-
- return 0;
-}
-
-/**
- * Load and Attach
- *
- * Load binary and attach to targets.
- *
- * @param map Structure with information about maps.
- * @param obj FS object loaded.
- * @param functions array with function names.
- * @param bf struct with the BTF file loaded.
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static inline int ebpf_fs_load_and_attach(ebpf_local_maps_t *map, struct filesystem_bpf *obj,
- const char **functions, struct btf *bf)
-{
- if (bf) {
- ebpf_fs_disable_kprobe(obj);
- ebpf_fs_set_target(obj, functions);
- } else {
- ebpf_fs_disable_trampoline(obj);
- }
-
- int ret = filesystem_bpf__load(obj);
- if (ret) {
- fprintf(stderr, "failed to load BPF object: %d\n", ret);
- return -1;
- }
-
- if (bf)
- ret = filesystem_bpf__attach(obj);
- else
- ret = ebpf_fs_attach_kprobe(obj, functions);
-
- if (!ret)
-        map->map_fd = bpf_map__fd(obj->maps.tbl_fs);
-
- return ret;
-}
-#endif
-
-/*****************************************************************
- *
- * COMMON FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * Obsolete filesystem charts
- *
- * Mark the latency charts as obsolete.
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_obsolete_fs_charts(int update_every)
-{
- int i;
- uint32_t test = NETDATA_FILESYSTEM_FLAG_CHART_CREATED | NETDATA_FILESYSTEM_REMOVE_CHARTS;
- for (i = 0; localfs[i].filesystem; i++) {
- ebpf_filesystem_partitions_t *efp = &localfs[i];
- uint32_t flags = efp->flags;
- if ((flags & test) == test) {
- flags &= ~NETDATA_FILESYSTEM_FLAG_CHART_CREATED;
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hread.name,
- "",
- efp->hread.title,
- EBPF_COMMON_DIMENSION_CALL, efp->family_name,
- NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hread.order, update_every);
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name,
- "",
- efp->hwrite.title,
- EBPF_COMMON_DIMENSION_CALL, efp->family_name,
- NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hwrite.order, update_every);
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name, "", efp->hopen.title,
- EBPF_COMMON_DIMENSION_CALL, efp->family_name,
- NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hopen.order, update_every);
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name,"", efp->hadditional.title,
- EBPF_COMMON_DIMENSION_CALL, efp->family_name,
- NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hadditional.order,
- update_every);
- }
- efp->flags = flags;
- }
-}
-
-/**
- * Create Filesystem chart
- *
- * Create latency charts
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_fs_charts(int update_every)
-{
- static int order = NETDATA_CHART_PRIO_EBPF_FILESYSTEM_CHARTS;
- char chart_name[64], title[256], family[64], ctx[64];
- int i;
- uint32_t test = NETDATA_FILESYSTEM_FLAG_CHART_CREATED|NETDATA_FILESYSTEM_REMOVE_CHARTS;
- for (i = 0; localfs[i].filesystem; i++) {
- ebpf_filesystem_partitions_t *efp = &localfs[i];
- uint32_t flags = efp->flags;
- if (flags & NETDATA_FILESYSTEM_FLAG_HAS_PARTITION && !(flags & test)) {
- snprintfz(title, sizeof(title) - 1, "%s latency for each read request.", efp->filesystem);
- snprintfz(family, sizeof(family) - 1, "%s_latency", efp->family);
- snprintfz(chart_name, sizeof(chart_name) - 1, "%s_read_latency", efp->filesystem);
- efp->hread.name = strdupz(chart_name);
- efp->hread.title = strdupz(title);
- efp->hread.ctx = NULL;
- efp->hread.order = order;
- efp->family_name = strdupz(family);
-
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hread.name,
- efp->hread.title,
- EBPF_COMMON_DIMENSION_CALL, efp->family_name,
- "filesystem.read_latency", NETDATA_EBPF_CHART_TYPE_STACKED, order,
- ebpf_create_global_dimension,
- filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
- update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM);
- order++;
-
- snprintfz(title, sizeof(title) - 1, "%s latency for each write request.", efp->filesystem);
- snprintfz(chart_name, sizeof(chart_name) - 1, "%s_write_latency", efp->filesystem);
- efp->hwrite.name = strdupz(chart_name);
- efp->hwrite.title = strdupz(title);
- efp->hwrite.ctx = NULL;
- efp->hwrite.order = order;
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name,
- efp->hwrite.title,
- EBPF_COMMON_DIMENSION_CALL, efp->family_name,
- "filesystem.write_latency", NETDATA_EBPF_CHART_TYPE_STACKED, order,
- ebpf_create_global_dimension,
- filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
- update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM);
- order++;
-
- snprintfz(title, sizeof(title) - 1, "%s latency for each open request.", efp->filesystem);
- snprintfz(chart_name, sizeof(chart_name) - 1, "%s_open_latency", efp->filesystem);
- efp->hopen.name = strdupz(chart_name);
- efp->hopen.title = strdupz(title);
- efp->hopen.ctx = NULL;
- efp->hopen.order = order;
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name,
- efp->hopen.title,
- EBPF_COMMON_DIMENSION_CALL, efp->family_name,
- "filesystem.open_latency", NETDATA_EBPF_CHART_TYPE_STACKED, order,
- ebpf_create_global_dimension,
- filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
- update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM);
- order++;
-
- char *type = (efp->flags & NETDATA_FILESYSTEM_ATTR_CHARTS) ? "attribute" : "sync";
- snprintfz(title, sizeof(title) - 1, "%s latency for each %s request.", efp->filesystem, type);
- snprintfz(chart_name, sizeof(chart_name) - 1, "%s_%s_latency", efp->filesystem, type);
- snprintfz(ctx, sizeof(ctx) - 1, "filesystem.%s_latency", type);
- efp->hadditional.name = strdupz(chart_name);
- efp->hadditional.title = strdupz(title);
- efp->hadditional.ctx = strdupz(ctx);
- efp->hadditional.order = order;
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name, efp->hadditional.title,
- EBPF_COMMON_DIMENSION_CALL, efp->family_name,
- ctx, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
- filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
- update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM);
- order++;
- efp->flags |= NETDATA_FILESYSTEM_FLAG_CHART_CREATED;
- }
- }
-
- fflush(stdout);
-}
-
-/**
- * Initialize eBPF data
- *
- * @param em main thread structure.
- *
- * @return it returns 0 on success and -1 otherwise.
- */
-int ebpf_filesystem_initialize_ebpf_data(ebpf_module_t *em)
-{
- pthread_mutex_lock(&lock);
- int i;
- const char *saved_name = em->info.thread_name;
- uint64_t kernels = em->kernels;
- for (i = 0; localfs[i].filesystem; i++) {
- ebpf_filesystem_partitions_t *efp = &localfs[i];
- if (!efp->probe_links && efp->flags & NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM) {
- em->info.thread_name = efp->filesystem;
- em->kernels = efp->kernels;
- em->maps = efp->fs_maps;
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
-#endif
- if (em->load & EBPF_LOAD_LEGACY) {
- efp->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &efp->objects);
- if (!efp->probe_links) {
- em->info.thread_name = saved_name;
- em->kernels = kernels;
- em->maps = NULL;
- pthread_mutex_unlock(&lock);
- return -1;
- }
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- efp->fs_obj = filesystem_bpf__open();
- if (!efp->fs_obj) {
- em->info.thread_name = saved_name;
- em->kernels = kernels;
- return -1;
- } else {
- if (ebpf_fs_load_and_attach(em->maps, efp->fs_obj,
- efp->functions, NULL))
- return -1;
- }
- }
-#endif
- efp->flags |= NETDATA_FILESYSTEM_FLAG_HAS_PARTITION;
- ebpf_update_kernel_memory(&plugin_statistics, efp->fs_maps, EBPF_ACTION_STAT_ADD);
-
-            // Needed for filesystems like btrfs
- if ((efp->flags & NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE) && (efp->addresses.function)) {
- ebpf_load_addresses(&efp->addresses, efp->fs_maps[NETDATA_ADDR_FS_TABLE].map_fd);
- }
- }
- efp->flags &= ~NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM;
- }
- em->info.thread_name = saved_name;
- pthread_mutex_unlock(&lock);
- em->kernels = kernels;
- em->maps = NULL;
-
- if (!dimensions) {
- dimensions = ebpf_fill_histogram_dimension(NETDATA_EBPF_HIST_MAX_BINS);
-
-        memset(filesystem_aggregated_data, 0, NETDATA_EBPF_HIST_MAX_BINS * sizeof(netdata_syscall_stat_t));
-        memset(filesystem_publish_aggregated, 0, NETDATA_EBPF_HIST_MAX_BINS * sizeof(netdata_publish_syscall_t));
-
- filesystem_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
- }
-
- return 0;
-}
-
-/**
- * Read Local partitions
- *
- * @return the total of partitions that will be monitored
- */
-static int ebpf_read_local_partitions()
-{
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/self/mountinfo", netdata_configured_host_prefix);
- procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) {
- snprintfz(filename, FILENAME_MAX, "%s/proc/1/mountinfo", netdata_configured_host_prefix);
- ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 0;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
- return 0;
-
- int count = 0;
- unsigned long l, i, lines = procfile_lines(ff);
- for (i = 0; localfs[i].filesystem; i++) {
- localfs[i].flags |= NETDATA_FILESYSTEM_REMOVE_CHARTS;
- }
-
- for(l = 0; l < lines ; l++) {
- // In "normal" situation the expected value is at column 7
- // When `shared` options is added to mount information, the filesystem is at column 8
- // Finally when we have systemd starting netdata, it will be at column 9
- unsigned long index = procfile_linewords(ff, l) - 3;
-
- char *fs = procfile_lineword(ff, l, index);
-
- for (i = 0; localfs[i].filesystem; i++) {
- ebpf_filesystem_partitions_t *w = &localfs[i];
- if (w->enabled && (!strcmp(fs, w->filesystem) ||
- (w->optional_filesystem && !strcmp(fs, w->optional_filesystem)))) {
- localfs[i].flags |= NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM;
- localfs[i].flags &= ~NETDATA_FILESYSTEM_REMOVE_CHARTS;
- count++;
- break;
- }
- }
- }
- procfile_close(ff);
-
- return count;
-}
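-
-/*
- * Worked example for the column arithmetic above (line taken from the
- * kernel's proc(5) documentation):
- *
- *   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext4 /dev/root rw,errors=continue
- *
- * This line has 11 words, so the filesystem type ("ext4") is found at
- * index 11 - 3 = 8, right after the "-" separator. Counting from the end
- * keeps the lookup independent of how many optional fields (such as
- * "shared:N") precede the separator.
- */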
-
-/**
- * Update partition
- *
- * Update the partition structures before plotting.
- *
- * @param em main thread structure
- *
- * @return 0 on success and -1 otherwise.
- */
-static int ebpf_update_partitions(ebpf_module_t *em)
-{
- static time_t update_every = 0;
- time_t curr = now_realtime_sec();
- if (curr < update_every)
- return 0;
-
- update_every = curr + 5 * em->update_every;
- if (!ebpf_read_local_partitions()) {
- em->optional = -1;
- return -1;
- }
-
- if (ebpf_filesystem_initialize_ebpf_data(em)) {
- return -1;
- }
-
- return 0;
-}
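-
-/*
- * Note that the static `update_every` above actually stores the next
- * deadline (now + 5 * em->update_every), so with em->update_every = 1 the
- * partitions are rescanned at most every five seconds. A minimal
- * standalone sketch of the same pattern, kept out of the build with #if 0
- * (illustrative only; now_realtime_sec() is the only netdata call assumed):
- */
-#if 0
-static int run_every_n_seconds(time_t n)
-{
-    static time_t deadline = 0;
-    time_t now = now_realtime_sec();
-    if (now < deadline)
-        return 0;       // too early, skip this cycle
-    deadline = now + n; // arm the next deadline
-    return 1;           // caller may do the expensive work now
-}
-#endif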
-
-/*****************************************************************
- *
- * CLEANUP FUNCTIONS
- *
- *****************************************************************/
-
-/*
- * Cleanup eBPF data
- */
-void ebpf_filesystem_cleanup_ebpf_data()
-{
- int i;
- for (i = 0; localfs[i].filesystem; i++) {
- ebpf_filesystem_partitions_t *efp = &localfs[i];
- if (efp->probe_links) {
- freez(efp->family_name);
- efp->family_name = NULL;
-
- freez(efp->hread.name);
- efp->hread.name = NULL;
- freez(efp->hread.title);
- efp->hread.title = NULL;
-
- freez(efp->hwrite.name);
- efp->hwrite.name = NULL;
- freez(efp->hwrite.title);
- efp->hwrite.title = NULL;
-
- freez(efp->hopen.name);
- efp->hopen.name = NULL;
- freez(efp->hopen.title);
- efp->hopen.title = NULL;
-
- freez(efp->hadditional.name);
- efp->hadditional.name = NULL;
- freez(efp->hadditional.title);
- efp->hadditional.title = NULL;
- freez(efp->hadditional.ctx);
- efp->hadditional.ctx = NULL;
- }
- }
-}
-
-/**
- * Obsolete global
- *
- * Mark as obsolete the global charts created by this thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_filesystem_global(ebpf_module_t *em)
-{
- int i;
- for (i = 0; localfs[i].filesystem; i++) {
- ebpf_filesystem_partitions_t *efp = &localfs[i];
- if (!efp->objects)
- continue;
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- efp->hread.name,
- "",
- efp->hread.title,
- EBPF_COMMON_DIMENSION_CALL,
- efp->family_name,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "filesystem.read_latency",
- efp->hread.order,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- efp->hwrite.name,
- "",
- efp->hwrite.title,
- EBPF_COMMON_DIMENSION_CALL,
- efp->family_name,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "filesystem.write_latency",
- efp->hwrite.order,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- efp->hopen.name,
- "",
- efp->hopen.title,
- EBPF_COMMON_DIMENSION_CALL,
- efp->family_name,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "filesystem.open_latency",
- efp->hopen.order,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- efp->hadditional.name,
- "",
- efp->hadditional.title,
- EBPF_COMMON_DIMENSION_CALL,
- efp->family_name,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- efp->hadditional.ctx,
- efp->hadditional.order,
- em->update_every);
- }
-}
-
-/**
- * Filesystem exit
- *
- * Cancel child thread.
- *
- * @param ptr thread data.
- */
-static void ebpf_filesystem_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
- ebpf_obsolete_filesystem_global(em);
-
- pthread_mutex_unlock(&lock);
- fflush(stdout);
- }
-
- ebpf_filesystem_cleanup_ebpf_data();
- if (dimensions) {
- ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS);
- dimensions = NULL;
- }
-
- freez(filesystem_hash_values);
-
- int i;
- for (i = 0; localfs[i].filesystem; i++) {
- ebpf_filesystem_partitions_t *efp = &localfs[i];
- if (!efp->probe_links)
- continue;
-
- ebpf_unload_legacy_code(efp->objects, efp->probe_links);
- efp->objects = NULL;
- efp->probe_links = NULL;
- efp->flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/*****************************************************************
- *
- * MAIN THREAD
- *
- *****************************************************************/
-
-/**
- * Select hist
- *
- * Select a histogram to store data.
- *
- * @param efp pointer to the structure with the histograms.
- * @param idx address where the bin index inside the selected histogram is stored.
- * @param id  histogram selector
- *
- * @return It returns a pointer to the selected histogram.
- */
-static inline netdata_ebpf_histogram_t *select_hist(ebpf_filesystem_partitions_t *efp, uint32_t *idx, uint32_t id)
-{
- if (id < NETDATA_KEY_CALLS_READ) {
- *idx = id;
- return &efp->hread;
- } else if (id < NETDATA_KEY_CALLS_WRITE) {
- *idx = id - NETDATA_KEY_CALLS_READ;
- return &efp->hwrite;
- } else if (id < NETDATA_KEY_CALLS_OPEN) {
- *idx = id - NETDATA_KEY_CALLS_WRITE;
- return &efp->hopen;
-    } else if (id < NETDATA_KEY_CALLS_SYNC) {
- *idx = id - NETDATA_KEY_CALLS_OPEN;
- return &efp->hadditional;
- }
-
- return NULL;
-}
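-
-/*
- * Worked example: with the boundaries defined in ebpf_filesystem.h
- * (NETDATA_KEY_CALLS_READ = 24, WRITE = 48, OPEN = 72, SYNC = 96),
- * key 50 falls in [48, 72), so select_hist() returns &efp->hopen with
- * *idx = 50 - 48 = 2, i.e. the third latency bin of the open histogram.
- */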
-
-/**
- * Read filesystem table
- *
- * Read the table with the number of calls for all functions.
- *
- * @param efp structure with the monitored filesystem
- * @param fd file descriptor used to get data.
- * @param maps_per_core do I need to read all cores?
- */
-static void read_filesystem_table(ebpf_filesystem_partitions_t *efp, int fd, int maps_per_core)
-{
- netdata_idx_t *values = filesystem_hash_values;
- uint32_t key;
- uint32_t idx;
- for (key = 0; key < NETDATA_KEY_CALLS_SYNC; key++) {
- netdata_ebpf_histogram_t *w = select_hist(efp, &idx, key);
- if (!w) {
- continue;
- }
-
- int test = bpf_map_lookup_elem(fd, &key, values);
- if (test < 0) {
- continue;
- }
-
- uint64_t total = 0;
- int i;
- int end = (maps_per_core) ? ebpf_nprocs : 1;
- for (i = 0; i < end; i++) {
- total += values[i];
- }
-
- if (idx >= NETDATA_EBPF_HIST_MAX_BINS)
- idx = NETDATA_EBPF_HIST_MAX_BINS - 1;
- w->histogram[idx] = total;
- }
-}
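-
-/*
- * The summation above exists because per-CPU eBPF maps return one slot per
- * possible CPU from a single bpf_map_lookup_elem() call. A distilled sketch
- * of the same aggregation, guarded out of the build:
- */
-#if 0
-static uint64_t sum_per_cpu(const netdata_idx_t *values, int ncpus, int per_core)
-{
-    uint64_t total = 0;
-    int end = per_core ? ncpus : 1; // one slot per CPU, or a single shared slot
-    for (int i = 0; i < end; i++)
-        total += values[i];
-    return total;
-}
-#endif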
-
-/**
- * Read filesystem tables
- *
- * Read the tables with the number of calls for all monitored filesystems.
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void read_filesystem_tables(int maps_per_core)
-{
- int i;
- for (i = 0; localfs[i].filesystem; i++) {
- ebpf_filesystem_partitions_t *efp = &localfs[i];
- if (efp->flags & NETDATA_FILESYSTEM_FLAG_HAS_PARTITION) {
- read_filesystem_table(efp, efp->fs_maps[NETDATA_MAIN_FS_TABLE].map_fd, maps_per_core);
- }
- }
-}
-
-/**
- * Filesystem read hash
- *
- * Obsolete removed filesystems, rescan the mounted partitions and read the
- * latency tables of every monitored filesystem.
- *
- * @param em the module main structure.
- */
-void ebpf_filesystem_read_hash(ebpf_module_t *em)
-{
- ebpf_obsolete_fs_charts(em->update_every);
-
- (void) ebpf_update_partitions(em);
-
- if (em->optional)
- return;
-
- read_filesystem_tables(em->maps_per_core);
-}
-
-/**
- * Send histogram data
- *
- * Send filesystem latency histograms to Netdata.
- */
-static void ebpf_histogram_send_data()
-{
- uint32_t i;
- uint32_t test = NETDATA_FILESYSTEM_FLAG_HAS_PARTITION | NETDATA_FILESYSTEM_REMOVE_CHARTS;
- for (i = 0; localfs[i].filesystem; i++) {
- ebpf_filesystem_partitions_t *efp = &localfs[i];
- if ((efp->flags & test) == NETDATA_FILESYSTEM_FLAG_HAS_PARTITION) {
- write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hread.name,
- efp->hread.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS);
-
- write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name,
- efp->hwrite.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS);
-
- write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name,
- efp->hopen.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS);
-
- write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name,
- efp->hadditional.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS);
- }
- }
-}
-
-/**
- * Main loop for this collector.
- *
- * @param em main structure for this thread
- */
-static void filesystem_collector(ebpf_module_t *em)
-{
- int update_every = em->update_every;
- heartbeat_t hb;
- heartbeat_init(&hb);
- int counter = update_every - 1;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
-
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- ebpf_filesystem_read_hash(em);
- pthread_mutex_lock(&lock);
-
- ebpf_create_fs_charts(update_every);
- ebpf_histogram_send_data();
-
- pthread_mutex_unlock(&lock);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
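-
-/*
- * Note on the loop above: `counter` starts at update_every - 1, so the very
- * first heartbeat already satisfies ++counter == update_every and data is
- * collected on the first one-second tick instead of after a full interval.
- * Example: with update_every = 5, counter starts at 4 and the first tick
- * raises it to 5, triggering collection at once.
- */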
-
-/*****************************************************************
- *
- * ENTRY THREAD
- *
- *****************************************************************/
-
-/**
- * Update Filesystem
- *
- * Update the filesystem structures using values read from the configuration file.
- */
-static void ebpf_update_filesystem()
-{
- char dist[NETDATA_FS_MAX_DIST_NAME + 1];
- int i;
- for (i = 0; localfs[i].filesystem; i++) {
- snprintfz(dist, NETDATA_FS_MAX_DIST_NAME, "%sdist", localfs[i].filesystem);
-
- localfs[i].enabled = appconfig_get_boolean(&fs_config, NETDATA_FILESYSTEM_CONFIG_NAME, dist,
- CONFIG_BOOLEAN_YES);
- }
-}
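-
-/*
- * Example: for the "ext4" entry the key built above is "ext4dist", so a
- * filesystem can be disabled from the `filesystem` section
- * (NETDATA_FILESYSTEM_CONFIG_NAME) of the collector configuration
- * (filesystem.conf), e.g.:
- *
- *   [filesystem]
- *       ext4dist = no
- */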
-
-/**
- * Set maps
- *
- * When the thread is initialized the variable fs_maps is NULL;
- * this function fills it before use.
- */
-static void ebpf_set_maps()
-{
- localfs[NETDATA_FS_LOCALFS_EXT4].fs_maps = ext4_maps;
- localfs[NETDATA_FS_LOCALFS_XFS].fs_maps = xfs_maps;
- localfs[NETDATA_FS_LOCALFS_NFS].fs_maps = nfs_maps;
- localfs[NETDATA_FS_LOCALFS_ZFS].fs_maps = zfs_maps;
- localfs[NETDATA_FS_LOCALFS_BTRFS].fs_maps = btrfs_maps;
-}
-
-/**
- * Filesystem thread
- *
- * Thread used to generate filesystem charts.
- *
- * @param ptr a pointer to `struct ebpf_module`
- *
- * @return It always returns NULL
- */
-void *ebpf_filesystem_thread(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_filesystem_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- ebpf_set_maps();
- ebpf_update_filesystem();
-
-    // Initialize optional as zero, to identify when there are no partitions to monitor
- em->optional = 0;
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_update_partitions(em)) {
- if (em->optional)
- netdata_log_info("Netdata cannot monitor the filesystems used on this host.");
-
- goto endfilesystem;
- }
-
- int algorithms[NETDATA_EBPF_HIST_MAX_BINS];
- ebpf_fill_algorithms(algorithms, NETDATA_EBPF_HIST_MAX_BINS, NETDATA_EBPF_INCREMENTAL_IDX);
- ebpf_global_labels(filesystem_aggregated_data, filesystem_publish_aggregated, dimensions, dimensions,
- algorithms, NETDATA_EBPF_HIST_MAX_BINS);
-
- pthread_mutex_lock(&lock);
- ebpf_create_fs_charts(em->update_every);
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&lock);
-
- filesystem_collector(em);
-
-endfilesystem:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.h b/collectors/ebpf.plugin/ebpf_filesystem.h
deleted file mode 100644
index f58d7fbe4..000000000
--- a/collectors/ebpf.plugin/ebpf_filesystem.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_FILESYSTEM_H
-#define NETDATA_EBPF_FILESYSTEM_H 1
-
-// Module name & description
-#define NETDATA_EBPF_MODULE_NAME_FILESYSTEM "filesystem"
-#define NETDATA_EBPF_FS_MODULE_DESC "Monitor filesystem latency for: btrfs, ext4, nfs, xfs and zfs."
-
-#include "ebpf.h"
-#ifdef LIBBPF_MAJOR_VERSION
-#include "includes/filesystem.skel.h"
-#endif
-
-#define NETDATA_FS_MAX_DIST_NAME 64UL
-
-#define NETDATA_FILESYSTEM_CONFIG_NAME "filesystem"
-
-// Process configuration name
-#define NETDATA_FILESYSTEM_CONFIG_FILE "filesystem.conf"
-
-typedef struct netdata_fs_hist {
- uint32_t hist_id;
- uint32_t bin;
-} netdata_fs_hist_t;
-
-enum filesystem_limit {
- NETDATA_KEY_CALLS_READ = 24,
- NETDATA_KEY_CALLS_WRITE = 48,
- NETDATA_KEY_CALLS_OPEN = 72,
- NETDATA_KEY_CALLS_SYNC = 96
-};
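-
-/*
- * These boundaries slice one flat key space into four ranges of 24 keys
- * each, one per histogram: [0,24) read latency, [24,48) write, [48,72)
- * open and [72,96) the additional (sync or attribute) chart.
- * select_hist() in ebpf_filesystem.c performs the mapping.
- */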
-
-enum netdata_filesystem_flags {
- NETDATA_FILESYSTEM_FLAG_NO_PARTITION = 0,
- NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM = 1,
- NETDATA_FILESYSTEM_FLAG_HAS_PARTITION = 2,
- NETDATA_FILESYSTEM_FLAG_CHART_CREATED = 4,
- NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE = 8,
- NETDATA_FILESYSTEM_REMOVE_CHARTS = 16,
- NETDATA_FILESYSTEM_ATTR_CHARTS = 32
-};
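-
-/*
- * The flags are single-bit values, so one partition can carry several
- * states at once. Example: ebpf_histogram_send_data() in ebpf_filesystem.c
- * only sends data when
- *
- *   (flags & (FLAG_HAS_PARTITION | REMOVE_CHARTS)) == FLAG_HAS_PARTITION
- *
- * (enum prefixes abbreviated), i.e. the partition is mounted and its
- * charts were not scheduled for removal.
- */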
-
-enum netdata_filesystem_table {
- NETDATA_MAIN_FS_TABLE,
- NETDATA_ADDR_FS_TABLE
-};
-
-enum netdata_filesystem_localfs_idx {
- NETDATA_FS_LOCALFS_EXT4,
- NETDATA_FS_LOCALFS_XFS,
- NETDATA_FS_LOCALFS_NFS,
- NETDATA_FS_LOCALFS_ZFS,
- NETDATA_FS_LOCALFS_BTRFS,
-
- NETDATA_FS_LOCALFS_END,
-};
-
-void *ebpf_filesystem_thread(void *ptr);
-extern struct config fs_config;
-
-#endif /* NETDATA_EBPF_FILESYSTEM_H */
diff --git a/collectors/ebpf.plugin/ebpf_functions.c b/collectors/ebpf.plugin/ebpf_functions.c
deleted file mode 100644
index 6a481ad64..000000000
--- a/collectors/ebpf.plugin/ebpf_functions.c
+++ /dev/null
@@ -1,1093 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_functions.h"
-
-/*****************************************************************
- * EBPF FUNCTION COMMON
- *****************************************************************/
-
-/**
- * Function Start thread
- *
- * Start a specific thread after user request.
- *
- * @param em The structure with thread information
- * @param period the time, in seconds, the thread is allowed to run.
- *
- * @return 0 on success and a non-zero value when the thread cannot be created.
- */
-static int ebpf_function_start_thread(ebpf_module_t *em, int period)
-{
- struct netdata_static_thread *st = em->thread;
-    // another request for a thread that already ran; clean up and restart
- if (st->thread)
- freez(st->thread);
-
- if (period <= 0)
- period = EBPF_DEFAULT_LIFETIME;
-
- st->thread = mallocz(sizeof(netdata_thread_t));
- em->enabled = NETDATA_THREAD_EBPF_FUNCTION_RUNNING;
- em->lifetime = period;
-
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("Starting thread %s with lifetime = %d", em->info.thread_name, period);
-#endif
-
- return netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em);
-}
-
-/*****************************************************************
- * EBPF SELECT MODULE
- *****************************************************************/
-
-/**
- * Select Module
- *
- * @param thread_name name of the thread we are looking for.
- *
- * @return it returns a pointer for the module that has thread_name on success or NULL otherwise.
-ebpf_module_t *ebpf_functions_select_module(const char *thread_name) {
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- if (strcmp(ebpf_modules[i].info.thread_name, thread_name) == 0) {
- return &ebpf_modules[i];
- }
- }
-
- return NULL;
-}
- */
-
-/*****************************************************************
- * EBPF HELP FUNCTIONS
- *****************************************************************/
-
-/**
- * Thread Help
- *
- * Shows help with all options accepted by thread function.
- *
- * @param transaction the transaction id that Netdata sent for this function execution
-static void ebpf_function_thread_manipulation_help(const char *transaction) {
- BUFFER *wb = buffer_create(0, NULL);
- buffer_sprintf(wb, "%s",
- "ebpf.plugin / thread\n"
- "\n"
- "Function `thread` allows user to control eBPF threads.\n"
- "\n"
- "The following filters are supported:\n"
- "\n"
- " thread:NAME\n"
- " Shows information for the thread NAME. Names are listed inside `ebpf.d.conf`.\n"
- "\n"
- " enable:NAME:PERIOD\n"
- " Enable a specific thread named `NAME` to run a specific PERIOD in seconds. When PERIOD is not\n"
- " specified plugin will use the default 300 seconds\n"
- "\n"
- " disable:NAME\n"
- " Disable a sp.\n"
- "\n"
- "Filters can be combined. Each filter can be given only one time.\n"
- );
-
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb);
-
- buffer_free(wb);
-}
-*/
-
-/*****************************************************************
- * EBPF ERROR FUNCTIONS
- *****************************************************************/
-
-/**
- * Function error
- *
- * Show error when a wrong function is given
- *
- * @param transaction the transaction id that Netdata sent for this function execution
- * @param code the error code to show with the message.
- * @param msg the error message
- */
-static void ebpf_function_error(const char *transaction, int code, const char *msg) {
- pluginsd_function_json_error_to_stdout(transaction, code, msg);
-}
-
-/*****************************************************************
- * EBPF THREAD FUNCTION
- *****************************************************************/
-
-/**
- * Function: thread
- *
- * Enable a specific thread.
- *
- * @param transaction the transaction id that Netdata sent for this function execution
- * @param function function name and arguments given to thread.
- * @param line_buffer buffer used to parse args
- * @param line_max Number of arguments given
- * @param timeout The function timeout
- * @param em The structure with thread information
-static void ebpf_function_thread_manipulation(const char *transaction,
- char *function __maybe_unused,
- char *line_buffer __maybe_unused,
- int line_max __maybe_unused,
- int timeout __maybe_unused,
- ebpf_module_t *em)
-{
- char *words[PLUGINSD_MAX_WORDS] = { NULL };
- char message[512];
- uint32_t show_specific_thread = 0;
- size_t num_words = quoted_strings_splitter_pluginsd(function, words, PLUGINSD_MAX_WORDS);
- for(int i = 1; i < PLUGINSD_MAX_WORDS ;i++) {
- const char *keyword = get_word(words, num_words, i);
- if (!keyword)
- break;
-
- ebpf_module_t *lem;
- if(strncmp(keyword, EBPF_THREADS_ENABLE_CATEGORY, sizeof(EBPF_THREADS_ENABLE_CATEGORY) -1) == 0) {
- char thread_name[128];
- int period = -1;
- const char *name = &keyword[sizeof(EBPF_THREADS_ENABLE_CATEGORY) - 1];
- char *separator = strchr(name, ':');
- if (separator) {
- strncpyz(thread_name, name, separator - name);
- period = str2i(++separator);
- } else {
- strncpyz(thread_name, name, strlen(name));
- }
-
- lem = ebpf_functions_select_module(thread_name);
- if (!lem) {
- snprintfz(message, sizeof(message) - 1, "%s%s", EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND, name);
- ebpf_function_error(transaction, HTTP_RESP_NOT_FOUND, message);
- return;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (lem->enabled > NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- // Load configuration again
- ebpf_update_module(lem, default_btf, running_on_kernel, isrh);
-
- if (ebpf_function_start_thread(lem, period)) {
- ebpf_function_error(transaction,
- HTTP_RESP_INTERNAL_SERVER_ERROR,
- "Cannot start thread.");
- return;
- }
- } else {
- lem->running_time = 0;
- if (period > 0) // user is modifying period to run
- lem->lifetime = period;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("Thread %s had lifetime updated for %d", thread_name, period);
-#endif
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- } else if(strncmp(keyword, EBPF_THREADS_DISABLE_CATEGORY, sizeof(EBPF_THREADS_DISABLE_CATEGORY) -1) == 0) {
- const char *name = &keyword[sizeof(EBPF_THREADS_DISABLE_CATEGORY) - 1];
- lem = ebpf_functions_select_module(name);
- if (!lem) {
- snprintfz(message, sizeof(message) - 1, "%s%s", EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND, name);
- ebpf_function_error(transaction, HTTP_RESP_NOT_FOUND, message);
- return;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (lem->enabled < NETDATA_THREAD_EBPF_STOPPING && lem->thread->thread) {
- lem->lifetime = 0;
- lem->running_time = lem->update_every;
- netdata_thread_cancel(*lem->thread->thread);
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- } else if(strncmp(keyword, EBPF_THREADS_SELECT_THREAD, sizeof(EBPF_THREADS_SELECT_THREAD) -1) == 0) {
- const char *name = &keyword[sizeof(EBPF_THREADS_SELECT_THREAD) - 1];
- lem = ebpf_functions_select_module(name);
- if (!lem) {
- snprintfz(message, sizeof(message) - 1, "%s%s", EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND, name);
- ebpf_function_error(transaction, HTTP_RESP_NOT_FOUND, message);
- return;
- }
-
- show_specific_thread |= 1<<lem->thread_id;
- } else if(strncmp(keyword, "help", 4) == 0) {
- ebpf_function_thread_manipulation_help(transaction);
- return;
- }
- }
-
- time_t expires = now_realtime_sec() + em->update_every;
-
- BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS);
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_time_t(wb, "update_every", em->update_every);
- buffer_json_member_add_string(wb, "help", EBPF_PLUGIN_THREAD_FUNCTION_DESCRIPTION);
-
- // Collect data
- buffer_json_member_add_array(wb, "data");
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- if (show_specific_thread && !(show_specific_thread & 1<<i))
- continue;
-
- ebpf_module_t *wem = &ebpf_modules[i];
- buffer_json_add_array_item_array(wb);
-
- // IMPORTANT!
- // THE ORDER SHOULD BE THE SAME WITH THE FIELDS!
-
- // thread name
- buffer_json_add_array_item_string(wb, wem->info.thread_name);
-
- // description
- buffer_json_add_array_item_string(wb, wem->info.thread_description);
-        // Either it is not running, or it received a disable signal and is stopping.
- if (wem->enabled > NETDATA_THREAD_EBPF_FUNCTION_RUNNING ||
- (!wem->lifetime && (int)wem->running_time == wem->update_every)) {
- // status
- buffer_json_add_array_item_string(wb, EBPF_THREAD_STATUS_STOPPED);
-
- // Time remaining
- buffer_json_add_array_item_uint64(wb, 0);
-
- // action
- buffer_json_add_array_item_string(wb, "NULL");
- } else {
- // status
- buffer_json_add_array_item_string(wb, EBPF_THREAD_STATUS_RUNNING);
-
- // Time remaining
- buffer_json_add_array_item_uint64(wb, (wem->lifetime) ? (wem->lifetime - wem->running_time) : 0);
-
- // action
- buffer_json_add_array_item_string(wb, "Enabled/Disabled");
- }
-
- buffer_json_array_close(wb);
- }
-
- buffer_json_array_close(wb); // data
-
- buffer_json_member_add_object(wb, "columns");
- {
- int fields_id = 0;
-
- // IMPORTANT!
- // THE ORDER SHOULD BE THE SAME WITH THE VALUES!
- buffer_rrdf_table_add_field(wb, fields_id++, "Thread", "Thread Name", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_UNIQUE_KEY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Description", "Thread Desc", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Status", "Thread Status", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Time", "Time Remaining", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL,
- NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_NONE, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Action", "Thread Action", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
- }
- buffer_json_object_close(wb); // columns
-
- buffer_json_member_add_string(wb, "default_sort_column", "Thread");
-
- buffer_json_member_add_object(wb, "charts");
- {
- // Threads
- buffer_json_member_add_object(wb, "eBPFThreads");
- {
- buffer_json_member_add_string(wb, "name", "Threads");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Threads");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // Life Time
- buffer_json_member_add_object(wb, "eBPFLifeTime");
- {
- buffer_json_member_add_string(wb, "name", "LifeTime");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Threads");
- buffer_json_add_array_item_string(wb, "Time");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // charts
-
-    // Should we use only fields that can be grouped?
- buffer_json_member_add_object(wb, "group_by");
- {
- // group by Status
- buffer_json_member_add_object(wb, "Status");
- {
- buffer_json_member_add_string(wb, "name", "Thread status");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Status");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // group_by
-
- buffer_json_member_add_time_t(wb, "expires", expires);
- buffer_json_finalize(wb);
-
- // Lock necessary to avoid race condition
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", expires, wb);
-
- buffer_free(wb);
-}
- */
-
-/*****************************************************************
- * EBPF SOCKET FUNCTION
- *****************************************************************/
-
-/**
- * Thread Help
- *
- * Shows help with all options accepted by thread function.
- *
- * @param transaction the transaction id that Netdata sent for this function execution
-*/
-static void ebpf_function_socket_help(const char *transaction) {
- pluginsd_function_result_begin_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600);
- fprintf(stdout, "%s",
- "ebpf.plugin / socket\n"
- "\n"
- "Function `socket` display information for all open sockets during ebpf.plugin runtime.\n"
- "During thread runtime the plugin is always collecting data, but when an option is modified, the plugin\n"
- "resets completely the previous table and can show a clean data for the first request before to bring the\n"
- "modified request.\n"
- "\n"
- "The following filters are supported:\n"
- "\n"
- " family:FAMILY\n"
- " Shows information for the FAMILY specified. Option accepts IPV4, IPV6 and all, that is the default.\n"
- "\n"
- " period:PERIOD\n"
- " Enable socket to run a specific PERIOD in seconds. When PERIOD is not\n"
- " specified plugin will use the default 300 seconds\n"
- "\n"
- " resolve:BOOL\n"
- " Resolve service name, default value is YES.\n"
- "\n"
- " range:CIDR\n"
- " Show sockets that have only a specific destination. Default all addresses.\n"
- "\n"
- " port:range\n"
- " Show sockets that have only a specific destination.\n"
- "\n"
- " reset\n"
- " Send a reset to collector. When a collector receives this command, it uses everything defined in configuration file.\n"
- "\n"
- " interfaces\n"
- " When the collector receives this command, it read all available interfaces on host.\n"
- "\n"
- "Filters can be combined. Each filter can be given only one time. Default all ports\n"
- );
- pluginsd_function_result_end_to_stdout();
- fflush(stdout);
-}
-
-/**
- * Fill Fake socket
- *
- * Fill socket with an invalid request.
- *
- * @param fake_values is the structure where we are storing the value.
- */
-static inline void ebpf_socket_fill_fake_socket(netdata_socket_plus_t *fake_values)
-{
- snprintfz(fake_values->socket_string.src_ip, INET6_ADDRSTRLEN, "%s", "127.0.0.1");
- snprintfz(fake_values->socket_string.dst_ip, INET6_ADDRSTRLEN, "%s", "127.0.0.1");
- fake_values->pid = getpid();
- //fake_values->socket_string.src_port = 0;
- fake_values->socket_string.dst_port[0] = 0;
-    snprintfz(fake_values->socket_string.dst_port, NI_MAXSERV, "%s", "none");
- fake_values->data.family = AF_INET;
- fake_values->data.protocol = AF_UNSPEC;
-}
-
-/**
- * Fill function buffer
- *
- * Fill buffer with data to be shown on cloud.
- *
- * @param wb buffer where we store data.
- * @param values data read from hash table
- * @param name the process name
- */
-static void ebpf_fill_function_buffer(BUFFER *wb, netdata_socket_plus_t *values, char *name)
-{
- buffer_json_add_array_item_array(wb);
-
- // IMPORTANT!
- // THE ORDER SHOULD BE THE SAME WITH THE FIELDS!
-
- // PID
- buffer_json_add_array_item_uint64(wb, (uint64_t)values->pid);
-
- // NAME
- buffer_json_add_array_item_string(wb, (name) ? name : "not identified");
-
- // Origin
- buffer_json_add_array_item_string(wb, (values->data.external_origin) ? "incoming" : "outgoing");
-
- // Source IP
- buffer_json_add_array_item_string(wb, values->socket_string.src_ip);
-
- // SRC Port
- //buffer_json_add_array_item_uint64(wb, (uint64_t) values->socket_string.src_port);
-
- // Destination IP
- buffer_json_add_array_item_string(wb, values->socket_string.dst_ip);
-
- // DST Port
- buffer_json_add_array_item_string(wb, values->socket_string.dst_port);
-
- uint64_t connections;
- if (values->data.protocol == IPPROTO_TCP) {
- // Protocol
- buffer_json_add_array_item_string(wb, "TCP");
-
- // Bytes received
- buffer_json_add_array_item_uint64(wb, (uint64_t) values->data.tcp.tcp_bytes_received);
-
- // Bytes sent
- buffer_json_add_array_item_uint64(wb, (uint64_t) values->data.tcp.tcp_bytes_sent);
-
- // Connections
- connections = values->data.tcp.ipv4_connect + values->data.tcp.ipv6_connect;
- } else if (values->data.protocol == IPPROTO_UDP) {
- // Protocol
- buffer_json_add_array_item_string(wb, "UDP");
-
- // Bytes received
- buffer_json_add_array_item_uint64(wb, (uint64_t) values->data.udp.udp_bytes_received);
-
- // Bytes sent
- buffer_json_add_array_item_uint64(wb, (uint64_t) values->data.udp.udp_bytes_sent);
-
- // Connections
- connections = values->data.udp.call_udp_sent + values->data.udp.call_udp_received;
- } else {
- // Protocol
- buffer_json_add_array_item_string(wb, "UNSPEC");
-
- // Bytes received
- buffer_json_add_array_item_uint64(wb, 0);
-
- // Bytes sent
- buffer_json_add_array_item_uint64(wb, 0);
-
- connections = 1;
- }
-
- // Connections
- if (values->flags & NETDATA_SOCKET_FLAGS_ALREADY_OPEN) {
- connections++;
- } else if (!connections) {
-        // If there are no connections, it means we missed the moment the connection was opened
- values->flags |= NETDATA_SOCKET_FLAGS_ALREADY_OPEN;
- connections++;
- }
- buffer_json_add_array_item_uint64(wb, connections);
-
- buffer_json_array_close(wb);
-}
-
-/**
- * Clean Judy array unsafe
- *
- * Clean all Judy arrays allocated to show the table when a function is called.
- * Before calling this function it is necessary to lock `ebpf_judy_pid.index.rw_spinlock`.
- **/
-static void ebpf_socket_clean_judy_array_unsafe()
-{
- if (!ebpf_judy_pid.index.JudyLArray)
- return;
-
- Pvoid_t *pid_value, *socket_value;
- Word_t local_pid = 0, local_socket = 0;
- bool first_pid = true, first_socket = true;
- while ((pid_value = JudyLFirstThenNext(ebpf_judy_pid.index.JudyLArray, &local_pid, &first_pid))) {
- netdata_ebpf_judy_pid_stats_t *pid_ptr = (netdata_ebpf_judy_pid_stats_t *)*pid_value;
- rw_spinlock_write_lock(&pid_ptr->socket_stats.rw_spinlock);
- if (pid_ptr->socket_stats.JudyLArray) {
- while ((socket_value = JudyLFirstThenNext(pid_ptr->socket_stats.JudyLArray, &local_socket, &first_socket))) {
- netdata_socket_plus_t *socket_clean = *socket_value;
- aral_freez(aral_socket_table, socket_clean);
- }
- JudyLFreeArray(&pid_ptr->socket_stats.JudyLArray, PJE0);
- pid_ptr->socket_stats.JudyLArray = NULL;
- }
- rw_spinlock_write_unlock(&pid_ptr->socket_stats.rw_spinlock);
- }
-}
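-
-/*
- * Data layout walked by the cleanup above (illustrative diagram):
- *
- *   ebpf_judy_pid.index.JudyLArray            key: PID
- *     +- pid_ptr->socket_stats.JudyLArray     key: socket timestamp
- *          +- netdata_socket_plus_t *         allocated from aral_socket_table
- *
- * Both levels must be released: every per-socket value through aral_freez()
- * and each inner array through JudyLFreeArray(), exactly as done above.
- */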
-
-/**
- * Fill function buffer unsafe
- *
- * Fill the function buffer with socket information. Before calling this function it is necessary to lock
- * `ebpf_judy_pid.index.rw_spinlock`.
- *
- * @param buf buffer used to store data to be shown by the function.
- */
-static void ebpf_socket_fill_function_buffer_unsafe(BUFFER *buf)
-{
- int counter = 0;
-
- Pvoid_t *pid_value, *socket_value;
- Word_t local_pid = 0;
- bool first_pid = true;
- while ((pid_value = JudyLFirstThenNext(ebpf_judy_pid.index.JudyLArray, &local_pid, &first_pid))) {
- netdata_ebpf_judy_pid_stats_t *pid_ptr = (netdata_ebpf_judy_pid_stats_t *)*pid_value;
- bool first_socket = true;
- Word_t local_timestamp = 0;
- rw_spinlock_read_lock(&pid_ptr->socket_stats.rw_spinlock);
- if (pid_ptr->socket_stats.JudyLArray) {
- while ((socket_value = JudyLFirstThenNext(pid_ptr->socket_stats.JudyLArray, &local_timestamp, &first_socket))) {
- netdata_socket_plus_t *values = (netdata_socket_plus_t *)*socket_value;
- ebpf_fill_function_buffer(buf, values, pid_ptr->cmdline);
- }
- counter++;
- }
- rw_spinlock_read_unlock(&pid_ptr->socket_stats.rw_spinlock);
- }
-
- if (!counter) {
- netdata_socket_plus_t fake_values = { };
- ebpf_socket_fill_fake_socket(&fake_values);
- ebpf_fill_function_buffer(buf, &fake_values, NULL);
- }
-}
-
-/**
- * Read open connections
- *
- * Fill the buffer with the open connections stored in the hash tables, falling back to a
- * fake entry when the thread was not initialized or the arrays were reset.
- *
- * @param buf the buffer to store data.
- * @param em the module main structure.
- */
-void ebpf_socket_read_open_connections(BUFFER *buf, struct ebpf_module *em)
-{
- // thread was not initialized or Array was reset
- rw_spinlock_read_lock(&ebpf_judy_pid.index.rw_spinlock);
- if (!em->maps || (em->maps[NETDATA_SOCKET_OPEN_SOCKET].map_fd == ND_EBPF_MAP_FD_NOT_INITIALIZED) ||
- !ebpf_judy_pid.index.JudyLArray){
- netdata_socket_plus_t fake_values = { };
-
- ebpf_socket_fill_fake_socket(&fake_values);
-
- ebpf_fill_function_buffer(buf, &fake_values, NULL);
- rw_spinlock_read_unlock(&ebpf_judy_pid.index.rw_spinlock);
- return;
- }
-
- rw_spinlock_read_lock(&network_viewer_opt.rw_spinlock);
- ebpf_socket_fill_function_buffer_unsafe(buf);
- rw_spinlock_read_unlock(&network_viewer_opt.rw_spinlock);
- rw_spinlock_read_unlock(&ebpf_judy_pid.index.rw_spinlock);
-}
-
-/**
- * Function: Socket
- *
- * Show information for sockets stored in hash tables.
- *
- * @param transaction the transaction id that Netdata sent for this function execution
- * @param function function name and arguments given to thread.
- * @param timeout The function timeout
- * @param cancelled Variable used to store function status.
- */
-static void ebpf_function_socket_manipulation(const char *transaction,
- char *function __maybe_unused,
- int timeout __maybe_unused,
- bool *cancelled __maybe_unused)
-{
- UNUSED(timeout);
- ebpf_module_t *em = &ebpf_modules[EBPF_MODULE_SOCKET_IDX];
-
- char *words[PLUGINSD_MAX_WORDS] = {NULL};
- size_t num_words = quoted_strings_splitter_pluginsd(function, words, PLUGINSD_MAX_WORDS);
- const char *name;
- int period = -1;
- rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock);
- network_viewer_opt.enabled = CONFIG_BOOLEAN_YES;
- uint32_t previous;
-
- for (int i = 1; i < PLUGINSD_MAX_WORDS; i++) {
- const char *keyword = get_word(words, num_words, i);
- if (!keyword)
- break;
-
- if (strncmp(keyword, EBPF_FUNCTION_SOCKET_FAMILY, sizeof(EBPF_FUNCTION_SOCKET_FAMILY) - 1) == 0) {
- name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_FAMILY) - 1];
- previous = network_viewer_opt.family;
- uint32_t family = AF_UNSPEC;
- if (!strcmp(name, "IPV4"))
- family = AF_INET;
- else if (!strcmp(name, "IPV6"))
- family = AF_INET6;
-
- if (family != previous) {
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- network_viewer_opt.family = family;
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
- ebpf_socket_clean_judy_array_unsafe();
- }
- } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_PERIOD, sizeof(EBPF_FUNCTION_SOCKET_PERIOD) - 1) == 0) {
- name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_PERIOD) - 1];
- pthread_mutex_lock(&ebpf_exit_cleanup);
- period = str2i(name);
- if (period > 0) {
- em->lifetime = period;
- } else
- em->lifetime = EBPF_NON_FUNCTION_LIFE_TIME;
-
-#ifdef NETDATA_DEV_MODE
- collector_info("Lifetime modified for %u", em->lifetime);
-#endif
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_RESOLVE, sizeof(EBPF_FUNCTION_SOCKET_RESOLVE) - 1) == 0) {
- previous = network_viewer_opt.service_resolution_enabled;
- uint32_t resolution;
- name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_RESOLVE) - 1];
- resolution = (!strcasecmp(name, "YES")) ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_NO;
-
- if (previous != resolution) {
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- network_viewer_opt.service_resolution_enabled = resolution;
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
-
- ebpf_socket_clean_judy_array_unsafe();
- }
- } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_RANGE, sizeof(EBPF_FUNCTION_SOCKET_RANGE) - 1) == 0) {
- name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_RANGE) - 1];
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- ebpf_clean_ip_structure(&network_viewer_opt.included_ips);
- ebpf_clean_ip_structure(&network_viewer_opt.excluded_ips);
- ebpf_parse_ips_unsafe((char *)name);
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
-
- ebpf_socket_clean_judy_array_unsafe();
- } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_PORT, sizeof(EBPF_FUNCTION_SOCKET_PORT) - 1) == 0) {
- name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_PORT) - 1];
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- ebpf_clean_port_structure(&network_viewer_opt.included_port);
- ebpf_clean_port_structure(&network_viewer_opt.excluded_port);
- ebpf_parse_ports((char *)name);
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
-
- ebpf_socket_clean_judy_array_unsafe();
- } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_RESET, sizeof(EBPF_FUNCTION_SOCKET_RESET) - 1) == 0) {
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- ebpf_clean_port_structure(&network_viewer_opt.included_port);
- ebpf_clean_port_structure(&network_viewer_opt.excluded_port);
-
- ebpf_clean_ip_structure(&network_viewer_opt.included_ips);
- ebpf_clean_ip_structure(&network_viewer_opt.excluded_ips);
- ebpf_clean_ip_structure(&network_viewer_opt.ipv4_local_ip);
- ebpf_clean_ip_structure(&network_viewer_opt.ipv6_local_ip);
-
- parse_network_viewer_section(&socket_config);
- ebpf_read_local_addresses_unsafe();
- network_viewer_opt.enabled = CONFIG_BOOLEAN_YES;
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
- } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_INTERFACES, sizeof(EBPF_FUNCTION_SOCKET_INTERFACES) - 1) == 0) {
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- ebpf_read_local_addresses_unsafe();
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
- } else if (strncmp(keyword, "help", 4) == 0) {
- ebpf_function_socket_help(transaction);
- rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
- return;
- }
- }
- rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->enabled > NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- // Cleanup when we already had a thread running
- rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock);
- ebpf_socket_clean_judy_array_unsafe();
- rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
-
- if (ebpf_function_start_thread(em, period)) {
- ebpf_function_error(transaction,
- HTTP_RESP_INTERNAL_SERVER_ERROR,
- "Cannot start thread.");
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
- } else {
- if (period < 0 && em->lifetime < EBPF_NON_FUNCTION_LIFE_TIME) {
- em->lifetime = EBPF_NON_FUNCTION_LIFE_TIME;
- }
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- time_t expires = now_realtime_sec() + em->update_every;
-
- BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
- buffer_json_initialize(wb, "\"", "\"", 0, true, false);
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_time_t(wb, "update_every", em->update_every);
- buffer_json_member_add_string(wb, "help", EBPF_PLUGIN_SOCKET_FUNCTION_DESCRIPTION);
-
- // Collect data
- buffer_json_member_add_array(wb, "data");
- ebpf_socket_read_open_connections(wb, em);
- buffer_json_array_close(wb); // data
-
- buffer_json_member_add_object(wb, "columns");
- {
- int fields_id = 0;
-
- // IMPORTANT!
- // THE ORDER SHOULD BE THE SAME WITH THE VALUES!
- buffer_rrdf_table_add_field(wb, fields_id++, "PID", "Process ID", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Process Name", "Process Name", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Origin", "The connection origin.", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Request from", "Request from IP", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- /*
- buffer_rrdf_table_add_field(wb, fields_id++, "SRC PORT", "Source Port", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY,
- NULL);
- */
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Destination IP", "Destination IP", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Destination Port", "Destination Port", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Protocol", "Communication protocol", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Incoming Bandwidth", "Bytes received.", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Outgoing Bandwidth", "Bytes sent.", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id, "Connections", "Number of calls to tcp_vX_connections and udp_sendmsg, where X is the protocol version.", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY,
- NULL);
- }
- buffer_json_object_close(wb); // columns
-
- buffer_json_member_add_object(wb, "charts");
- {
-        // Inbound Connections
- buffer_json_member_add_object(wb, "IPInboundConn");
- {
- buffer_json_member_add_string(wb, "name", "TCP Inbound Connection");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "connected_tcp");
- buffer_json_add_array_item_string(wb, "connected_udp");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // OutBound Connections
- buffer_json_member_add_object(wb, "IPTCPOutboundConn");
- {
- buffer_json_member_add_string(wb, "name", "TCP Outbound Connection");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "connected_V4");
- buffer_json_add_array_item_string(wb, "connected_V6");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // TCP Functions
- buffer_json_member_add_object(wb, "TCPFunctions");
- {
- buffer_json_member_add_string(wb, "name", "TCPFunctions");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "received");
- buffer_json_add_array_item_string(wb, "sent");
- buffer_json_add_array_item_string(wb, "close");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // TCP Bandwidth
- buffer_json_member_add_object(wb, "TCPBandwidth");
- {
- buffer_json_member_add_string(wb, "name", "TCPBandwidth");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "received");
- buffer_json_add_array_item_string(wb, "sent");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // UDP Functions
- buffer_json_member_add_object(wb, "UDPFunctions");
- {
- buffer_json_member_add_string(wb, "name", "UDPFunctions");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "received");
- buffer_json_add_array_item_string(wb, "sent");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // UDP Bandwidth
- buffer_json_member_add_object(wb, "UDPBandwidth");
- {
- buffer_json_member_add_string(wb, "name", "UDPBandwidth");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "received");
- buffer_json_add_array_item_string(wb, "sent");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- }
- buffer_json_object_close(wb); // charts
-
- buffer_json_member_add_string(wb, "default_sort_column", "PID");
-
-    // Should we use only fields that can be grouped?
- buffer_json_member_add_object(wb, "group_by");
- {
- // group by PID
- buffer_json_member_add_object(wb, "PID");
- {
- buffer_json_member_add_string(wb, "name", "Process ID");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "PID");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by Process Name
- buffer_json_member_add_object(wb, "Process Name");
- {
- buffer_json_member_add_string(wb, "name", "Process Name");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Process Name");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by Process Name
- buffer_json_member_add_object(wb, "Origin");
- {
- buffer_json_member_add_string(wb, "name", "Origin");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Origin");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by Request From IP
- buffer_json_member_add_object(wb, "Request from");
- {
- buffer_json_member_add_string(wb, "name", "Request from IP");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Request from");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by Destination IP
- buffer_json_member_add_object(wb, "Destination IP");
- {
- buffer_json_member_add_string(wb, "name", "Destination IP");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Destination IP");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by DST Port
- buffer_json_member_add_object(wb, "Destination Port");
- {
- buffer_json_member_add_string(wb, "name", "Destination Port");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Destination Port");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by Protocol
- buffer_json_member_add_object(wb, "Protocol");
- {
- buffer_json_member_add_string(wb, "name", "Protocol");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Protocol");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // group_by
-
- buffer_json_member_add_time_t(wb, "expires", expires);
- buffer_json_finalize(wb);
-
- // Lock necessary to avoid race condition
- pluginsd_function_result_begin_to_stdout(transaction, HTTP_RESP_OK, "application/json", expires);
-
- fwrite(buffer_tostring(wb), buffer_strlen(wb), 1, stdout);
-
- pluginsd_function_result_end_to_stdout();
- fflush(stdout);
-
- buffer_free(wb);
-}
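-
-/*
- * Shape of the response assembled above (abridged; values illustrative):
- *
- *   {
- *     "status": 200, "type": "table", "update_every": 1,
- *     "help": "Detailed information about open sockets.",
- *     "data": [[ 471, "not identified", "outgoing", "127.0.0.1", ... ]],
- *     "columns": { ... }, "charts": { ... }, "group_by": { ... },
- *     "expires": 1710000000
- *   }
- */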
-
-/*****************************************************************
- * EBPF FUNCTION THREAD
- *****************************************************************/
-
-/**
- * FUNCTION thread.
- *
- * @param ptr a `ebpf_module_t *` (unused by this thread).
- *
- * @return always NULL.
- */
-void *ebpf_function_thread(void *ptr)
-{
- (void)ptr;
-
- struct functions_evloop_globals *wg = functions_evloop_init(1,
- "EBPF",
- &lock,
- &ebpf_plugin_exit);
-
- functions_evloop_add_function(wg,
- "ebpf_socket",
- ebpf_function_socket_manipulation,
- PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT);
-
- heartbeat_t hb;
- heartbeat_init(&hb);
- while(!ebpf_plugin_exit) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
-
- if (ebpf_plugin_exit) {
- break;
- }
- }
-
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_functions.h b/collectors/ebpf.plugin/ebpf_functions.h
deleted file mode 100644
index 795703b42..000000000
--- a/collectors/ebpf.plugin/ebpf_functions.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_FUNCTIONS_H
-#define NETDATA_EBPF_FUNCTIONS_H 1
-
-#ifdef NETDATA_DEV_MODE
-// Common
-static inline void EBPF_PLUGIN_FUNCTIONS(const char *NAME, const char *DESC) {
- fprintf(stdout, "%s \"%s\" 10 \"%s\"\n", PLUGINSD_KEYWORD_FUNCTION, NAME, DESC);
-}
-#endif
-
-// configuration file & description
-#define NETDATA_DIRECTORY_FUNCTIONS_CONFIG_FILE "functions.conf"
-#define NETDATA_EBPF_FUNCTIONS_MODULE_DESC "Show information about current function status."
-
-// function list
-#define EBPF_FUNCTION_THREAD "ebpf_thread"
-#define EBPF_FUNCTION_SOCKET "ebpf_socket"
-
-// thread constants
-#define EBPF_PLUGIN_THREAD_FUNCTION_DESCRIPTION "Detailed information about eBPF threads."
-#define EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND "ebpf.plugin does not have thread named "
-
-#define EBPF_THREADS_SELECT_THREAD "thread:"
-#define EBPF_THREADS_ENABLE_CATEGORY "enable:"
-#define EBPF_THREADS_DISABLE_CATEGORY "disable:"
-
-#define EBPF_THREAD_STATUS_RUNNING "running"
-#define EBPF_THREAD_STATUS_STOPPED "stopped"
-
-// socket constants
-#define EBPF_PLUGIN_SOCKET_FUNCTION_DESCRIPTION "Detailed information about open sockets."
-#define EBPF_FUNCTION_SOCKET_FAMILY "family:"
-#define EBPF_FUNCTION_SOCKET_PERIOD "period:"
-#define EBPF_FUNCTION_SOCKET_RESOLVE "resolve:"
-#define EBPF_FUNCTION_SOCKET_RANGE "range:"
-#define EBPF_FUNCTION_SOCKET_PORT "port:"
-#define EBPF_FUNCTION_SOCKET_RESET "reset"
-#define EBPF_FUNCTION_SOCKET_INTERFACES "interfaces"
-
-void *ebpf_function_thread(void *ptr);
-
-#endif
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/collectors/ebpf.plugin/ebpf_hardirq.c
deleted file mode 100644
index 465ee6434..000000000
--- a/collectors/ebpf.plugin/ebpf_hardirq.c
+++ /dev/null
@@ -1,686 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_hardirq.h"
-
-struct config hardirq_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-static ebpf_local_maps_t hardirq_maps[] = {
- {
- .name = "tbl_hardirq",
- .internal_input = NETDATA_HARDIRQ_MAX_IRQS,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {
- .name = "tbl_hardirq_static",
- .internal_input = HARDIRQ_EBPF_STATIC_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- /* end */
- {
- .name = NULL,
- .internal_input = 0,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }
-};
-
-#define HARDIRQ_TP_CLASS_IRQ "irq"
-#define HARDIRQ_TP_CLASS_IRQ_VECTORS "irq_vectors"
-static ebpf_tracepoint_t hardirq_tracepoints[] = {
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ, .event = "irq_handler_entry"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ, .event = "irq_handler_exit"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "thermal_apic_entry"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "thermal_apic_exit"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "threshold_apic_entry"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "threshold_apic_exit"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "error_apic_entry"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "error_apic_exit"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "deferred_error_apic_entry"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "deferred_error_apic_exit"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "spurious_apic_entry"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "spurious_apic_exit"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "call_function_entry"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "call_function_exit"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "call_function_single_entry"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "call_function_single_exit"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "reschedule_entry"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "reschedule_exit"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "local_timer_entry"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "local_timer_exit"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "irq_work_entry"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "irq_work_exit"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "x86_platform_ipi_entry"},
- {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "x86_platform_ipi_exit"},
- /* end */
- {.enabled = false, .class = NULL, .event = NULL}
-};
-
-static hardirq_static_val_t hardirq_static_vals[] = {
- {
- .idx = HARDIRQ_EBPF_STATIC_APIC_THERMAL,
- .name = "apic_thermal",
- .latency = 0
- },
- {
- .idx = HARDIRQ_EBPF_STATIC_APIC_THRESHOLD,
- .name = "apic_threshold",
- .latency = 0
- },
- {
- .idx = HARDIRQ_EBPF_STATIC_APIC_ERROR,
- .name = "apic_error",
- .latency = 0
- },
- {
- .idx = HARDIRQ_EBPF_STATIC_APIC_DEFERRED_ERROR,
- .name = "apic_deferred_error",
- .latency = 0
- },
- {
- .idx = HARDIRQ_EBPF_STATIC_APIC_SPURIOUS,
- .name = "apic_spurious",
- .latency = 0
- },
- {
- .idx = HARDIRQ_EBPF_STATIC_FUNC_CALL,
- .name = "func_call",
- .latency = 0
- },
- {
- .idx = HARDIRQ_EBPF_STATIC_FUNC_CALL_SINGLE,
- .name = "func_call_single",
- .latency = 0
- },
- {
- .idx = HARDIRQ_EBPF_STATIC_RESCHEDULE,
- .name = "reschedule",
- .latency = 0
- },
- {
- .idx = HARDIRQ_EBPF_STATIC_LOCAL_TIMER,
- .name = "local_timer",
- .latency = 0
- },
- {
- .idx = HARDIRQ_EBPF_STATIC_IRQ_WORK,
- .name = "irq_work",
- .latency = 0
- },
- {
- .idx = HARDIRQ_EBPF_STATIC_X86_PLATFORM_IPI,
- .name = "x86_platform_ipi",
- .latency = 0
- },
-};
-
-// store for "published" data from the reader thread, which the collector
-// thread will write to netdata agent.
-static avl_tree_lock hardirq_pub;
-
-#ifdef LIBBPF_MAJOR_VERSION
-/**
- * Set hash table
- *
- * Set the values for maps according to the values given by the kernel.
- *
- * @param obj is the main structure for bpf objects.
- */
-static inline void ebpf_hardirq_set_hash_table(struct hardirq_bpf *obj)
-{
- hardirq_maps[HARDIRQ_MAP_LATENCY].map_fd = bpf_map__fd(obj->maps.tbl_hardirq);
- hardirq_maps[HARDIRQ_MAP_LATENCY_STATIC].map_fd = bpf_map__fd(obj->maps.tbl_hardirq_static);
-}
-
-/**
- * Load and Attach
- *
- * Load and attach the BPF program.
- */
-static inline int ebpf_hardirq_load_and_attach(struct hardirq_bpf *obj)
-{
- int ret = hardirq_bpf__load(obj);
- if (ret) {
- return -1;
- }
-
- return hardirq_bpf__attach(obj);
-}
-#endif
-
-/*****************************************************************
- *
- * ARAL SECTION
- *
- *****************************************************************/
-
-// ARAL vectors used to speed up processing
-ARAL *ebpf_aral_hardirq = NULL;
-
-/**
- * eBPF hardirq Aral init
- *
- * Initialize the array allocator that will be used when integration with apps is enabled.
- */
-static inline void ebpf_hardirq_aral_init()
-{
- ebpf_aral_hardirq = ebpf_allocate_pid_aral(NETDATA_EBPF_HARDIRQ_ARAL_NAME, sizeof(hardirq_val_t));
-}
-
-/**
- * eBPF hardirq get
- *
- * Get a hardirq_val_t entry to be used with a specific IRQ.
- *
- * @return it returns the address on success.
- */
-hardirq_val_t *ebpf_hardirq_get(void)
-{
- hardirq_val_t *target = aral_mallocz(ebpf_aral_hardirq);
- memset(target, 0, sizeof(hardirq_val_t));
- return target;
-}
-
-/**
- * eBPF hardirq release
- *
- * @param stat the target to release after usage.
- */
-void ebpf_hardirq_release(hardirq_val_t *stat)
-{
- aral_freez(ebpf_aral_hardirq, stat);
-}
-
-/*****************************************************************
- *
- * EXIT FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_hardirq_global(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_EBPF_SYSTEM_GROUP,
- "hardirq_latency",
- "",
- "Hardware IRQ latency",
- EBPF_COMMON_DIMENSION_MILLISECONDS,
- "interrupts",
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- NETDATA_CHART_PRIO_HARDIRQ_LATENCY,
- em->update_every
- );
-}
-
-/**
- * Hardirq Exit
- *
- * Cancel child and exit.
- *
- * @param ptr thread data.
- */
-static void hardirq_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
-
- ebpf_obsolete_hardirq_global(em);
-
- pthread_mutex_unlock(&lock);
- fflush(stdout);
- }
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
-
- if (em->objects) {
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- for (int i = 0; hardirq_tracepoints[i].class != NULL; i++) {
- ebpf_disable_tracepoint(&hardirq_tracepoints[i]);
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/*****************************************************************
- * MAIN LOOP
- *****************************************************************/
-
-/**
- * Compare hard IRQ values.
- *
- * @param a `hardirq_val_t *`.
- * @param b `hardirq_val_t *`.
- *
- * @return 0 if a==b, 1 if a>b, -1 if a<b.
- */
-static int hardirq_val_cmp(void *a, void *b)
-{
- hardirq_val_t *ptr1 = a;
- hardirq_val_t *ptr2 = b;
-
- if (ptr1->irq > ptr2->irq) {
- return 1;
- }
- else if (ptr1->irq < ptr2->irq) {
- return -1;
- }
- else {
- return 0;
- }
-}
-
-/**
- * Parse interrupts
- *
- * Parse /proc/interrupts to get names used in metrics
- *
- * @param irq_name vector to store data.
- * @param irq irq value
- *
- * @return It returns 0 on success and -1 otherwise
- */
-static int hardirq_parse_interrupts(char *irq_name, int irq)
-{
- static procfile *ff = NULL;
- static int cpus = -1;
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/interrupts");
- ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
- }
- if(unlikely(!ff))
- return -1;
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
-        return -1; // reading failed and ff is now NULL, so we will reopen /proc/interrupts on the next call
-
- size_t words = procfile_linewords(ff, 0);
- if(unlikely(cpus == -1)) {
- uint32_t w;
- cpus = 0;
- for(w = 0; w < words ; w++) {
- if(likely(strncmp(procfile_lineword(ff, 0, w), "CPU", 3) == 0))
- cpus++;
- }
- }
-
- size_t lines = procfile_lines(ff), l;
- if(unlikely(!lines)) {
- collector_error("Cannot read /proc/interrupts, zero lines reported.");
- return -1;
- }
-
- for(l = 1; l < lines ;l++) {
- words = procfile_linewords(ff, l);
- if(unlikely(!words)) continue;
- const char *id = procfile_lineword(ff, l, 0);
- if (!isdigit(id[0]))
- continue;
-
- int cmp = str2i(id);
- if (cmp != irq)
- continue;
-
- if(unlikely((uint32_t)(cpus + 2) < words)) {
- const char *name = procfile_lineword(ff, l, words - 1);
-            // On some motherboards IRQs can have the same name, so we prepend the IRQ id to differentiate them.
- snprintfz(irq_name, NETDATA_HARDIRQ_NAME_LEN - 1, "%d_%s", irq, name);
- }
- }
-
- return 0;
-}
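
/* A minimal standalone sketch of the same /proc/interrupts lookup performed
 * above, using plain stdio instead of netdata's procfile API. The helper
 * name lookup_irq_name is hypothetical and not part of the plugin: find the
 * row whose first column is the IRQ number and keep its last token as the
 * name, prefixed with the id. */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int lookup_irq_name(int irq, char *out, size_t len)
{
    FILE *fp = fopen("/proc/interrupts", "r");
    if (!fp)
        return -1;

    char line[1024];
    int found = -1;
    while (fgets(line, sizeof(line), fp)) {
        char *p = line;
        while (*p == ' ')
            p++;                                  // skip leading padding
        if (!isdigit((unsigned char)*p))
            continue;                             // named rows (NMI, LOC, ...) have no number
        if (atoi(p) != irq)
            continue;

        line[strcspn(line, "\n")] = '\0';         // drop the trailing newline
        char *name = strrchr(line, ' ');          // last whitespace-separated token
        if (name) {
            snprintf(out, len, "%d_%s", irq, name + 1);
            found = 0;
        }
        break;
    }
    fclose(fp);
    return found;
}
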
-
-/**
- * Read Latency MAP
- *
- * Read data from the kernel hash map into the user-space store.
- *
- * @param mapfd hash map id.
- *
- * @return it returns 0 on success and -1 otherwise
- */
-static int hardirq_read_latency_map(int mapfd)
-{
- static hardirq_ebpf_static_val_t *hardirq_ebpf_vals = NULL;
- if (!hardirq_ebpf_vals)
- hardirq_ebpf_vals = callocz(ebpf_nprocs + 1, sizeof(hardirq_ebpf_static_val_t));
-
- hardirq_ebpf_key_t key = {};
- hardirq_ebpf_key_t next_key = {};
- hardirq_val_t search_v = {};
- hardirq_val_t *v = NULL;
-
- while (bpf_map_get_next_key(mapfd, &key, &next_key) == 0) {
- // get val for this key.
- int test = bpf_map_lookup_elem(mapfd, &key, hardirq_ebpf_vals);
- if (unlikely(test < 0)) {
- key = next_key;
- continue;
- }
-
- // is this IRQ saved yet?
- //
- // if not, make a new one, mark it as unsaved for now, and continue; we
- // will insert it at the end after all of its values are correctly set,
- // so that we can safely publish it to the collector within a single,
- // short locked operation.
- //
- // otherwise simply continue; we will only update the latency, which
- // can be republished safely without a lock.
- //
- // NOTE: lock isn't strictly necessary for this initial search, as only
- // this thread does writing, but the AVL is using a read-write lock so
- // there is no congestion.
- bool v_is_new = false;
- search_v.irq = key.irq;
- v = (hardirq_val_t *)avl_search_lock(&hardirq_pub, (avl_t *)&search_v);
- if (unlikely(v == NULL)) {
- // latency/name can only be added reliably at a later time.
- // when they're added, only then will we AVL insert.
- v = ebpf_hardirq_get();
- v->irq = key.irq;
- v->dim_exists = false;
-
- v_is_new = true;
- }
-
- // note two things:
- // 1. we must add up latency value for this IRQ across all CPUs.
- // 2. the name is unfortunately *not* available on all CPU maps - only
- // a single map contains the name, so we must find it. we only need
- // to copy it though if the IRQ is new for us.
- uint64_t total_latency = 0;
- int i;
- for (i = 0; i < ebpf_nprocs; i++) {
- total_latency += hardirq_ebpf_vals[i].latency/1000;
- }
-
- // can now safely publish latency for existing IRQs.
- v->latency = total_latency;
-
- // can now safely publish new IRQ.
- if (v_is_new) {
- if (hardirq_parse_interrupts(v->name, v->irq)) {
- ebpf_hardirq_release(v);
- return -1;
- }
-
- avl_t *check = avl_insert_lock(&hardirq_pub, (avl_t *)v);
- if (check != (avl_t *)v) {
- netdata_log_error("Internal error, cannot insert the AVL tree.");
- }
- }
-
- key = next_key;
- }
-
- return 0;
-}
-
-static void hardirq_read_latency_static_map(int mapfd)
-{
- static hardirq_ebpf_static_val_t *hardirq_ebpf_static_vals = NULL;
- if (!hardirq_ebpf_static_vals)
- hardirq_ebpf_static_vals = callocz(ebpf_nprocs + 1, sizeof(hardirq_ebpf_static_val_t));
-
- uint32_t i;
- for (i = 0; i < HARDIRQ_EBPF_STATIC_END; i++) {
- uint32_t map_i = hardirq_static_vals[i].idx;
- int test = bpf_map_lookup_elem(mapfd, &map_i, hardirq_ebpf_static_vals);
- if (unlikely(test < 0)) {
- continue;
- }
-
- uint64_t total_latency = 0;
- int cpu_i;
- int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
- for (cpu_i = 0; cpu_i < end; cpu_i++) {
- total_latency += hardirq_ebpf_static_vals[cpu_i].latency/1000;
- }
-
- hardirq_static_vals[i].latency = total_latency;
- }
-}
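
/* Both readers above follow the same per-CPU aggregation rule: a lookup on a
 * BPF_MAP_TYPE_PERCPU_* map fills one value slot per possible CPU, and user
 * space must sum the slots itself. A minimal sketch of that rule for a plain
 * u64 value (the real maps here hold hardirq_ebpf_static_val_t);
 * sum_percpu_u64 is a hypothetical helper. */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <stdint.h>
#include <stdlib.h>

static int sum_percpu_u64(int map_fd, const void *key, uint64_t *out)
{
    int ncpus = libbpf_num_possible_cpus();       // slots the kernel will fill
    if (ncpus < 1)
        return -1;

    uint64_t *vals = calloc((size_t)ncpus, sizeof(*vals));
    if (!vals)
        return -1;

    int ret = bpf_map_lookup_elem(map_fd, key, vals);
    if (!ret) {
        uint64_t total = 0;
        for (int i = 0; i < ncpus; i++)
            total += vals[i];                     // aggregate across CPUs
        *out = total;
    }
    free(vals);
    return ret;
}
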
-
-/**
- * Read eBPF maps for hard IRQ.
- *
- * @return It returns -1 when /proc cannot be parsed, and 0 on success.
- */
-static int hardirq_reader()
-{
- if (hardirq_read_latency_map(hardirq_maps[HARDIRQ_MAP_LATENCY].map_fd))
- return -1;
-
- hardirq_read_latency_static_map(hardirq_maps[HARDIRQ_MAP_LATENCY_STATIC].map_fd);
-
- return 0;
-}
-
-static void hardirq_create_charts(int update_every)
-{
- ebpf_create_chart(
- NETDATA_EBPF_SYSTEM_GROUP,
- "hardirq_latency",
- "Hardware IRQ latency",
- EBPF_COMMON_DIMENSION_MILLISECONDS,
- "interrupts",
- NULL,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_CHART_PRIO_HARDIRQ_LATENCY,
- NULL, NULL, 0, update_every,
- NETDATA_EBPF_MODULE_NAME_HARDIRQ
- );
-
- fflush(stdout);
-}
-
-static void hardirq_create_static_dims()
-{
- uint32_t i;
- for (i = 0; i < HARDIRQ_EBPF_STATIC_END; i++) {
- ebpf_write_global_dimension(
- hardirq_static_vals[i].name, hardirq_static_vals[i].name,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]
- );
- }
-}
-
-// callback for avl tree traversal on `hardirq_pub`.
-static int hardirq_write_dims(void *entry, void *data)
-{
- UNUSED(data);
-
- hardirq_val_t *v = entry;
-
- // IRQs get dynamically added in, so add the dimension if we haven't yet.
- if (!v->dim_exists) {
- ebpf_write_global_dimension(
- v->name, v->name,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]
- );
- v->dim_exists = true;
- }
-
- write_chart_dimension(v->name, v->latency);
-
- return 1;
-}
-
-static inline void hardirq_write_static_dims()
-{
- uint32_t i;
- for (i = 0; i < HARDIRQ_EBPF_STATIC_END; i++) {
- write_chart_dimension(
- hardirq_static_vals[i].name,
- hardirq_static_vals[i].latency
- );
- }
-}
-
-/**
- * Main loop for this collector.
- *
- * @param em the main thread structure.
- */
-static void hardirq_collector(ebpf_module_t *em)
-{
- memset(&hardirq_pub, 0, sizeof(hardirq_pub));
- avl_init_lock(&hardirq_pub, hardirq_val_cmp);
- ebpf_hardirq_aral_init();
-
- // create chart and static dims.
- pthread_mutex_lock(&lock);
- hardirq_create_charts(em->update_every);
- hardirq_create_static_dims();
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
- pthread_mutex_unlock(&lock);
-
- // loop and read from published data until ebpf plugin is closed.
- heartbeat_t hb;
- heartbeat_init(&hb);
- int update_every = em->update_every;
- int counter = update_every - 1;
-    // this thread will be cancelled by its parent
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
-
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- if (hardirq_reader())
- break;
-
- pthread_mutex_lock(&lock);
-
- // write dims now for all hitherto discovered IRQs.
- ebpf_write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "hardirq_latency", "");
- avl_traverse_lock(&hardirq_pub, hardirq_write_dims, NULL);
- hardirq_write_static_dims();
- ebpf_write_end_chart();
-
- pthread_mutex_unlock(&lock);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
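
/* The cadence above repeats in every collector of this plugin: tick once per
 * second, but only do work every `update_every` ticks, with the counter
 * primed at update_every - 1 so the very first tick collects immediately. A
 * standalone sketch using sleep() in place of netdata's heartbeat; the
 * plugin_exit flag and collect_once() are assumed stand-ins. */
#include <stdbool.h>
#include <unistd.h>

extern volatile bool plugin_exit;                 // set elsewhere on shutdown
extern void collect_once(void);                   // read maps, publish dimensions

static void collector_loop(int update_every)
{
    int counter = update_every - 1;               // primed: first tick does work
    while (!plugin_exit) {
        sleep(1);                                 // heartbeat_next(&hb, USEC_PER_SEC)
        if (plugin_exit || ++counter != update_every)
            continue;
        counter = 0;
        collect_once();
    }
}
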
-
-/*****************************************************************
- * EBPF HARDIRQ THREAD
- *****************************************************************/
-
-/*
- * Load BPF
- *
- * Load BPF files.
- *
- * @param em the structure with configuration
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_hardirq_load_bpf(ebpf_module_t *em)
-{
- int ret = 0;
- if (em->load & EBPF_LOAD_LEGACY) {
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- ret = -1;
- }
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- hardirq_bpf_obj = hardirq_bpf__open();
- if (!hardirq_bpf_obj)
- ret = -1;
- else {
- ret = ebpf_hardirq_load_and_attach(hardirq_bpf_obj);
- if (!ret)
- ebpf_hardirq_set_hash_table(hardirq_bpf_obj);
- }
- }
-#endif
-
- return ret;
-}
-
-/**
- * Hard IRQ latency thread.
- *
- * @param ptr a `ebpf_module_t *`.
- * @return always NULL.
- */
-void *ebpf_hardirq_thread(void *ptr)
-{
- netdata_thread_cleanup_push(hardirq_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = hardirq_maps;
-
- if (ebpf_enable_tracepoints(hardirq_tracepoints) == 0) {
- goto endhardirq;
- }
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_hardirq_load_bpf(em)) {
- goto endhardirq;
- }
-
- hardirq_collector(em);
-
-endhardirq:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
-
- return NULL;
-}
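
/* The thread entry point above relies on the cleanup-handler discipline used
 * by every module in this plugin: the exit routine must run whether the
 * thread is cancelled by its parent or returns normally. Assuming netdata's
 * netdata_thread_cleanup_push/pop are thin wrappers over the POSIX pair, the
 * bare pattern is: */
#include <pthread.h>

extern void module_exit(void *ptr);               // obsolete charts, unload BPF

static void *module_thread(void *ptr)
{
    pthread_cleanup_push(module_exit, ptr);

    /* ... enable tracepoints, load and attach BPF, run the collector ... */

    pthread_cleanup_pop(1);                       // 1 => run module_exit on normal exit too
    return NULL;
}
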
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.h b/collectors/ebpf.plugin/ebpf_hardirq.h
deleted file mode 100644
index 35b03b761..000000000
--- a/collectors/ebpf.plugin/ebpf_hardirq.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_HARDIRQ_H
-#define NETDATA_EBPF_HARDIRQ_H 1
-
-// Module description
-#define NETDATA_EBPF_HARDIRQ_MODULE_DESC "Show time spent servicing individual hardware interrupt requests (hard IRQs)."
-
-#include <stdint.h>
-#include "libnetdata/avl/avl.h"
-
-/*****************************************************************
- * copied from kernel-collectors repo, with modifications needed
- * for inclusion here.
- *****************************************************************/
-
-#define NETDATA_HARDIRQ_NAME_LEN 32
-#define NETDATA_HARDIRQ_MAX_IRQS 1024L
-
-typedef struct hardirq_ebpf_key {
- int irq;
-} hardirq_ebpf_key_t;
-
-enum hardirq_ebpf_static {
- HARDIRQ_EBPF_STATIC_APIC_THERMAL,
- HARDIRQ_EBPF_STATIC_APIC_THRESHOLD,
- HARDIRQ_EBPF_STATIC_APIC_ERROR,
- HARDIRQ_EBPF_STATIC_APIC_DEFERRED_ERROR,
- HARDIRQ_EBPF_STATIC_APIC_SPURIOUS,
- HARDIRQ_EBPF_STATIC_FUNC_CALL,
- HARDIRQ_EBPF_STATIC_FUNC_CALL_SINGLE,
- HARDIRQ_EBPF_STATIC_RESCHEDULE,
- HARDIRQ_EBPF_STATIC_LOCAL_TIMER,
- HARDIRQ_EBPF_STATIC_IRQ_WORK,
- HARDIRQ_EBPF_STATIC_X86_PLATFORM_IPI,
-
- HARDIRQ_EBPF_STATIC_END
-};
-
-enum hardirq_maps {
- HARDIRQ_MAP_LATENCY,
- HARDIRQ_MAP_LATENCY_STATIC
-};
-
-typedef struct hardirq_ebpf_static_val {
- uint64_t latency;
- uint64_t ts;
-} hardirq_ebpf_static_val_t;
-
-/*****************************************************************
- * below this is eBPF plugin-specific code.
- *****************************************************************/
-
-// ARAL Name
-#define NETDATA_EBPF_HARDIRQ_ARAL_NAME "ebpf_harddirq"
-
-#define NETDATA_EBPF_MODULE_NAME_HARDIRQ "hardirq"
-#define NETDATA_HARDIRQ_CONFIG_FILE "hardirq.conf"
-
-typedef struct hardirq_val {
- // must be at top for simplified AVL tree usage.
- // if it's not at the top, we need to use `containerof` for almost all ops.
- avl_t avl;
-
- int irq;
- bool dim_exists; // keep this after `int irq` for alignment byte savings.
- uint64_t latency;
- char name[NETDATA_HARDIRQ_NAME_LEN];
-} hardirq_val_t;
-
-typedef struct hardirq_static_val {
- enum hardirq_ebpf_static idx;
- char *name;
- uint64_t latency;
-} hardirq_static_val_t;
-
-extern struct config hardirq_config;
-void *ebpf_hardirq_thread(void *ptr);
-
-#endif /* NETDATA_EBPF_HARDIRQ_H */
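
/* Why the header insists that `avl_t avl` stay the first member: C guarantees
 * that a pointer to a structure, suitably converted, points to its initial
 * member and vice versa, so the (avl_t *) and (hardirq_val_t *) casts used
 * throughout the collector are exact inverses with no offset arithmetic. A
 * self-contained sketch with a stand-in node type: */
#include <stddef.h>

typedef struct avl_node { struct avl_node *left, *right; } avl_node_t; // stand-in

typedef struct val {
    avl_node_t avl;   // MUST be first for the direct cast below to be valid
    int irq;
} val_t;

static val_t *val_from_node(avl_node_t *node)
{
    return (val_t *)node;                         // ok: avl sits at offset 0
    /* were avl not the first member, this would have to be:
     *   (val_t *)((char *)node - offsetof(val_t, avl));   */
}
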
diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/collectors/ebpf.plugin/ebpf_mdflush.c
deleted file mode 100644
index fe33ff6a4..000000000
--- a/collectors/ebpf.plugin/ebpf_mdflush.c
+++ /dev/null
@@ -1,456 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_mdflush.h"
-
-struct config mdflush_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-#define MDFLUSH_MAP_COUNT 0
-static ebpf_local_maps_t mdflush_maps[] = {
- {
- .name = "tbl_mdflush",
- .internal_input = 1024,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- /* end */
- {
- .name = NULL,
- .internal_input = 0,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
- }
-};
-
-netdata_ebpf_targets_t mdflush_targets[] = { {.name = "md_flush_request", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-
-
-// store for "published" data from the reader thread, which the collector
-// thread will write to netdata agent.
-static avl_tree_lock mdflush_pub;
-
-// tmp store for mdflush values we get from a per-CPU eBPF map.
-static mdflush_ebpf_val_t *mdflush_ebpf_vals = NULL;
-
-#ifdef LIBBPF_MAJOR_VERSION
-/**
- * Disable probes
- *
- * Disable probes to use trampolines.
- *
- * @param obj the loaded object structure.
- */
-static inline void ebpf_disable_probes(struct mdflush_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_md_flush_request_kprobe, false);
-}
-
-/**
- * Disable trampolines
- *
- * Disable trampolines to use probes.
- *
- * @param obj the loaded object structure.
- */
-static inline void ebpf_disable_trampoline(struct mdflush_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_md_flush_request_fentry, false);
-}
-
-/**
- * Set Trampoline
- *
- * Define target to attach trampoline
- *
- * @param obj the loaded object structure.
- */
-static void ebpf_set_trampoline_target(struct mdflush_bpf *obj)
-{
- bpf_program__set_attach_target(obj->progs.netdata_md_flush_request_fentry, 0,
- mdflush_targets[NETDATA_MD_FLUSH_REQUEST].name);
-}
-
-/**
- * Load probe
- *
- * Load a kprobe to monitor the internal function.
- *
- * @param obj the loaded object structure.
- */
-static inline int ebpf_load_probes(struct mdflush_bpf *obj)
-{
- obj->links.netdata_md_flush_request_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_md_flush_request_kprobe,
- false,
- mdflush_targets[NETDATA_MD_FLUSH_REQUEST].name);
- return libbpf_get_error(obj->links.netdata_md_flush_request_kprobe);
-}
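
/* libbpf attach calls such as bpf_program__attach_kprobe() return a
 * struct bpf_link * with any error encoded in the pointer itself, which is
 * why the code above funnels the result through libbpf_get_error(). A
 * minimal sketch of that check (attach_kprobe_checked is hypothetical): */
#include <stdio.h>
#include <bpf/libbpf.h>

static int attach_kprobe_checked(struct bpf_program *prog, const char *func)
{
    struct bpf_link *link = bpf_program__attach_kprobe(prog, false /* entry */, func);
    long err = libbpf_get_error(link);
    if (err) {
        fprintf(stderr, "attaching kprobe to %s failed: %ld\n", func, err);
        return -1;
    }
    return 0;
}
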
-
-/**
- * Load and Attach
- *
- * Load and attach the BPF code according to the user selection.
- *
- * @param obj the loaded object structure.
- * @param em the structure with configuration
- */
-static inline int ebpf_mdflush_load_and_attach(struct mdflush_bpf *obj, ebpf_module_t *em)
-{
- int mode = em->targets[NETDATA_MD_FLUSH_REQUEST].mode;
- if (mode == EBPF_LOAD_TRAMPOLINE) { // trampoline
- ebpf_disable_probes(obj);
-
- ebpf_set_trampoline_target(obj);
- } else // kprobe
- ebpf_disable_trampoline(obj);
-
- int ret = mdflush_bpf__load(obj);
- if (ret) {
- fprintf(stderr, "failed to load BPF object: %d\n", ret);
- return -1;
- }
-
- if (mode == EBPF_LOAD_TRAMPOLINE)
- ret = mdflush_bpf__attach(obj);
- else
- ret = ebpf_load_probes(obj);
-
- return ret;
-}
-
-#endif
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_mdflush_global(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete("mdstat",
- "mdstat_flush",
- "",
- "MD flushes",
- "flushes",
- "flush (eBPF)",
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- NETDATA_CHART_PRIO_MDSTAT_FLUSH,
- em->update_every);
-}
-
-/**
- * MDflush exit
- *
- * Cancel thread and exit.
- *
- * @param ptr thread data.
- */
-static void mdflush_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
-
- ebpf_obsolete_mdflush_global(em);
-
- pthread_mutex_unlock(&lock);
- fflush(stdout);
- }
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
-
- if (em->objects) {
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/**
- * Compare mdflush values.
- *
- * @param a `netdata_mdflush_t *`.
- * @param b `netdata_mdflush_t *`.
- *
- * @return 0 if a==b, 1 if a>b, -1 if a<b.
-*/
-static int mdflush_val_cmp(void *a, void *b)
-{
- netdata_mdflush_t *ptr1 = a;
- netdata_mdflush_t *ptr2 = b;
-
- if (ptr1->unit > ptr2->unit) {
- return 1;
- }
- else if (ptr1->unit < ptr2->unit) {
- return -1;
- }
- else {
- return 0;
- }
-}
-
-/**
- * Read count map
- *
- * Read the hash table and store data to allocated vectors.
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void mdflush_read_count_map(int maps_per_core)
-{
- int mapfd = mdflush_maps[MDFLUSH_MAP_COUNT].map_fd;
- mdflush_ebpf_key_t curr_key = (uint32_t)-1;
- mdflush_ebpf_key_t key = (uint32_t)-1;
- netdata_mdflush_t search_v;
- netdata_mdflush_t *v = NULL;
-
- while (bpf_map_get_next_key(mapfd, &curr_key, &key) == 0) {
- curr_key = key;
-
- // get val for this key.
- int test = bpf_map_lookup_elem(mapfd, &key, mdflush_ebpf_vals);
- if (unlikely(test < 0)) {
- continue;
- }
-
- // is this record saved yet?
- //
- // if not, make a new one, mark it as unsaved for now, and continue; we
- // will insert it at the end after all of its values are correctly set,
- // so that we can safely publish it to the collector within a single,
- // short locked operation.
- //
- // otherwise simply continue; we will only update the flush count,
- // which can be republished safely without a lock.
- //
- // NOTE: lock isn't strictly necessary for this initial search, as only
- // this thread does writing, but the AVL is using a read-write lock so
- // there is no congestion.
- bool v_is_new = false;
- search_v.unit = key;
- v = (netdata_mdflush_t *)avl_search_lock(
- &mdflush_pub,
- (avl_t *)&search_v
- );
- if (unlikely(v == NULL)) {
-            // the flush count can only be filled in reliably below; only once
-            // the record is complete will we AVL-insert it.
- v = callocz(1, sizeof(netdata_mdflush_t));
- v->unit = key;
- sprintf(v->disk_name, "md%u", key);
- v->dim_exists = false;
-
- v_is_new = true;
- }
-
- // we must add up count value for this record across all CPUs.
- uint64_t total_cnt = 0;
- int i;
- int end = (!maps_per_core) ? 1 : ebpf_nprocs;
- for (i = 0; i < end; i++) {
- total_cnt += mdflush_ebpf_vals[i];
- }
-
- // can now safely publish count for existing records.
- v->cnt = total_cnt;
-
- // can now safely publish new record.
- if (v_is_new) {
- avl_t *check = avl_insert_lock(&mdflush_pub, (avl_t *)v);
- if (check != (avl_t *)v) {
- netdata_log_error("Internal error, cannot insert the AVL tree.");
- }
- }
- }
-}
-
-static void mdflush_create_charts(int update_every)
-{
- ebpf_create_chart(
- "mdstat",
- "mdstat_flush",
- "MD flushes",
- "flushes",
- "flush (eBPF)",
- "md.flush",
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_CHART_PRIO_MDSTAT_FLUSH,
- NULL, NULL, 0, update_every,
- NETDATA_EBPF_MODULE_NAME_MDFLUSH
- );
-
- fflush(stdout);
-}
-
-// callback for avl tree traversal on `mdflush_pub`.
-static int mdflush_write_dims(void *entry, void *data)
-{
- UNUSED(data);
-
- netdata_mdflush_t *v = entry;
-
- // records get dynamically added in, so add the dim if we haven't yet.
- if (!v->dim_exists) {
- ebpf_write_global_dimension(
- v->disk_name, v->disk_name,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]
- );
- v->dim_exists = true;
- }
-
- write_chart_dimension(v->disk_name, v->cnt);
-
- return 1;
-}
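
/* Sketch of the traversal contract the callback above implements: netdata's
 * avl_traverse_lock() walks every node, handing the callback the entry plus
 * the opaque `data` pointer, and, as used throughout this plugin, the
 * callback returns 1 to keep walking. count_devices is a hypothetical
 * example callback. */
static int count_devices(void *entry, void *data)
{
    (void)entry;
    int *n = data;
    (*n)++;
    return 1;                                     // continue the traversal
}

/* usage:
 *     int n = 0;
 *     avl_traverse_lock(&mdflush_pub, count_devices, &n);
 */
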
-
-/**
- * Main loop for this collector.
- */
-static void mdflush_collector(ebpf_module_t *em)
-{
- mdflush_ebpf_vals = callocz(ebpf_nprocs, sizeof(mdflush_ebpf_val_t));
-
- int update_every = em->update_every;
- avl_init_lock(&mdflush_pub, mdflush_val_cmp);
-
- // create chart and static dims.
- pthread_mutex_lock(&lock);
- mdflush_create_charts(update_every);
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
- pthread_mutex_unlock(&lock);
-
- // loop and read from published data until ebpf plugin is closed.
- heartbeat_t hb;
- heartbeat_init(&hb);
- int counter = update_every - 1;
- int maps_per_core = em->maps_per_core;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
-
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- mdflush_read_count_map(maps_per_core);
- pthread_mutex_lock(&lock);
- // write dims now for all hitherto discovered devices.
- ebpf_write_begin_chart("mdstat", "mdstat_flush", "");
- avl_traverse_lock(&mdflush_pub, mdflush_write_dims, NULL);
- ebpf_write_end_chart();
-
- pthread_mutex_unlock(&lock);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
-
-/*
- * Load BPF
- *
- * Load BPF files.
- *
- * @param em the structure with configuration
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_mdflush_load_bpf(ebpf_module_t *em)
-{
- int ret = 0;
- if (em->load & EBPF_LOAD_LEGACY) {
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- ret = -1;
- }
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- mdflush_bpf_obj = mdflush_bpf__open();
- if (!mdflush_bpf_obj)
- ret = -1;
- else {
- ret = ebpf_mdflush_load_and_attach(mdflush_bpf_obj, em);
- if (ret && em->targets[NETDATA_MD_FLUSH_REQUEST].mode == EBPF_LOAD_TRAMPOLINE) {
- mdflush_bpf__destroy(mdflush_bpf_obj);
- mdflush_bpf_obj = mdflush_bpf__open();
- if (!mdflush_bpf_obj)
- ret = -1;
- else {
- em->targets[NETDATA_MD_FLUSH_REQUEST].mode = EBPF_LOAD_PROBE;
- ret = ebpf_mdflush_load_and_attach(mdflush_bpf_obj, em);
- }
- }
- }
- }
-#endif
-
- return ret;
-}
-
-
-/**
- * mdflush thread.
- *
- * @param ptr a `ebpf_module_t *`.
- * @return always NULL.
- */
-void *ebpf_mdflush_thread(void *ptr)
-{
- netdata_thread_cleanup_push(mdflush_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = mdflush_maps;
-
- char *md_flush_request = ebpf_find_symbol("md_flush_request");
- if (!md_flush_request) {
- netdata_log_error("Cannot monitor MD devices, because md is not loaded.");
- goto endmdflush;
- }
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_mdflush_load_bpf(em)) {
- netdata_log_error("Cannot load eBPF software.");
- goto endmdflush;
- }
-
- mdflush_collector(em);
-
-endmdflush:
- freez(md_flush_request);
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
-
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_mdflush.h b/collectors/ebpf.plugin/ebpf_mdflush.h
deleted file mode 100644
index 629550746..000000000
--- a/collectors/ebpf.plugin/ebpf_mdflush.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_MDFLUSH_H
-#define NETDATA_EBPF_MDFLUSH_H 1
-
-// Module name & description
-#define NETDATA_EBPF_MODULE_NAME_MDFLUSH "mdflush"
-#define NETDATA_EBPF_MD_MODULE_DESC "Show information about multi-device software flushes."
-
-// charts
-#define NETDATA_MDFLUSH_GLOBAL_CHART "mdflush"
-
-// configuration file
-#define NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE "mdflush.conf"
-
-// copy of mdflush types from kernel-collectors repo.
-typedef uint32_t mdflush_ebpf_key_t;
-typedef uint64_t mdflush_ebpf_val_t;
-
-typedef struct netdata_mdflush {
- // must be at top for simplified AVL tree usage.
- // if it's not at the top, we need to use `containerof` for almost all ops.
- avl_t avl;
-
- // key & name of device.
- // the name is generated by the key, usually as `md<unit>`.
- uint32_t unit;
- char disk_name[32];
-
- // have we defined the dimension for this device yet?
- bool dim_exists;
-
- // incremental flush count value.
- uint64_t cnt;
-} netdata_mdflush_t;
-
-enum netdata_mdflush_targets {
- NETDATA_MD_FLUSH_REQUEST,
-
- NETDATA_MD_FLUSH_END
-};
-
-void *ebpf_mdflush_thread(void *ptr);
-
-extern struct config mdflush_config;
-extern netdata_ebpf_targets_t mdflush_targets[];
-
-#endif
diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c
deleted file mode 100644
index 05c76540a..000000000
--- a/collectors/ebpf.plugin/ebpf_mount.c
+++ /dev/null
@@ -1,517 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_mount.h"
-
-static ebpf_local_maps_t mount_maps[] = {{.name = "tbl_mount", .internal_input = NETDATA_MOUNT_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-static char *mount_dimension_name[NETDATA_EBPF_MOUNT_SYSCALL] = { "mount", "umount" };
-static netdata_syscall_stat_t mount_aggregated_data[NETDATA_EBPF_MOUNT_SYSCALL];
-static netdata_publish_syscall_t mount_publish_aggregated[NETDATA_EBPF_MOUNT_SYSCALL];
-
-struct config mount_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-static netdata_idx_t mount_hash_values[NETDATA_MOUNT_END];
-
-netdata_ebpf_targets_t mount_targets[] = { {.name = "mount", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "umount", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-
-#ifdef LIBBPF_MAJOR_VERSION
-/*****************************************************************
- *
- * BTF FUNCTIONS
- *
- *****************************************************************/
-
-/*
- * Disable probe
- *
- * Disable all probes so that another attach method is used exclusively.
- *
- * @param obj is the main structure for bpf objects.
- */
-static inline void ebpf_mount_disable_probe(struct mount_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_mount_probe, false);
- bpf_program__set_autoload(obj->progs.netdata_umount_probe, false);
-
- bpf_program__set_autoload(obj->progs.netdata_mount_retprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_umount_retprobe, false);
-}
-
-/*
- * Disable tracepoint
- *
- * Disable all tracepoints so that another attach method is used exclusively.
- *
- * @param obj is the main structure for bpf objects.
- */
-static inline void ebpf_mount_disable_tracepoint(struct mount_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_mount_exit, false);
- bpf_program__set_autoload(obj->progs.netdata_umount_exit, false);
-}
-
-/*
- * Disable trampoline
- *
- * Disable all trampolines so that another attach method is used exclusively.
- *
- * @param obj is the main structure for bpf objects.
- */
-static inline void ebpf_mount_disable_trampoline(struct mount_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_mount_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_umount_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_mount_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_umount_fexit, false);
-}
-
-/**
- * Set trampoline target
- *
- * Set the targets we will monitor.
- *
- * @param obj is the main structure for bpf objects.
- */
-static inline void netdata_set_trampoline_target(struct mount_bpf *obj)
-{
- char syscall[NETDATA_EBPF_MAX_SYSCALL_LENGTH + 1];
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH,
- mount_targets[NETDATA_MOUNT_SYSCALL].name, running_on_kernel);
-
- bpf_program__set_attach_target(obj->progs.netdata_mount_fentry, 0,
- syscall);
-
- bpf_program__set_attach_target(obj->progs.netdata_mount_fexit, 0,
- syscall);
-
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH,
- mount_targets[NETDATA_UMOUNT_SYSCALL].name, running_on_kernel);
-
- bpf_program__set_attach_target(obj->progs.netdata_umount_fentry, 0,
- syscall);
-
- bpf_program__set_attach_target(obj->progs.netdata_umount_fexit, 0,
- syscall);
-}
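
/* ebpf_select_host_prefix() above exists because syscall entry points carry
 * architecture-specific prefixes in modern kernels: on x86_64, since roughly
 * kernel 4.17, the symbol is __x64_sys_mount rather than sys_mount, and
 * trampolines/kprobes must target the real symbol. A hedged sketch of the
 * idea for x86_64 only (syscall_symbol and the KERNEL_VERSION-style encoding
 * are assumptions, not the plugin's actual helper): */
#include <stdio.h>

#define KVER(a, b, c) (((a) << 16) + ((b) << 8) + (c))

static void syscall_symbol(char *out, size_t len, const char *name, int kernel)
{
    if (kernel >= KVER(4, 17, 0))
        snprintf(out, len, "__x64_sys_%s", name);
    else
        snprintf(out, len, "sys_%s", name);
}
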
-
-/**
- * Mount Attach Probe
- *
- * Attach probes to target
- *
- * @param obj is the main structure for bpf objects.
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_mount_attach_probe(struct mount_bpf *obj)
-{
- char syscall[NETDATA_EBPF_MAX_SYSCALL_LENGTH + 1];
-
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH,
- mount_targets[NETDATA_MOUNT_SYSCALL].name, running_on_kernel);
-
- obj->links.netdata_mount_probe = bpf_program__attach_kprobe(obj->progs.netdata_mount_probe,
- false, syscall);
- int ret = (int)libbpf_get_error(obj->links.netdata_mount_probe);
- if (ret)
- return -1;
-
- obj->links.netdata_mount_retprobe = bpf_program__attach_kprobe(obj->progs.netdata_mount_retprobe,
- true, syscall);
- ret = (int)libbpf_get_error(obj->links.netdata_mount_retprobe);
- if (ret)
- return -1;
-
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH,
- mount_targets[NETDATA_UMOUNT_SYSCALL].name, running_on_kernel);
-
- obj->links.netdata_umount_probe = bpf_program__attach_kprobe(obj->progs.netdata_umount_probe,
- false, syscall);
- ret = (int)libbpf_get_error(obj->links.netdata_umount_probe);
- if (ret)
- return -1;
-
- obj->links.netdata_umount_retprobe = bpf_program__attach_kprobe(obj->progs.netdata_umount_retprobe,
- true, syscall);
- ret = (int)libbpf_get_error(obj->links.netdata_umount_retprobe);
- if (ret)
- return -1;
-
- return 0;
-}
-
-/**
- * Set hash tables
- *
- * Set the values for maps according to the values given by the kernel.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_mount_set_hash_tables(struct mount_bpf *obj)
-{
- mount_maps[NETDATA_KEY_MOUNT_TABLE].map_fd = bpf_map__fd(obj->maps.tbl_mount);
-}
-
-/**
- * Load and attach
- *
- * Load and attach the eBPF code in kernel.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- *
- * @return it returns 0 on success and -1 otherwise
- */
-static inline int ebpf_mount_load_and_attach(struct mount_bpf *obj, ebpf_module_t *em)
-{
- netdata_ebpf_targets_t *mt = em->targets;
- netdata_ebpf_program_loaded_t test = mt[NETDATA_MOUNT_SYSCALL].mode;
-
- // We are testing only one, because all will have the same behavior
- if (test == EBPF_LOAD_TRAMPOLINE ) {
- ebpf_mount_disable_probe(obj);
- ebpf_mount_disable_tracepoint(obj);
-
- netdata_set_trampoline_target(obj);
- } else if (test == EBPF_LOAD_PROBE ||
- test == EBPF_LOAD_RETPROBE ) {
- ebpf_mount_disable_tracepoint(obj);
- ebpf_mount_disable_trampoline(obj);
- } else {
- ebpf_mount_disable_probe(obj);
- ebpf_mount_disable_trampoline(obj);
- }
-
- ebpf_update_map_type(obj->maps.tbl_mount, &mount_maps[NETDATA_KEY_MOUNT_TABLE]);
-
- int ret = mount_bpf__load(obj);
- if (!ret) {
- if (test != EBPF_LOAD_PROBE && test != EBPF_LOAD_RETPROBE )
- ret = mount_bpf__attach(obj);
- else
- ret = ebpf_mount_attach_probe(obj);
-
- if (!ret)
- ebpf_mount_set_hash_tables(obj);
- }
-
- return ret;
-}
-#endif
-/*****************************************************************
- *
- * FUNCTIONS TO CLOSE THE THREAD
- *
- *****************************************************************/
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_mount_global(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_EBPF_MOUNT_GLOBAL_FAMILY,
- NETDATA_EBPF_MOUNT_CALLS,
- "",
- "Calls to mount and umount syscalls",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_EBPF_MOUNT_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_EBPF_MOUNT_CHARTS,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_EBPF_MOUNT_GLOBAL_FAMILY,
- NETDATA_EBPF_MOUNT_ERRORS,
- "",
- "Errors to mount and umount file systems",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_EBPF_MOUNT_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_EBPF_MOUNT_CHARTS + 1,
- em->update_every);
-}
-
-/**
- * Mount Exit
- *
- * Cancel child thread.
- *
- * @param ptr thread data.
- */
-static void ebpf_mount_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
-
- ebpf_obsolete_mount_global(em);
-
- fflush(stdout);
- pthread_mutex_unlock(&lock);
- }
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
-
-#ifdef LIBBPF_MAJOR_VERSION
- if (mount_bpf_obj) {
- mount_bpf__destroy(mount_bpf_obj);
- mount_bpf_obj = NULL;
- }
-#endif
- if (em->objects) {
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/*****************************************************************
- *
- * MAIN LOOP
- *
- *****************************************************************/
-
-/**
- * Read global table
- *
- * Read the table with number of calls for all functions
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void ebpf_mount_read_global_table(int maps_per_core)
-{
- static netdata_idx_t *mount_values = NULL;
- if (!mount_values)
- mount_values = callocz((size_t)ebpf_nprocs + 1, sizeof(netdata_idx_t));
-
- uint32_t idx;
- netdata_idx_t *val = mount_hash_values;
- netdata_idx_t *stored = mount_values;
- size_t length = sizeof(netdata_idx_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- int fd = mount_maps[NETDATA_KEY_MOUNT_TABLE].map_fd;
-
- for (idx = NETDATA_KEY_MOUNT_CALL; idx < NETDATA_MOUNT_END; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, stored)) {
- int i;
- int end = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_idx_t total = 0;
- for (i = 0; i < end; i++)
- total += stored[i];
-
- val[idx] = total;
- memset(stored, 0, length);
- }
- }
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- */
-static void ebpf_mount_send_data()
-{
- int i, j;
- int end = NETDATA_EBPF_MOUNT_SYSCALL;
- for (i = NETDATA_KEY_MOUNT_CALL, j = NETDATA_KEY_MOUNT_ERROR; i < end; i++, j++) {
- mount_publish_aggregated[i].ncall = mount_hash_values[i];
- mount_publish_aggregated[i].nerr = mount_hash_values[j];
- }
-
- write_count_chart(NETDATA_EBPF_MOUNT_CALLS, NETDATA_EBPF_MOUNT_GLOBAL_FAMILY,
- mount_publish_aggregated, NETDATA_EBPF_MOUNT_SYSCALL);
-
- write_err_chart(NETDATA_EBPF_MOUNT_ERRORS, NETDATA_EBPF_MOUNT_GLOBAL_FAMILY,
- mount_publish_aggregated, NETDATA_EBPF_MOUNT_SYSCALL);
-}
-
-/**
- * Main loop for this collector.
- */
-static void mount_collector(ebpf_module_t *em)
-{
- memset(mount_hash_values, 0, sizeof(mount_hash_values));
-
- heartbeat_t hb;
- heartbeat_init(&hb);
- int update_every = em->update_every;
- int counter = update_every - 1;
- int maps_per_core = em->maps_per_core;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- ebpf_mount_read_global_table(maps_per_core);
- pthread_mutex_lock(&lock);
-
- ebpf_mount_send_data();
-
- pthread_mutex_unlock(&lock);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
-
-/*****************************************************************
- *
- * INITIALIZE THREAD
- *
- *****************************************************************/
-
-/**
- * Create mount charts
- *
- * Call ebpf_create_chart to create the charts for the collector.
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_mount_charts(int update_every)
-{
- ebpf_create_chart(NETDATA_EBPF_MOUNT_GLOBAL_FAMILY, NETDATA_EBPF_MOUNT_CALLS,
- "Calls to mount and umount syscalls",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_MOUNT_FAMILY,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_EBPF_MOUNT_CHARTS,
- ebpf_create_global_dimension,
- mount_publish_aggregated, NETDATA_EBPF_MOUNT_SYSCALL,
- update_every, NETDATA_EBPF_MODULE_NAME_MOUNT);
-
- ebpf_create_chart(NETDATA_EBPF_MOUNT_GLOBAL_FAMILY, NETDATA_EBPF_MOUNT_ERRORS,
- "Errors to mount and umount file systems",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_MOUNT_FAMILY,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_EBPF_MOUNT_CHARTS + 1,
- ebpf_create_global_dimension,
- mount_publish_aggregated, NETDATA_EBPF_MOUNT_SYSCALL,
- update_every, NETDATA_EBPF_MODULE_NAME_MOUNT);
-
- fflush(stdout);
-}
-
-/*****************************************************************
- *
- * MAIN THREAD
- *
- *****************************************************************/
-
-/*
- * Load BPF
- *
- * Load BPF files.
- *
- * @param em the structure with configuration
- */
-static int ebpf_mount_load_bpf(ebpf_module_t *em)
-{
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
-#endif
-
- int ret = 0;
- if (em->load & EBPF_LOAD_LEGACY) {
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- ret = -1;
- }
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- mount_bpf_obj = mount_bpf__open();
- if (!mount_bpf_obj)
- ret = -1;
- else
- ret = ebpf_mount_load_and_attach(mount_bpf_obj, em);
- }
-#endif
-
- if (ret)
- netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name);
-
- return ret;
-}
-
-/**
- * Mount thread
- *
- * Thread that runs the mount collector.
- *
- * @param ptr a pointer to `struct ebpf_module`
- *
- * @return It always returns NULL
- */
-void *ebpf_mount_thread(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_mount_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = mount_maps;
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_mount_load_bpf(em)) {
- goto endmount;
- }
-
- int algorithms[NETDATA_EBPF_MOUNT_SYSCALL] = { NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX };
-
- ebpf_global_labels(mount_aggregated_data, mount_publish_aggregated, mount_dimension_name, mount_dimension_name,
- algorithms, NETDATA_EBPF_MOUNT_SYSCALL);
-
- pthread_mutex_lock(&lock);
- ebpf_create_mount_charts(em->update_every);
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
- pthread_mutex_unlock(&lock);
-
- mount_collector(em);
-
-endmount:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_mount.h b/collectors/ebpf.plugin/ebpf_mount.h
deleted file mode 100644
index 768914b02..000000000
--- a/collectors/ebpf.plugin/ebpf_mount.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_MOUNT_H
-#define NETDATA_EBPF_MOUNT_H 1
-
-// Module name & description
-#define NETDATA_EBPF_MODULE_NAME_MOUNT "mount"
-#define NETDATA_EBPF_MOUNT_MODULE_DESC "Show calls to syscalls mount(2) and umount(2)."
-
-#define NETDATA_EBPF_MOUNT_SYSCALL 2
-
-#define NETDATA_EBPF_MOUNT_CALLS "call"
-#define NETDATA_EBPF_MOUNT_ERRORS "error"
-#define NETDATA_EBPF_MOUNT_FAMILY "mount (eBPF)"
-
-// Process configuration name
-#define NETDATA_MOUNT_CONFIG_FILE "mount.conf"
-
-enum mount_counters {
- NETDATA_KEY_MOUNT_CALL,
- NETDATA_KEY_UMOUNT_CALL,
- NETDATA_KEY_MOUNT_ERROR,
- NETDATA_KEY_UMOUNT_ERROR,
-
- NETDATA_MOUNT_END
-};
-
-enum mount_tables {
- NETDATA_KEY_MOUNT_TABLE
-};
-
-enum netdata_mount_syscalls {
- NETDATA_MOUNT_SYSCALL,
- NETDATA_UMOUNT_SYSCALL,
-
- NETDATA_MOUNT_SYSCALLS_END
-};
-
-extern struct config mount_config;
-void *ebpf_mount_thread(void *ptr);
-extern netdata_ebpf_targets_t mount_targets[];
-
-#endif /* NETDATA_EBPF_MOUNT_H */
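
/* The layout of enum mount_counters above is load-bearing: the two call
 * counters occupy indices [0,1] and their matching error counters sit exactly
 * NETDATA_EBPF_MOUNT_SYSCALL (2) slots later, which is what lets
 * ebpf_mount_send_data() pair them in one loop (i for calls, j = i + 2 for
 * errors). A tiny compile-time check of that invariant, assuming the
 * header's definitions are in scope: */
#include <assert.h>

static_assert(NETDATA_KEY_MOUNT_ERROR == NETDATA_KEY_MOUNT_CALL + NETDATA_EBPF_MOUNT_SYSCALL,
              "mount error slot must sit one calls-block after its call slot");
static_assert(NETDATA_KEY_UMOUNT_ERROR == NETDATA_KEY_UMOUNT_CALL + NETDATA_EBPF_MOUNT_SYSCALL,
              "umount error slot must sit one calls-block after its call slot");
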
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c
deleted file mode 100644
index 2c34650c3..000000000
--- a/collectors/ebpf.plugin/ebpf_oomkill.c
+++ /dev/null
@@ -1,565 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_oomkill.h"
-
-struct config oomkill_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-#define OOMKILL_MAP_KILLCNT 0
-static ebpf_local_maps_t oomkill_maps[] = {
- {
- .name = "tbl_oomkill",
- .internal_input = NETDATA_OOMKILL_MAX_ENTRIES,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- /* end */
- {
- .name = NULL,
- .internal_input = 0,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- }
-};
-
-static ebpf_tracepoint_t oomkill_tracepoints[] = {
- {.enabled = false, .class = "oom", .event = "mark_victim"},
- /* end */
- {.enabled = false, .class = NULL, .event = NULL}
-};
-
-static netdata_publish_syscall_t oomkill_publish_aggregated = {.name = "oomkill", .dimension = "oomkill",
- .algorithm = "absolute",
- .next = NULL};
-
-static void ebpf_create_specific_oomkill_charts(char *type, int update_every);
-static void ebpf_obsolete_specific_oomkill_charts(char *type, int update_every);
-
-/**
- * Obsolete services
- *
- * Obsolete all service charts created
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_oomkill_services(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_OOMKILL_CHART,
- "",
- "OOM kills. This chart is provided by eBPF plugin.",
- EBPF_COMMON_DIMENSION_KILLS,
- NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 20191,
- em->update_every);
-}
-
-/**
- * Obsolete cgroup chart
- *
- * Send the obsolete flag for all charts created earlier, so they are closed.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static inline void ebpf_obsolete_oomkill_cgroup_charts(ebpf_module_t *em)
-{
- pthread_mutex_lock(&mutex_cgroup_shm);
-
- ebpf_obsolete_oomkill_services(em);
-
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
-        ebpf_obsolete_specific_oomkill_charts(ect->name, em->update_every);
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_oomkill_apps(ebpf_module_t *em)
-{
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = apps_groups_root_target; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_OOMKILL_IDX))))
- continue;
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_app_oomkill",
- "OOM kills.",
- EBPF_COMMON_DIMENSION_KILLS,
- NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "ebpf.app_oomkill",
- 20020,
- update_every);
-
- w->charts_created &= ~(1<<EBPF_MODULE_OOMKILL_IDX);
- }
-}
-
-/**
- * Clean up the main thread.
- *
- * @param ptr thread data.
- */
-static void oomkill_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
-
- if (em->cgroup_charts) {
- ebpf_obsolete_oomkill_cgroup_charts(em);
- }
-
- ebpf_obsolete_oomkill_apps(em);
-
- fflush(stdout);
- pthread_mutex_unlock(&lock);
- }
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
-
- if (em->objects) {
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-static void oomkill_write_data(int32_t *keys, uint32_t total)
-{
- // for each app, see if it was OOM killed. record as 1 if so otherwise 0.
- struct ebpf_target *w;
- for (w = apps_groups_root_target; w != NULL; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_OOMKILL_IDX))))
- continue;
-
- bool was_oomkilled = false;
- if (total) {
- struct ebpf_pid_on_target *pids = w->root_pid;
- while (pids) {
- uint32_t j;
- for (j = 0; j < total; j++) {
- if (pids->pid == keys[j]) {
- was_oomkilled = true;
- // set to 0 so we consider it "done".
- keys[j] = 0;
- goto write_dim;
- }
- }
- pids = pids->next;
- }
- }
-write_dim:
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_oomkill");
- write_chart_dimension(EBPF_COMMON_DIMENSION_KILLS, was_oomkilled);
- ebpf_write_end_chart();
- }
-
- // for any remaining keys for which we couldn't find a group, this could be
- // for various reasons, but the primary one is that the PID has not yet
- // been picked up by the process thread when parsing the proc filesystem.
- // since it's been OOM killed, it will never be parsed in the future, so
- // we have no choice but to dump it into `other`.
- uint32_t j;
- uint32_t rem_count = 0;
- for (j = 0; j < total; j++) {
- int32_t key = keys[j];
- if (key != 0) {
- rem_count += 1;
- }
- }
- if (rem_count > 0) {
- write_chart_dimension("other", rem_count);
- }
-}
-
-/**
- * Create specific OOMkill charts
- *
- * Create charts for cgroup/application.
- *
- * @param type the chart type.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_specific_oomkill_charts(char *type, int update_every)
-{
- ebpf_create_chart(type, NETDATA_OOMKILL_CHART, "OOM kills. This chart is provided by eBPF plugin.",
- EBPF_COMMON_DIMENSION_KILLS, NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_CGROUP_OOMKILLS_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5600,
- ebpf_create_global_dimension,
- &oomkill_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_OOMKILL);
-}
-
-/**
- * Create Systemd OOMkill Charts
- *
- * Create charts when systemd is enabled
- *
- * @param update_every value to overwrite the update frequency set by the server.
- **/
-static void ebpf_create_systemd_oomkill_charts(int update_every)
-{
- ebpf_create_charts_on_systemd(NETDATA_OOMKILL_CHART, "OOM kills. This chart is provided by eBPF plugin.",
- EBPF_COMMON_DIMENSION_KILLS, NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, 20191,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL,
- NETDATA_EBPF_MODULE_NAME_OOMKILL, update_every);
-}
-
-/**
- * Send Systemd charts
- *
- * Send collected data to Netdata.
- */
-static void ebpf_send_systemd_oomkill_charts()
-{
- ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_OOMKILL_CHART, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long) ect->oomkill);
- ect->oomkill = 0;
- }
- }
- ebpf_write_end_chart();
-}
-
-/*
- * Send Specific OOMkill data
- *
- * Send data for specific cgroup/apps.
- *
- * @param type chart type
- * @param value value for oomkill
- */
-static void ebpf_send_specific_oomkill_data(char *type, int value)
-{
- ebpf_write_begin_chart(type, NETDATA_OOMKILL_CHART, "");
- write_chart_dimension(oomkill_publish_aggregated.name, (long long)value);
- ebpf_write_end_chart();
-}
-
-/**
- * Obsolete specific OOMkill charts
- *
- * Obsolete the charts previously created for a cgroup/application.
- *
- * @param type the chart type.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_obsolete_specific_oomkill_charts(char *type, int update_every)
-{
- ebpf_write_chart_obsolete(type, NETDATA_OOMKILL_CHART, "", "OOM kills. This chart is provided by eBPF plugin.",
- EBPF_COMMON_DIMENSION_KILLS, NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_OOMKILLS_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5600, update_every);
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-void ebpf_oomkill_send_cgroup_data(int update_every)
-{
- if (!ebpf_cgroup_pids)
- return;
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_cgroup_target_t *ect;
-
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
- if (send_cgroup_chart) {
- ebpf_create_systemd_oomkill_charts(update_every);
- }
- ebpf_send_systemd_oomkill_charts();
- }
-
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART) && ect->updated) {
- ebpf_create_specific_oomkill_charts(ect->name, update_every);
- ect->flags |= NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART;
- }
-
- if (ect->flags & NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART && ect->updated) {
- ebpf_send_specific_oomkill_data(ect->name, ect->oomkill);
- } else {
- ebpf_obsolete_specific_oomkill_charts(ect->name, update_every);
- ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART;
- }
- }
-
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Read data
- *
- * Read OOMKILL events from table.
- *
- * @param keys vector where data will be stored
- *
- * @return It returns the number of elements read.
- */
-static uint32_t oomkill_read_data(int32_t *keys)
-{
- // the first `i` entries of `keys` will contain the currently active PIDs
- // in the eBPF map.
- uint32_t i = 0;
-
- uint32_t curr_key = 0;
- uint32_t key = 0;
- int mapfd = oomkill_maps[OOMKILL_MAP_KILLCNT].map_fd;
- while (bpf_map_get_next_key(mapfd, &curr_key, &key) == 0) {
- curr_key = key;
-
- keys[i] = (int32_t)key;
- i += 1;
-
- // delete this key now that we've recorded its existence. there's no
- // race here, as the same PID will only get OOM killed once.
- int test = bpf_map_delete_elem(mapfd, &key);
- if (unlikely(test < 0)) {
- // since there's only 1 thread doing these deletions, it should be
- // impossible to get this condition.
- netdata_log_error("key unexpectedly not available for deletion.");
- }
- }
-
- return i;
-}
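
The function above is the standard drain idiom for one-shot BPF events: walk the keys with bpf_map_get_next_key(), record each one, and delete it so the next pass starts from an empty table. A minimal self-contained sketch of the same idiom, assuming only libbpf and an already-opened map fd (the function name and the cap parameter are illustrative):

    #include <bpf/bpf.h>   /* bpf_map_get_next_key(), bpf_map_delete_elem() */
    #include <stdint.h>
    #include <stdio.h>

    /* Drain up to `cap` PID keys from a BPF hash map, deleting each key as
     * it is read so the next pass starts from an empty table. `mapfd` is
     * assumed to be a valid fd for a hash map keyed by u32 PIDs. */
    static uint32_t drain_pid_map(int mapfd, int32_t *out, uint32_t cap)
    {
        uint32_t n = 0, curr = 0, next = 0;

        while (n < cap && bpf_map_get_next_key(mapfd, &curr, &next) == 0) {
            out[n++] = (int32_t)next;
            curr = next;

            /* Each PID fires at most one event, so deleting here cannot
             * race with another reader of the same key. */
            if (bpf_map_delete_elem(mapfd, &next) < 0)
                fprintf(stderr, "could not delete key %u\n", next);
        }

        return n;
    }
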
-
-/**
- * Update cgroup
- *
- * Update cgroup data based on the PIDs that had an OOM kill event.
- *
- * @param keys vector with PIDs that had an OOM kill event
- * @param total number of elements in the keys vector.
- */
-static void ebpf_update_oomkill_cgroup(int32_t *keys, uint32_t total)
-{
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- ect->oomkill = 0;
- struct pid_on_target2 *pids;
- for (pids = ect->pids; pids; pids = pids->next) {
- uint32_t j;
- int32_t pid = pids->pid;
- for (j = 0; j < total; j++) {
- if (pid == keys[j]) {
- ect->oomkill = 1;
- break;
- }
- }
- }
- }
-}
-
-/**
- * Update OOMkill period
- *
- * Update the oomkill period according to the function arguments.
- *
- * @param running_time current value of running_time.
- * @param em the thread main structure.
- *
- * @return It returns new running_time value.
- */
-static int ebpf_update_oomkill_period(int running_time, ebpf_module_t *em)
-{
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = em->update_every;
- else
- running_time += em->update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- return running_time;
-}
-
-/**
- * Main loop for this collector.
- *
- * @param em the thread main structure.
- */
-static void oomkill_collector(ebpf_module_t *em)
-{
- int cgroups = em->cgroup_charts;
- int update_every = em->update_every;
- int32_t keys[NETDATA_OOMKILL_MAX_ENTRIES];
- memset(keys, 0, sizeof(keys));
-
- // loop and read until ebpf plugin is closed.
- heartbeat_t hb;
- heartbeat_init(&hb);
- int counter = update_every - 1;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- netdata_idx_t *stats = em->hash_table_stats;
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
-
- uint32_t count = oomkill_read_data(keys);
- if (!count) {
- running_time = ebpf_update_oomkill_period(running_time, em);
- }
-
- stats[NETDATA_CONTROLLER_PID_TABLE_ADD] += (uint64_t) count;
- stats[NETDATA_CONTROLLER_PID_TABLE_DEL] += (uint64_t) count;
-
- pthread_mutex_lock(&collect_data_mutex);
- pthread_mutex_lock(&lock);
- if (cgroups && count) {
- ebpf_update_oomkill_cgroup(keys, count);
- // write everything from the ebpf map.
- ebpf_oomkill_send_cgroup_data(update_every);
- }
-
- if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
- oomkill_write_data(keys, count);
- }
- pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
-
- running_time = ebpf_update_oomkill_period(running_time, em);
- }
-}
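
oomkill_collector() follows the cadence shared by every collector in this plugin: tick once per second, do real work only every update_every ticks, and stop once the accumulated running time reaches the module lifetime. Stripped to its skeleton, with a hypothetical collect_and_send() standing in for the chart work, the pattern is:

    #include <stdbool.h>
    #include <unistd.h>

    /* Hypothetical stand-ins for the plugin's globals and helpers. */
    extern volatile bool plugin_exit;
    void collect_and_send(void);

    static void collector_loop(int update_every, int lifetime)
    {
        int counter = update_every - 1;
        int running_time = 0;

        while (!plugin_exit && running_time < lifetime) {
            sleep(1);                     /* the real code uses heartbeat_next() */
            if (plugin_exit || ++counter != update_every)
                continue;

            counter = 0;
            collect_and_send();

            running_time += update_every; /* lifetime accounting */
        }
    }
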
-
-/**
- * Create apps charts
- *
- * Call ebpf_create_chart to create the charts on the apps submenu.
- *
- * @param em a pointer to the structure with the default values.
- * @param ptr a pointer to the targets list.
- */
-void ebpf_oomkill_create_apps_charts(struct ebpf_module *em, void *ptr)
-{
- struct ebpf_target *root = ptr;
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = root; w; w = w->next) {
- if (unlikely(!w->exposed))
- continue;
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_oomkill",
- "OOM kills.",
- EBPF_COMMON_DIMENSION_KILLS,
- NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_oomkill",
- 20072,
- update_every,
- NETDATA_EBPF_MODULE_NAME_OOMKILL);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION kills '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- w->charts_created |= 1<<EBPF_MODULE_OOMKILL_IDX;
- }
-
- em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED;
-}
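
Chart creation here is plain text on stdout in Netdata's external-plugin protocol: one CHART command followed by DIMENSION lines, with data later pushed as BEGIN/SET/END blocks. A hedged sketch of what the calls above emit for a single app target (the app name "myapp" and the field values below are illustrative, not taken from the source):

    #include <stdio.h>

    /* Sketch of the plugin protocol emitted for one app target. Chart and
     * dimension names mirror the code above; everything else is illustrative. */
    static void emit_oomkill_chart_for_app(int update_every)
    {
        /* definition, sent once (or whenever the chart must be re-declared) */
        printf("CHART app.myapp_ebpf_oomkill '' 'OOM kills.' 'kills/s' 'mem' "
               "'app.ebpf_oomkill' stacked 20072 %d '' 'ebpf.plugin' 'oomkill'\n",
               update_every);
        printf("DIMENSION kills '' absolute 1 1\n");

        /* data, sent every update_every seconds */
        printf("BEGIN app.myapp_ebpf_oomkill\n");
        printf("SET kills = 1\n");
        printf("END\n");
    }
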
-
-/**
- * OOM kill tracking thread.
- *
- * @param ptr an `ebpf_module_t *`.
- * @return always NULL.
- */
-void *ebpf_oomkill_thread(void *ptr)
-{
- netdata_thread_cleanup_push(oomkill_cleanup, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = oomkill_maps;
-
-#define NETDATA_DEFAULT_OOM_DISABLED_MSG "Disabling OOMKILL thread, because"
- if (unlikely(!ebpf_all_pids || !em->apps_charts)) {
- // When the apps integration is not running, the variables this thread needs are never
- // filled, so the thread must be disabled.
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->enabled)
- netdata_log_info("%s apps integration is completely disabled.", NETDATA_DEFAULT_OOM_DISABLED_MSG);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- goto endoomkill;
- } else if (running_on_kernel < NETDATA_EBPF_KERNEL_4_14) {
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->enabled)
- netdata_log_info("%s kernel does not have necessary tracepoints.", NETDATA_DEFAULT_OOM_DISABLED_MSG);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- goto endoomkill;
- }
-
- if (ebpf_enable_tracepoints(oomkill_tracepoints) == 0) {
- goto endoomkill;
- }
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
-#endif
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- goto endoomkill;
- }
-
- pthread_mutex_lock(&lock);
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
- pthread_mutex_unlock(&lock);
-
- oomkill_collector(em);
-
-endoomkill:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
-
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.h b/collectors/ebpf.plugin/ebpf_oomkill.h
deleted file mode 100644
index 4a5fa62aa..000000000
--- a/collectors/ebpf.plugin/ebpf_oomkill.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_OOMKILL_H
-#define NETDATA_EBPF_OOMKILL_H 1
-
-// Module description
-#define NETDATA_EBPF_OOMKILL_MODULE_DESC "Show OOM kills for all applications recognized via the apps.plugin."
-
-/*****************************************************************
- * copied from kernel-collectors repo, with modifications needed
- * for inclusion here.
- *****************************************************************/
-
-#define NETDATA_OOMKILL_MAX_ENTRIES 64
-
-typedef uint8_t oomkill_ebpf_val_t;
-
-/*****************************************************************
- * below this is eBPF plugin-specific code.
- *****************************************************************/
-
-#define NETDATA_EBPF_MODULE_NAME_OOMKILL "oomkill"
-#define NETDATA_OOMKILL_CONFIG_FILE "oomkill.conf"
-
-#define NETDATA_OOMKILL_CHART "oomkills"
-
-// Contexts
-#define NETDATA_CGROUP_OOMKILLS_CONTEXT "cgroup.oomkills"
-
-extern struct config oomkill_config;
-void *ebpf_oomkill_thread(void *ptr);
-void ebpf_oomkill_create_apps_charts(struct ebpf_module *em, void *ptr);
-
-#endif /* NETDATA_EBPF_OOMKILL_H */
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
deleted file mode 100644
index e3e2b884e..000000000
--- a/collectors/ebpf.plugin/ebpf_process.c
+++ /dev/null
@@ -1,1369 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include <sys/resource.h>
-
-#include "ebpf.h"
-#include "ebpf_process.h"
-
-/*****************************************************************
- *
- * GLOBAL VARIABLES
- *
- *****************************************************************/
-
-static char *process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "process", "task", "process", "thread" };
-static char *process_id_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "do_exit", "release_task", "_do_fork", "sys_clone" };
-static char *status[] = { "process", "zombie" };
-
-static ebpf_local_maps_t process_maps[] = {{.name = "tbl_pid_stats", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = "tbl_total_stats", .internal_input = NETDATA_KEY_END_VECTOR,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "process_ctrl", .internal_input = NETDATA_CONTROLLER_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-char *tracepoint_sched_type = "sched";
-char *tracepoint_sched_process_exit = "sched_process_exit";
-char *tracepoint_sched_process_exec = "sched_process_exec";
-char *tracepoint_sched_process_fork = "sched_process_fork";
-static int was_sched_process_exit_enabled = 0;
-static int was_sched_process_exec_enabled = 0;
-static int was_sched_process_fork_enabled = 0;
-
-static netdata_idx_t *process_hash_values = NULL;
-ebpf_process_stat_t *process_stat_vector = NULL;
-static netdata_syscall_stat_t process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_END];
-static netdata_publish_syscall_t process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_END];
-
-struct config process_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-#ifdef NETDATA_DEV_MODE
-int process_disable_priority;
-#endif
-
-/*****************************************************************
- *
- * PROCESS DATA AND SEND TO NETDATA
- *
- *****************************************************************/
-
-/**
- * Update the publish structure before sending data to Netdata.
- *
- * @param publish the first output structure with independent dimensions
- * @param pvc the second output structure with correlated dimensions
- * @param input the structure with the input data.
- */
-static void ebpf_update_global_publish(netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *pvc,
- netdata_syscall_stat_t *input)
-{
- netdata_publish_syscall_t *move = publish;
- int selector = NETDATA_KEY_PUBLISH_PROCESS_EXIT;
- while (move) {
- move->ncall = (input->call > move->pcall) ? input->call - move->pcall : move->pcall - input->call;
- move->nbyte = (input->bytes > move->pbyte) ? input->bytes - move->pbyte : move->pbyte - input->bytes;
- move->nerr = (input->ecall > move->perr) ? input->ecall - move->perr : move->perr - input->ecall;
-
- move->pcall = input->call;
- move->pbyte = input->bytes;
- move->perr = input->ecall;
-
- input = input->next;
- move = move->next;
- selector++;
- }
-
- pvc->running = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_FORK].ncall -
- (long)publish[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ncall;
- publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall = -publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
- pvc->zombie = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_EXIT].ncall +
- (long)publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
-}
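
ebpf_update_global_publish() converts monotonically growing kernel counters into per-interval values by keeping the previous sample (pcall/pbyte/perr) beside the new one. The core of that step, reduced to a single counter (struct and function names are illustrative):

    #include <stdint.h>

    typedef struct {
        uint64_t prev;   /* last sample of the kernel counter */
        uint64_t delta;  /* value published for the interval  */
    } counter_t;

    /* Kernel counters only grow, so the new sample minus the previous one
     * is the per-interval value; the absolute difference guards against a
     * counter reset (e.g. the map was recreated). */
    static void update_counter(counter_t *c, uint64_t sample)
    {
        c->delta = (sample > c->prev) ? sample - c->prev : c->prev - sample;
        c->prev = sample;
    }
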
-
-/**
- * Write the process status chart.
- *
- * @param family the chart family
- * @param pvc the pointer with the values that will be published
- */
-static void write_status_chart(char *family, netdata_publish_vfs_common_t *pvc)
-{
- ebpf_write_begin_chart(family, NETDATA_PROCESS_STATUS_NAME, "");
-
- write_chart_dimension(status[0], (long long)pvc->running);
- write_chart_dimension(status[1], (long long)pvc->zombie);
-
- ebpf_write_end_chart();
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param em the structure with thread information
- */
-static void ebpf_process_send_data(ebpf_module_t *em)
-{
- netdata_publish_vfs_common_t pvc;
- ebpf_update_global_publish(process_publish_aggregated, &pvc, process_aggregated_data);
-
- write_count_chart(NETDATA_EXIT_SYSCALL, NETDATA_EBPF_SYSTEM_GROUP,
- &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT], 2);
- write_count_chart(NETDATA_PROCESS_SYSCALL, NETDATA_EBPF_SYSTEM_GROUP,
- &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], 2);
-
- write_status_chart(NETDATA_EBPF_SYSTEM_GROUP, &pvc);
- if (em->mode < MODE_ENTRY) {
- write_err_chart(NETDATA_PROCESS_ERROR_NAME, NETDATA_EBPF_SYSTEM_GROUP,
- &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], 2);
- }
-}
-
-/**
- * Sum values for PIDs
- *
- * @param root the structure with all available PIDs
- * @param offset the byte offset of the field that we are reading
- *
- * @return It returns the sum across all PIDs.
- */
-long long ebpf_process_sum_values_for_pids(struct ebpf_pid_on_target *root, size_t offset)
-{
- long long ret = 0;
- while (root) {
- int32_t pid = root->pid;
- ebpf_process_stat_t *w = global_process_stats[pid];
- if (w) {
- uint32_t *value = (uint32_t *)((char *)w + offset);
- ret += *value;
- }
-
- root = root->next;
- }
-
- return ret;
-}
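
Passing a byte offset instead of a field name lets one summing routine serve every uint32_t member of ebpf_process_stat_t. The same trick in isolation, on a hypothetical stats struct:

    #include <stddef.h>  /* offsetof */
    #include <stdint.h>

    typedef struct stat_entry {
        uint32_t create_process;
        uint32_t create_thread;
        struct stat_entry *next;
    } stat_entry_t;

    /* Generic field summation: the caller selects the field with
     * offsetof(), so one function can aggregate any uint32_t member. */
    static long long sum_field(stat_entry_t *root, size_t offset)
    {
        long long total = 0;
        for (; root; root = root->next)
            total += *(uint32_t *)((char *)root + offset);
        return total;
    }

    /* usage: sum_field(list, offsetof(stat_entry_t, create_thread)); */
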
-
-/**
- * Remove process pid
- *
- * Remove entries from the PID task table after release_task was called.
- */
-void ebpf_process_remove_pids()
-{
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
- int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
- while (pids) {
- uint32_t pid = pids->pid;
- ebpf_process_stat_t *w = global_process_stats[pid];
- if (w) {
- ebpf_process_stat_release(w);
- global_process_stats[pid] = NULL;
- bpf_map_delete_elem(pid_fd, &pid);
- }
-
- pids = pids->next;
- }
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param root the target list.
- * @param em the structure with thread information.
- */
-void ebpf_process_send_apps_data(struct ebpf_target *root, ebpf_module_t *em)
-{
- struct ebpf_target *w;
- // This algorithm is improved in https://github.com/netdata/netdata/pull/16030
- collected_number values[5];
-
- for (w = root; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_PROCESS_IDX))))
- continue;
-
- values[0] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, create_process));
- values[1] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, create_thread));
- values[2] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t,
- exit_call));
- values[3] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t,
- release_call));
- values[4] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t,
- task_err));
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_process_start");
- write_chart_dimension("calls", values[0]);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_thread_start");
- write_chart_dimension("calls", values[1]);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_task_exit");
- write_chart_dimension("calls", values[2]);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_task_released");
- write_chart_dimension("calls", values[3]);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_task_error");
- write_chart_dimension("calls", values[4]);
- ebpf_write_end_chart();
- }
- }
-
- ebpf_process_remove_pids();
-}
-
-/*****************************************************************
- *
- * READ INFORMATION FROM KERNEL RING
- *
- *****************************************************************/
-
-/**
- * Read the hash table and store data in the allocated vectors.
- *
- * @param stats vector where hash table statistics are stored
- * @param maps_per_core whether the data must be read from all cores
- */
-static void ebpf_read_process_hash_global_tables(netdata_idx_t *stats, int maps_per_core)
-{
- netdata_idx_t res[NETDATA_KEY_END_VECTOR];
- ebpf_read_global_table_stats(res,
- process_hash_values,
- process_maps[NETDATA_PROCESS_GLOBAL_TABLE].map_fd,
- maps_per_core,
- 0,
- NETDATA_KEY_END_VECTOR);
-
- ebpf_read_global_table_stats(stats,
- process_hash_values,
- process_maps[NETDATA_PROCESS_CTRL_TABLE].map_fd,
- maps_per_core,
- NETDATA_CONTROLLER_PID_TABLE_ADD,
- NETDATA_CONTROLLER_END);
-
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_EXIT].call = res[NETDATA_KEY_CALLS_DO_EXIT];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].call = res[NETDATA_KEY_CALLS_RELEASE_TASK];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_FORK].call = res[NETDATA_KEY_CALLS_DO_FORK];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLONE].call = res[NETDATA_KEY_CALLS_SYS_CLONE];
-
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_FORK].ecall = res[NETDATA_KEY_ERROR_DO_FORK];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ecall = res[NETDATA_KEY_ERROR_SYS_CLONE];
-}
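
When maps_per_core is set, the kernel keeps one slot per possible CPU and a single lookup returns all of them, so the slots must be summed before publishing; that is what ebpf_read_global_table_stats() does internally. A hedged sketch of the aggregation, assuming a per-CPU array map and libbpf:

    #include <bpf/bpf.h>
    #include <stdint.h>

    /* Read index `key` of a per-CPU array map and sum the per-CPU slots.
     * `vals` must hold at least `nprocs` elements; in real code nprocs
     * would come from libbpf_num_possible_cpus(). */
    static uint64_t read_percpu_total(int mapfd, uint32_t key,
                                      uint64_t *vals, int nprocs)
    {
        uint64_t total = 0;

        if (bpf_map_lookup_elem(mapfd, &key, vals))
            return 0;            /* key not present */

        for (int i = 0; i < nprocs; i++)
            total += vals[i];    /* one slot per possible CPU */

        return total;
    }
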
-
-/**
- * Update cgroup
- *
- * Update cgroup data based on the running PIDs.
- *
- * @param maps_per_core whether the data must be read from all cores
- */
-static void ebpf_update_process_cgroup(int maps_per_core)
-{
- ebpf_cgroup_target_t *ect;
- int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
-
- size_t length = sizeof(ebpf_process_stat_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
- pthread_mutex_lock(&mutex_cgroup_shm);
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- struct pid_on_target2 *pids;
- for (pids = ect->pids; pids; pids = pids->next) {
- int pid = pids->pid;
- ebpf_process_stat_t *out = &pids->ps;
- if (global_process_stats[pid]) {
- ebpf_process_stat_t *in = global_process_stats[pid];
-
- memcpy(out, in, sizeof(ebpf_process_stat_t));
- } else {
- if (bpf_map_lookup_elem(pid_fd, &pid, process_stat_vector)) {
- memset(out, 0, sizeof(ebpf_process_stat_t));
- }
-
- ebpf_process_apps_accumulator(process_stat_vector, maps_per_core);
-
- memcpy(out, process_stat_vector, sizeof(ebpf_process_stat_t));
-
- memset(process_stat_vector, 0, length);
- }
- }
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/*****************************************************************
- *
- * FUNCTIONS TO CREATE CHARTS
- *
- *****************************************************************/
-
-/**
- * Create process status chart
- *
- * @param family the chart family
- * @param name the chart name
- * @param axis the axis label
- * @param web the group name used to attach the chart on the dashboard
- * @param algorithm the algorithm used for the chart dimensions
- * @param order the order number of the specified chart
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_process_status_chart(char *family, char *name, char *axis,
- char *web, char *algorithm, int order, int update_every)
-{
- printf("CHART %s.%s '' 'Process not closed' '%s' '%s' '' line %d %d '' 'ebpf.plugin' 'process'\n",
- family,
- name,
- axis,
- web,
- order,
- update_every);
-
- printf("DIMENSION %s '' %s 1 1\n", status[0], algorithm);
- printf("DIMENSION %s '' %s 1 1\n", status[1], algorithm);
-}
-
-/**
- * Create global charts
- *
- * Call ebpf_create_chart to create the charts for the collector.
- *
- * @param em a pointer to the structure with the default values.
- */
-static void ebpf_create_global_charts(ebpf_module_t *em)
-{
- ebpf_create_chart(NETDATA_EBPF_SYSTEM_GROUP,
- NETDATA_PROCESS_SYSCALL,
- "Start process",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21002,
- ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK],
- 2, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_create_chart(NETDATA_EBPF_SYSTEM_GROUP,
- NETDATA_EXIT_SYSCALL,
- "Exit process",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21003,
- ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT],
- 2, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_process_status_chart(NETDATA_EBPF_SYSTEM_GROUP,
- NETDATA_PROCESS_STATUS_NAME,
- EBPF_COMMON_DIMENSION_DIFFERENCE,
- NETDATA_PROCESS_GROUP,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- 21004, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_EBPF_SYSTEM_GROUP,
- NETDATA_PROCESS_ERROR_NAME,
- "Fails to create process",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21005,
- ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK],
- 2, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
- }
-
- fflush(stdout);
-}
-
-/**
- * Create process apps charts
- *
- * Call ebpf_create_chart to create the charts on apps submenu.
- *
- * @param em a pointer to the structure with the default values.
- * @param ptr a pointer for the targets.
- */
-void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
-{
- struct ebpf_target *root = ptr;
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = root; w; w = w->next) {
- if (unlikely(!w->exposed))
- continue;
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_process_start",
- "Process started.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_process_start",
- 20161,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_thread_start",
- "Threads started.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_thread_start",
- 20162,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_task_exit",
- "Tasks starts exit process.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_task_exit",
- 20163,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_task_released",
- "Tasks released.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_task_released",
- 20164,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_task_error",
- "Errors to create process or threads.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_task_error",
- 20165,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
- }
- w->charts_created |= 1<<EBPF_MODULE_PROCESS_IDX;
- }
-
- em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED;
-}
-
-/*****************************************************************
- *
- * FUNCTIONS TO CLOSE THE THREAD
- *
- *****************************************************************/
-
-static void ebpf_obsolete_specific_process_charts(char *type, ebpf_module_t *em);
-
-/**
- * Obsolete services
- *
- * Obsolete all service charts created
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_process_services(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_TASK_PROCESS,
- "",
- "Process started",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20065,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_TASK_THREAD,
- "",
- "Threads started",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20066,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_TASK_CLOSE,
- "",
- "Tasks starts exit process.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20067,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_TASK_EXIT,
- "",
- "Tasks closed",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20068,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_TASK_ERROR,
- "",
- "Errors to create process or threads.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20069,
- em->update_every);
- }
-}
-
-/**
- * Obsolete cgroup chart
- *
- * Send obsolete for all charts created, before the thread closes.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static inline void ebpf_obsolete_process_cgroup_charts(ebpf_module_t *em) {
- pthread_mutex_lock(&mutex_cgroup_shm);
-
- ebpf_obsolete_process_services(em);
-
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- ebpf_obsolete_specific_process_charts(ect->name, em);
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Obsolete apps charts
- *
- * Obsolete apps charts.
- *
- * @param em a pointer to the structure with the default values.
- */
-void ebpf_obsolete_process_apps_charts(struct ebpf_module *em)
-{
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = apps_groups_root_target; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_PROCESS_IDX))))
- continue;
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_process_start",
- "Process started.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_process_start",
- 20161,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_thread_start",
- "Threads started.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_thread_start",
- 20162,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_task_exit",
- "Tasks starts exit process.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_task_exit",
- 20163,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_task_released",
- "Tasks released.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_task_released",
- 20164,
- update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_task_error",
- "Errors to create process or threads.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_task_error",
- 20165,
- update_every);
- }
-
- w->charts_created &= ~(1<<EBPF_MODULE_PROCESS_IDX);
- }
-}
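
Retiring a chart reuses the CHART command with the "obsolete" option, which tells the agent to hide the chart and stop expecting data; ebpf_write_chart_obsolete() wraps exactly that. A sketch for one of the app charts above (field values are illustrative, not taken from the source):

    #include <stdio.h>

    /* Sketch of how a chart is retired in the plugin protocol: the CHART
     * command is re-sent with the "obsolete" option. Names mirror the
     * calls above; the app name and values are illustrative. */
    static void obsolete_app_chart(const char *app, int update_every)
    {
        printf("CHART app.%s_ebpf_process_start '' 'Process started.' 'calls/s' "
               "'processes' 'app.ebpf_process_start' stacked 20161 %d obsolete\n",
               app, update_every);
    }
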
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_process_global(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_EBPF_SYSTEM_GROUP,
- NETDATA_PROCESS_SYSCALL,
- "",
- "Start process",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21002,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_EBPF_SYSTEM_GROUP,
- NETDATA_EXIT_SYSCALL,
- "",
- "Exit process",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21003,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_EBPF_SYSTEM_GROUP,
- NETDATA_PROCESS_STATUS_NAME,
- "",
- "Process not closed",
- EBPF_COMMON_DIMENSION_DIFFERENCE,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21004,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_EBPF_SYSTEM_GROUP,
- NETDATA_PROCESS_ERROR_NAME,
- "",
- "Fails to create process",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21005,
- em->update_every);
- }
-}
-
-/**
- * Process disable tracepoints
- *
- * Disable tracepoints when the plugin was responsible for enabling them.
- */
-static void ebpf_process_disable_tracepoints()
-{
- char *default_message = { "Cannot disable the tracepoint" };
- if (!was_sched_process_exit_enabled) {
- if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exit))
- netdata_log_error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exit);
- }
-
- if (!was_sched_process_exec_enabled) {
- if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exec))
- netdata_log_error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exec);
- }
-
- if (!was_sched_process_fork_enabled) {
- if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_fork))
- netdata_log_error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_fork);
- }
-}
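
ebpf_disable_tracing_values() and its enable counterpart ultimately write "0" or "1" to the tracepoint's enable file under tracefs, and the was_*_enabled flags ensure the plugin only switches off what it switched on. A minimal sketch of that mechanism; the tracefs mount point used here is an assumption:

    #include <stdio.h>

    /* Write "1" (enable) or "0" (disable) to a tracepoint's enable file,
     * e.g. set_tracepoint("sched", "sched_process_exit", 1). Returns 0 on
     * success, -1 otherwise. The tracefs path is an assumption. */
    static int set_tracepoint(const char *class, const char *event, int on)
    {
        char path[512];
        snprintf(path, sizeof(path),
                 "/sys/kernel/debug/tracing/events/%s/%s/enable", class, event);

        FILE *fp = fopen(path, "w");
        if (!fp)
            return -1;

        int ret = (fputs(on ? "1" : "0", fp) < 0) ? -1 : 0;
        fclose(fp);
        return ret;
    }
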
-
-/**
- * Process Exit
- *
- * Cancel child thread.
- *
- * @param ptr thread data.
- */
-static void ebpf_process_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
- if (em->cgroup_charts) {
- ebpf_obsolete_process_cgroup_charts(em);
- fflush(stdout);
- }
-
- if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
- ebpf_obsolete_process_apps_charts(em);
- }
-
- ebpf_obsolete_process_global(em);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_process_stat)
- ebpf_statistic_obsolete_aral_chart(em, process_disable_priority);
-#endif
-
- fflush(stdout);
- pthread_mutex_unlock(&lock);
- }
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
-
- if (em->objects) {
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- freez(process_hash_values);
- freez(process_stat_vector);
-
- ebpf_process_disable_tracepoints();
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- process_pid_fd = -1;
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/*****************************************************************
- *
- * FUNCTIONS WITH THE MAIN LOOP
- *
- *****************************************************************/
-
-
-/**
- * Sum PIDs
- *
- * Sum values for all targets.
- *
- * @param ps structure used to store data
- * @param pids input data
- */
-static void ebpf_process_sum_cgroup_pids(ebpf_process_stat_t *ps, struct pid_on_target2 *pids)
-{
- ebpf_process_stat_t accumulator;
- memset(&accumulator, 0, sizeof(accumulator));
-
- while (pids) {
- ebpf_process_stat_t *pps = &pids->ps;
-
- accumulator.exit_call += pps->exit_call;
- accumulator.release_call += pps->release_call;
- accumulator.create_process += pps->create_process;
- accumulator.create_thread += pps->create_thread;
-
- accumulator.task_err += pps->task_err;
-
- pids = pids->next;
- }
-
- ps->exit_call = (accumulator.exit_call >= ps->exit_call) ? accumulator.exit_call : ps->exit_call;
- ps->release_call = (accumulator.release_call >= ps->release_call) ? accumulator.release_call : ps->release_call;
- ps->create_process = (accumulator.create_process >= ps->create_process) ? accumulator.create_process : ps->create_process;
- ps->create_thread = (accumulator.create_thread >= ps->create_thread) ? accumulator.create_thread : ps->create_thread;
-
- ps->task_err = (accumulator.task_err >= ps->task_err) ? accumulator.task_err : ps->task_err;
-}
-
-/**
- * Send Specific Process data
- *
- * Send data for specific cgroup/apps.
- *
- * @param type chart type
- * @param values structure with values that will be sent to netdata
- * @param em the structure with thread information
- */
-static void ebpf_send_specific_process_data(char *type, ebpf_process_stat_t *values, ebpf_module_t *em)
-{
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_PROCESS, "");
- write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK].name,
- (long long) values->create_process);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_THREAD, "");
- write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_CLONE].name,
- (long long) values->create_thread);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_EXIT, "");
- write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT].name,
- (long long) values->release_call);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_CLOSE, "");
- write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].name,
- (long long) values->release_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_ERROR, "");
- write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT].name,
- (long long) values->task_err);
- ebpf_write_end_chart();
- }
-}
-
-/**
- * Create specific process charts
- *
- * Create charts for cgroup/application
- *
- * @param type the chart type.
- * @param em the structure with thread information
- */
-static void ebpf_create_specific_process_charts(char *type, ebpf_module_t *em)
-{
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_PROCESS, "Process started",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
- NETDATA_CGROUP_PROCESS_CREATE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5000,
- ebpf_create_global_dimension, &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_THREAD, "Threads started",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
- NETDATA_CGROUP_THREAD_CREATE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5001,
- ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_CLONE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_EXIT, "Tasks starts exit process.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
- NETDATA_CGROUP_PROCESS_EXIT_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5002,
- ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_CLOSE, "Tasks closed",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
- NETDATA_CGROUP_PROCESS_CLOSE_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5003,
- ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_ERROR, "Errors to create process or threads.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
- NETDATA_CGROUP_PROCESS_ERROR_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5004,
- ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
- }
-}
-
-/**
- * Obsolete specific process charts
- *
- * Obsolete charts for cgroup/application
- *
- * @param type the chart type.
- * @param em the structure with thread information
- */
-static void ebpf_obsolete_specific_process_charts(char *type, ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_PROCESS, "", "Process started",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CGROUP_PROCESS_CREATE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5000,
- em->update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_THREAD, "", "Threads started",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CGROUP_THREAD_CREATE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5001,
- em->update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_EXIT, "","Tasks starts exit process.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CGROUP_PROCESS_EXIT_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5002,
- em->update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_CLOSE, "","Tasks closed",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CGROUP_PROCESS_CLOSE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5003,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_ERROR, "","Errors to create process or threads.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CGROUP_PROCESS_ERROR_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5004,
- em->update_every);
- }
-}
-
-/**
- * Create Systemd process Charts
- *
- * Create charts when systemd is enabled
- *
- * @param em the structure with thread information
- **/
-static void ebpf_create_systemd_process_charts(ebpf_module_t *em)
-{
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_PROCESS, "Process started",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20065,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_CREATE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_THREAD, "Threads started",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20066,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_THREAD_CREATE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_CLOSE, "Tasks starts exit process.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20067,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_EXIT, "Tasks closed",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20068,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_CLOSE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_ERROR, "Errors to create process or threads.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20069,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
- }
-}
-
-/**
- * Send Systemd charts
- *
- * Send collected data to Netdata.
- *
- * @param em the structure with thread information
- */
-static void ebpf_send_systemd_process_charts(ebpf_module_t *em)
-{
- ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_ps.create_process);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_ps.create_thread);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_EXIT, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_ps.exit_call);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_ps.release_call);
- }
- }
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_ps.task_err);
- }
- }
- ebpf_write_end_chart();
- }
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param em the structure with thread information
-*/
-static void ebpf_process_send_cgroup_data(ebpf_module_t *em)
-{
- if (!ebpf_cgroup_pids)
- return;
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- ebpf_process_sum_cgroup_pids(&ect->publish_systemd_ps, ect->pids);
- }
-
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
-
- if (has_systemd) {
- if (send_cgroup_chart) {
- ebpf_create_systemd_process_charts(em);
- }
-
- ebpf_send_systemd_process_charts(em);
- }
-
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART) && ect->updated) {
- ebpf_create_specific_process_charts(ect->name, em);
- ect->flags |= NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART;
- }
-
- if (ect->flags & NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART) {
- if (ect->updated) {
- ebpf_send_specific_process_data(ect->name, &ect->publish_systemd_ps, em);
- } else {
- ebpf_obsolete_specific_process_charts(ect->name, em);
- ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART;
- }
- }
- }
-
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Update Cgroup algorithm
- *
- * Change algorithm from absolute to incremental
- */
-void ebpf_process_update_cgroup_algorithm()
-{
- int i;
- for (i = 0; i < NETDATA_KEY_PUBLISH_PROCESS_END; i++) {
- netdata_publish_syscall_t *ptr = &process_publish_aggregated[i];
- ptr->algorithm = ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX];
- }
-}
-
-/**
- * Main loop for this collector.
- *
- * @param em the structure with thread information
- */
-static void process_collector(ebpf_module_t *em)
-{
- heartbeat_t hb;
- heartbeat_init(&hb);
- int publish_global = em->global_charts;
- int cgroups = em->cgroup_charts;
- pthread_mutex_lock(&ebpf_exit_cleanup);
- process_pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- if (cgroups)
- ebpf_process_update_cgroup_algorithm();
-
- int update_every = em->update_every;
- int counter = update_every - 1;
- int maps_per_core = em->maps_per_core;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- netdata_idx_t *stats = em->hash_table_stats;
- memset(stats, 0, sizeof(em->hash_table_stats));
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
- if (ebpf_plugin_exit)
- break;
-
- if (++counter == update_every) {
- counter = 0;
-
- ebpf_read_process_hash_global_tables(stats, maps_per_core);
-
- netdata_apps_integration_flags_t apps_enabled = em->apps_charts;
- pthread_mutex_lock(&collect_data_mutex);
-
- if (ebpf_all_pids_count > 0) {
- if (cgroups && shm_ebpf_cgroup.header) {
- ebpf_update_process_cgroup(maps_per_core);
- }
- }
-
- pthread_mutex_lock(&lock);
-
- if (publish_global) {
- ebpf_process_send_data(em);
- }
-
- if (apps_enabled & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
- ebpf_process_send_apps_data(apps_groups_root_target, em);
- }
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_process_stat)
- ebpf_send_data_aral_chart(ebpf_aral_process_stat, em);
-#endif
-
- if (cgroups && shm_ebpf_cgroup.header) {
- ebpf_process_send_cgroup_data(em);
- }
-
- pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-
- fflush(stdout);
- }
-}
-
-/*****************************************************************
- *
- * FUNCTIONS TO START THREAD
- *
- *****************************************************************/
-
-/**
- * Allocate vectors used with this thread.
- * We do not test the return value, because callocz already does this and shuts down
- * the software when an allocation fails.
- *
- * @param length is the length for the vectors used inside the collector.
- */
-static void ebpf_process_allocate_global_vectors(size_t length)
-{
- memset(process_aggregated_data, 0, length * sizeof(netdata_syscall_stat_t));
- memset(process_publish_aggregated, 0, length * sizeof(netdata_publish_syscall_t));
- process_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
- process_stat_vector = callocz(ebpf_nprocs, sizeof(ebpf_process_stat_t));
-
- global_process_stats = callocz((size_t)pid_max, sizeof(ebpf_process_stat_t *));
-}
-
-static void change_syscalls()
-{
- static char *lfork = { "do_fork" };
- process_id_names[NETDATA_KEY_PUBLISH_PROCESS_FORK] = lfork;
-}
-
-/**
- * Set local pointers
- *
- */
-static void set_local_pointers()
-{
- if (isrh >= NETDATA_MINIMUM_RH_VERSION && isrh < NETDATA_RH_8)
- change_syscalls();
-}
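
set_local_pointers() exists because Red Hat backports diverged from mainline naming: RHEL 7-era kernels still expose the fork entry point as do_fork, while contemporary mainline kernels use _do_fork (later renamed kernel_clone in 5.10). A sketch of the selection, with the version test reduced to a plain RHEL major version as an illustrative simplification of the real macros:

    /* Pick the fork symbol to trace based on the running kernel. The RHEL
     * version test is a stand-in for the plugin's real version macros. */
    static const char *select_fork_symbol(int rhel_major)
    {
        /* RHEL 7-era kernels kept the older do_fork() name. */
        if (rhel_major >= 7 && rhel_major < 8)
            return "do_fork";

        return "_do_fork";  /* mainline name until it became kernel_clone() */
    }
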
-
-/*****************************************************************
- *
- * EBPF PROCESS THREAD
- *
- *****************************************************************/
-
-/**
- * Enable tracepoints
- *
- * Enable necessary tracepoints for thread.
- *
- * @return It returns 0 on success and -1 otherwise
- */
-static int ebpf_process_enable_tracepoints()
-{
- int test = ebpf_is_tracepoint_enabled(tracepoint_sched_type, tracepoint_sched_process_exit);
- if (test == -1)
- return -1;
- else if (!test) {
- if (ebpf_enable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exit))
- return -1;
- }
- was_sched_process_exit_enabled = test;
-
- test = ebpf_is_tracepoint_enabled(tracepoint_sched_type, tracepoint_sched_process_exec);
- if (test == -1)
- return -1;
- else if (!test) {
- if (ebpf_enable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exec))
- return -1;
- }
- was_sched_process_exec_enabled = test;
-
- test = ebpf_is_tracepoint_enabled(tracepoint_sched_type, tracepoint_sched_process_fork);
- if (test == -1)
- return -1;
- else if (!test) {
- if (ebpf_enable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_fork))
- return -1;
- }
- was_sched_process_fork_enabled = test;
-
- return 0;
-}
-
-/**
- * Process thread
- *
- * Thread used to generate process charts.
- *
- * @param ptr a pointer to `struct ebpf_module`
- *
- * @return It always returns NULL.
- */
-void *ebpf_process_thread(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_process_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = process_maps;
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (ebpf_process_enable_tracepoints()) {
- em->enabled = em->global_charts = em->apps_charts = em->cgroup_charts = NETDATA_THREAD_EBPF_STOPPING;
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- pthread_mutex_lock(&lock);
- ebpf_process_allocate_global_vectors(NETDATA_KEY_PUBLISH_PROCESS_END);
-
- ebpf_update_pid_table(&process_maps[0], em);
-
- set_local_pointers();
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- em->enabled = em->global_charts = em->apps_charts = em->cgroup_charts = NETDATA_THREAD_EBPF_STOPPING;
- }
-
- int algorithms[NETDATA_KEY_PUBLISH_PROCESS_END] = {
- NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
- };
-
- ebpf_global_labels(
- process_aggregated_data, process_publish_aggregated, process_dimension_names, process_id_names,
- algorithms, NETDATA_KEY_PUBLISH_PROCESS_END);
-
- ebpf_create_global_charts(em);
-
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_process_stat)
- process_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_PROC_ARAL_NAME, em);
-#endif
-
- pthread_mutex_unlock(&lock);
-
- process_collector(em);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- ebpf_update_disabled_plugin_stats(em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h
deleted file mode 100644
index 310b321d6..000000000
--- a/collectors/ebpf.plugin/ebpf_process.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_PROCESS_H
-#define NETDATA_EBPF_PROCESS_H 1
-
-// Module name & description
-#define NETDATA_EBPF_MODULE_NAME_PROCESS "process"
-#define NETDATA_EBPF_MODULE_PROCESS_DESC "Monitor information about process life. This thread is integrated with apps and cgroup."
-
-// Groups used on Dashboard
-#define NETDATA_PROCESS_GROUP "processes"
-#define NETDATA_PROCESS_CGROUP_GROUP "processes (eBPF)"
-
-// Global chart name
-#define NETDATA_EXIT_SYSCALL "exit"
-#define NETDATA_PROCESS_SYSCALL "process_thread"
-#define NETDATA_PROCESS_ERROR_NAME "task_error"
-#define NETDATA_PROCESS_STATUS_NAME "process_status"
-
-// Charts created on Apps submenu
-#define NETDATA_SYSCALL_APPS_TASK_PROCESS "process_create"
-#define NETDATA_SYSCALL_APPS_TASK_THREAD "thread_create"
-#define NETDATA_SYSCALL_APPS_TASK_EXIT "task_exit"
-#define NETDATA_SYSCALL_APPS_TASK_CLOSE "task_close"
-#define NETDATA_SYSCALL_APPS_TASK_ERROR "task_error"
-
-// Process configuration name
-#define NETDATA_PROCESS_CONFIG_FILE "process.conf"
-
-// Contexts
-#define NETDATA_CGROUP_PROCESS_CREATE_CONTEXT "cgroup.process_create"
-#define NETDATA_CGROUP_THREAD_CREATE_CONTEXT "cgroup.thread_create"
-#define NETDATA_CGROUP_PROCESS_CLOSE_CONTEXT "cgroup.task_close"
-#define NETDATA_CGROUP_PROCESS_EXIT_CONTEXT "cgroup.task_exit"
-#define NETDATA_CGROUP_PROCESS_ERROR_CONTEXT "cgroup.task_error"
-
-#define NETDATA_SYSTEMD_PROCESS_CREATE_CONTEXT "services.process_create"
-#define NETDATA_SYSTEMD_THREAD_CREATE_CONTEXT "services.thread_create"
-#define NETDATA_SYSTEMD_PROCESS_CLOSE_CONTEXT "services.task_close"
-#define NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT "services.task_exit"
-#define NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT "services.task_error"
-
-#define NETDATA_EBPF_CGROUP_UPDATE 30
-
-enum netdata_ebpf_stats_order {
- NETDATA_EBPF_ORDER_STAT_THREADS = 140000,
- NETDATA_EBPF_ORDER_STAT_LIFE_TIME,
- NETDATA_EBPF_ORDER_STAT_LOAD_METHOD,
- NETDATA_EBPF_ORDER_STAT_KERNEL_MEMORY,
- NETDATA_EBPF_ORDER_STAT_HASH_TABLES,
- NETDATA_EBPF_ORDER_STAT_HASH_CORE,
- NETDATA_EBPF_ORDER_STAT_HASH_GLOBAL_TABLE_TOTAL,
- NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_ADDED,
- NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_REMOVED,
- NETATA_EBPF_ORDER_STAT_ARAL_BEGIN,
- NETDATA_EBPF_ORDER_FUNCTION_PER_THREAD,
-};
-
-enum netdata_ebpf_load_mode_stats{
- NETDATA_EBPF_LOAD_STAT_LEGACY,
- NETDATA_EBPF_LOAD_STAT_CORE,
-
- NETDATA_EBPF_LOAD_STAT_END
-};
-
-enum netdata_ebpf_thread_per_core{
- NETDATA_EBPF_THREAD_PER_CORE,
- NETDATA_EBPF_THREAD_UNIQUE,
-
- NETDATA_EBPF_PER_CORE_END
-};
-
-// Index from kernel
-typedef enum ebpf_process_index {
- NETDATA_KEY_CALLS_DO_EXIT,
-
- NETDATA_KEY_CALLS_RELEASE_TASK,
-
- NETDATA_KEY_CALLS_DO_FORK,
- NETDATA_KEY_ERROR_DO_FORK,
-
- NETDATA_KEY_CALLS_SYS_CLONE,
- NETDATA_KEY_ERROR_SYS_CLONE,
-
- NETDATA_KEY_END_VECTOR
-} ebpf_process_index_t;
-
-// This enum acts as an index for publish vector.
-// Do not change the enum order because we use
-// different algorithms to make charts with incremental
-// values (the three initial positions) and absolute values
-// (the remaining charts).
-typedef enum netdata_publish_process {
- NETDATA_KEY_PUBLISH_PROCESS_EXIT,
- NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK,
- NETDATA_KEY_PUBLISH_PROCESS_FORK,
- NETDATA_KEY_PUBLISH_PROCESS_CLONE,
-
- NETDATA_KEY_PUBLISH_PROCESS_END
-} netdata_publish_process_t;
-
-enum ebpf_process_tables {
- NETDATA_PROCESS_PID_TABLE,
- NETDATA_PROCESS_GLOBAL_TABLE,
- NETDATA_PROCESS_CTRL_TABLE
-};
-
-extern struct config process_config;
-
-#endif /* NETDATA_EBPF_PROCESS_H */
diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c
deleted file mode 100644
index f14eb67d0..000000000
--- a/collectors/ebpf.plugin/ebpf_shm.c
+++ /dev/null
@@ -1,1327 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_shm.h"
-
-static char *shm_dimension_name[NETDATA_SHM_END] = { "get", "at", "dt", "ctl" };
-static netdata_syscall_stat_t shm_aggregated_data[NETDATA_SHM_END];
-static netdata_publish_syscall_t shm_publish_aggregated[NETDATA_SHM_END];
-
-netdata_publish_shm_t *shm_vector = NULL;
-
-static netdata_idx_t shm_hash_values[NETDATA_SHM_END];
-static netdata_idx_t *shm_values = NULL;
-
-struct config shm_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-static ebpf_local_maps_t shm_maps[] = {{.name = "tbl_pid_shm", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = "shm_ctrl", .internal_input = NETDATA_CONTROLLER_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "tbl_shm", .internal_input = NETDATA_SHM_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0}};
-
-netdata_ebpf_targets_t shm_targets[] = { {.name = "shmget", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "shmat", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "shmdt", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "shmctl", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-
-#ifdef NETDATA_DEV_MODE
-int shm_disable_priority;
-#endif
-
-#ifdef LIBBPF_MAJOR_VERSION
-/*****************************************************************
- *
- * BTF FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * Disable tracepoint
- *
- * Disable all tracepoints so another attach method can be used exclusively.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_shm_disable_tracepoint(struct shm_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_syscall_shmget, false);
- bpf_program__set_autoload(obj->progs.netdata_syscall_shmat, false);
- bpf_program__set_autoload(obj->progs.netdata_syscall_shmdt, false);
- bpf_program__set_autoload(obj->progs.netdata_syscall_shmctl, false);
-}
-
-/**
- * Disable probe
- *
- * Disable all probes so another attach method can be used exclusively.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_disable_probe(struct shm_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_shmget_probe, false);
- bpf_program__set_autoload(obj->progs.netdata_shmat_probe, false);
- bpf_program__set_autoload(obj->progs.netdata_shmdt_probe, false);
- bpf_program__set_autoload(obj->progs.netdata_shmctl_probe, false);
- bpf_program__set_autoload(obj->progs.netdata_shm_release_task_probe, false);
-}
-
-/**
- * Disable trampoline
- *
- * Disable all trampolines so another attach method can be used exclusively.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_disable_trampoline(struct shm_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_shmget_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_shmat_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_shmdt_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_shmctl_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_shm_release_task_fentry, false);
-}
-
-/**
- * Set trampoline target
- *
- * Set the targets we will monitor.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_set_trampoline_target(struct shm_bpf *obj)
-{
- char syscall[NETDATA_EBPF_MAX_SYSCALL_LENGTH + 1];
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH,
- shm_targets[NETDATA_KEY_SHMGET_CALL].name, running_on_kernel);
-
- bpf_program__set_attach_target(obj->progs.netdata_shmget_fentry, 0,
- syscall);
-
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH,
- shm_targets[NETDATA_KEY_SHMAT_CALL].name, running_on_kernel);
- bpf_program__set_attach_target(obj->progs.netdata_shmat_fentry, 0,
- syscall);
-
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH,
- shm_targets[NETDATA_KEY_SHMDT_CALL].name, running_on_kernel);
- bpf_program__set_attach_target(obj->progs.netdata_shmdt_fentry, 0,
- syscall);
-
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH,
- shm_targets[NETDATA_KEY_SHMCTL_CALL].name, running_on_kernel);
- bpf_program__set_attach_target(obj->progs.netdata_shmctl_fentry, 0,
- syscall);
-
- bpf_program__set_attach_target(obj->progs.netdata_shm_release_task_fentry, 0,
- EBPF_COMMON_FNCT_CLEAN_UP);
-}
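
The function above is the trampoline (fentry) counterpart of the kprobe attach further below: trampoline targets must be resolved before the object is loaded. A minimal sketch of the same libbpf pattern, assuming a hypothetical skeleton named my_bpf with one fentry program probe_shmget; the target name "__x64_sys_shmget" is an assumption about what ebpf_select_host_prefix would resolve on an x86_64 kernel:

    struct my_bpf *obj = my_bpf__open();   /* skeleton generated by bpftool gen */

    /* fentry targets are set before load, not at attach time */
    bpf_program__set_attach_target(obj->progs.probe_shmget, 0, "__x64_sys_shmget");

    if (!my_bpf__load(obj))
        my_bpf__attach(obj);               /* no per-program target needed here */
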
-
-/**
- * SHM Attach Probe
- *
- * Attach probes to target
- *
- * @param obj is the main structure for bpf objects.
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_shm_attach_probe(struct shm_bpf *obj)
-{
- char syscall[NETDATA_EBPF_MAX_SYSCALL_LENGTH + 1];
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH,
- shm_targets[NETDATA_KEY_SHMGET_CALL].name, running_on_kernel);
-
- obj->links.netdata_shmget_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmget_probe,
- false, syscall);
- int ret = (int)libbpf_get_error(obj->links.netdata_shmget_probe);
- if (ret)
- return -1;
-
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH,
- shm_targets[NETDATA_KEY_SHMAT_CALL].name, running_on_kernel);
- obj->links.netdata_shmat_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmat_probe,
- false, syscall);
- ret = (int)libbpf_get_error(obj->links.netdata_shmat_probe);
- if (ret)
- return -1;
-
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH,
- shm_targets[NETDATA_KEY_SHMDT_CALL].name, running_on_kernel);
- obj->links.netdata_shmdt_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmdt_probe,
- false, syscall);
- ret = (int)libbpf_get_error(obj->links.netdata_shmdt_probe);
- if (ret)
- return -1;
-
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH,
- shm_targets[NETDATA_KEY_SHMCTL_CALL].name, running_on_kernel);
- obj->links.netdata_shmctl_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmctl_probe,
- false, syscall);
- ret = (int)libbpf_get_error(obj->links.netdata_shmctl_probe);
- if (ret)
- return -1;
-
- obj->links.netdata_shm_release_task_probe = bpf_program__attach_kprobe(obj->progs.netdata_shm_release_task_probe,
- false, EBPF_COMMON_FNCT_CLEAN_UP);
- ret = (int)libbpf_get_error(obj->links.netdata_shm_release_task_probe);
- if (ret)
- return -1;
-
-
- return 0;
-}
-
-/**
- * Set hash tables
- *
- * Set the map file descriptors according to the values returned by the kernel.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_shm_set_hash_tables(struct shm_bpf *obj)
-{
- shm_maps[NETDATA_PID_SHM_TABLE].map_fd = bpf_map__fd(obj->maps.tbl_pid_shm);
- shm_maps[NETDATA_SHM_CONTROLLER].map_fd = bpf_map__fd(obj->maps.shm_ctrl);
- shm_maps[NETDATA_SHM_GLOBAL_TABLE].map_fd = bpf_map__fd(obj->maps.tbl_shm);
-}
-
-/**
- * Disable Release Task
- *
- * Disable the release_task programs when neither apps nor cgroup charts are enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_shm_disable_release_task(struct shm_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_shm_release_task_probe, false);
- bpf_program__set_autoload(obj->progs.netdata_shm_release_task_fentry, false);
-}
-
-/**
- * Adjust Map Size
- *
- * Resize maps according to input from users.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- */
-static void ebpf_shm_adjust_map(struct shm_bpf *obj, ebpf_module_t *em)
-{
- ebpf_update_map_size(obj->maps.tbl_pid_shm, &shm_maps[NETDATA_PID_SHM_TABLE],
- em, bpf_map__name(obj->maps.tbl_pid_shm));
-
- ebpf_update_map_type(obj->maps.tbl_shm, &shm_maps[NETDATA_SHM_GLOBAL_TABLE]);
- ebpf_update_map_type(obj->maps.tbl_pid_shm, &shm_maps[NETDATA_PID_SHM_TABLE]);
- ebpf_update_map_type(obj->maps.shm_ctrl, &shm_maps[NETDATA_SHM_CONTROLLER]);
-}
-
-/**
- * Load and attach
- *
- * Load and attach the eBPF code in kernel.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- *
- * @return it returns 0 on success and -1 otherwise
- */
-static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *em)
-{
- netdata_ebpf_targets_t *shmt = em->targets;
- netdata_ebpf_program_loaded_t test = shmt[NETDATA_KEY_SHMGET_CALL].mode;
-
- // We test only one target, because all of them share the same mode.
- if (test == EBPF_LOAD_TRAMPOLINE) {
- ebpf_shm_disable_tracepoint(obj);
- ebpf_disable_probe(obj);
-
- ebpf_set_trampoline_target(obj);
- } else if (test == EBPF_LOAD_PROBE || test == EBPF_LOAD_RETPROBE) {
- ebpf_shm_disable_tracepoint(obj);
- ebpf_disable_trampoline(obj);
- } else {
- ebpf_disable_probe(obj);
- ebpf_disable_trampoline(obj);
- }
-
- ebpf_shm_adjust_map(obj, em);
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_shm_disable_release_task(obj);
-
- int ret = shm_bpf__load(obj);
- if (!ret) {
- if (test != EBPF_LOAD_PROBE && test != EBPF_LOAD_RETPROBE)
- shm_bpf__attach(obj);
- else
- ret = ebpf_shm_attach_probe(obj);
-
- if (!ret)
- ebpf_shm_set_hash_tables(obj);
- }
-
- return ret;
-}
-#endif
-/*****************************************************************
- * FUNCTIONS TO CLOSE THE THREAD
- *****************************************************************/
-
-static void ebpf_obsolete_specific_shm_charts(char *type, int update_every);
-
-/**
- * Obsolete services
- *
- * Mark all service charts created by this thread as obsolete.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_shm_services(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SHMGET_CHART,
- "",
- "Calls to syscall shmget(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20191,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SHMAT_CHART,
- "",
- "Calls to syscall shmat(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20192,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SHMDT_CHART,
- "",
- "Calls to syscall shmdt(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20193,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SHMCTL_CHART,
- "",
- "Calls to syscall shmctl(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20194,
- em->update_every);
-}
-
-/**
- * Obsolete cgroup chart
- *
- * Send obsolete messages for all charts created, before closing the thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static inline void ebpf_obsolete_shm_cgroup_charts(ebpf_module_t *em) {
- pthread_mutex_lock(&mutex_cgroup_shm);
-
- ebpf_obsolete_shm_services(em);
-
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- ebpf_obsolete_specific_shm_charts(ect->name, em->update_every);
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Obsolete apps charts
- *
- * Obsolete apps charts.
- *
- * @param em a pointer to the structure with the default values.
- */
-void ebpf_obsolete_shm_apps_charts(struct ebpf_module *em)
-{
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = apps_groups_root_target; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SHM_IDX))))
- continue;
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_shmget_call",
- "Calls to syscall shmget(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_shmget_call",
- 20191,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_shmat_call",
- "Calls to syscall shmat(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_shmat_call",
- 20192,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_shmdt_call",
- "Calls to syscall shmdt(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_shmdt_call",
- 20193,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_shmctl_call",
- "Calls to syscall shmctl(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_shmctl_call",
- 20194,
- update_every);
-
- w->charts_created &= ~(1<<EBPF_MODULE_SHM_IDX);
- }
-}
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_shm_global(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_EBPF_SYSTEM_GROUP,
- NETDATA_SHM_GLOBAL_CHART,
- "",
- "Calls to shared memory system calls",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SYSTEM_IPC_SHM_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_CALLS,
- em->update_every);
-}
-
-/**
- * SHM Exit
- *
- * Cancel child thread.
- *
- * @param ptr thread data.
- */
-static void ebpf_shm_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
- if (em->cgroup_charts) {
- ebpf_obsolete_shm_cgroup_charts(em);
- fflush(stdout);
- }
-
- if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
- ebpf_obsolete_shm_apps_charts(em);
- }
-
- ebpf_obsolete_shm_global(em);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_shm_pid)
- ebpf_statistic_obsolete_aral_chart(em, shm_disable_priority);
-#endif
-
- fflush(stdout);
- pthread_mutex_unlock(&lock);
- }
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
-
-#ifdef LIBBPF_MAJOR_VERSION
- if (shm_bpf_obj) {
- shm_bpf__destroy(shm_bpf_obj);
- shm_bpf_obj = NULL;
- }
-#endif
-
- if (em->objects) {
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/*****************************************************************
- * COLLECTOR THREAD
- *****************************************************************/
-
-/**
- * Apps Accumulator
- *
- * Sum all values read from the kernel and store them in the first position.
- *
- * @param out the vector with read values.
- * @param maps_per_core whether values must be read from all cores.
- */
-static void shm_apps_accumulator(netdata_publish_shm_t *out, int maps_per_core)
-{
- int i, end = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_publish_shm_t *total = &out[0];
- for (i = 1; i < end; i++) {
- netdata_publish_shm_t *w = &out[i];
- total->get += w->get;
- total->at += w->at;
- total->dt += w->dt;
- total->ctl += w->ctl;
- }
-}
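
The accumulator above exists because of how per-CPU maps are read from userspace: a single bpf_map_lookup_elem() returns one value slot per possible CPU, and the slots then have to be folded together. A standalone sketch of the same idea, assuming fd holds the descriptor of the per-CPU hash map keyed by PID and using netdata_publish_shm_t from the header below:

    int ncpus = libbpf_num_possible_cpus();   /* slots returned per lookup */
    netdata_publish_shm_t vals[ncpus];
    __u32 key = 1234;                          /* a tracked PID (example value) */

    if (!bpf_map_lookup_elem(fd, &key, vals)) {
        netdata_publish_shm_t total = {0};
        for (int i = 0; i < ncpus; i++) {      /* fold every CPU's counters */
            total.get += vals[i].get;
            total.at  += vals[i].at;
            total.dt  += vals[i].dt;
            total.ctl += vals[i].ctl;
        }
    }
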
-
-/**
- * Fill PID
- *
- * Fill the PID structure with data read from the hash tables.
- *
- * @param current_pid the PID whose data is being collected.
- * @param publish values read from the hash tables.
- */
-static void shm_fill_pid(uint32_t current_pid, netdata_publish_shm_t *publish)
-{
- netdata_publish_shm_t *curr = shm_pid[current_pid];
- if (!curr) {
- curr = ebpf_shm_stat_get( );
- shm_pid[current_pid] = curr;
- }
-
- memcpy(curr, publish, sizeof(netdata_publish_shm_t));
-}
-
-/**
- * Update cgroup
- *
- * Update cgroup data based on values read from the PID hash table.
- *
- * @param maps_per_core whether values must be read from all cores.
- */
-static void ebpf_update_shm_cgroup(int maps_per_core)
-{
- netdata_publish_shm_t *cv = shm_vector;
- int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
- size_t length = sizeof(netdata_publish_shm_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- ebpf_cgroup_target_t *ect;
-
- memset(cv, 0, length);
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- struct pid_on_target2 *pids;
- for (pids = ect->pids; pids; pids = pids->next) {
- int pid = pids->pid;
- netdata_publish_shm_t *out = &pids->shm;
- if (likely(shm_pid) && shm_pid[pid]) {
- netdata_publish_shm_t *in = shm_pid[pid];
-
- memcpy(out, in, sizeof(netdata_publish_shm_t));
- } else {
- if (!bpf_map_lookup_elem(fd, &pid, cv)) {
- shm_apps_accumulator(cv, maps_per_core);
-
- memcpy(out, cv, sizeof(netdata_publish_shm_t));
-
- // now that we've consumed the value, zero it out in the map.
- memset(cv, 0, length);
- bpf_map_update_elem(fd, &pid, cv, BPF_EXIST);
- }
- }
- }
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Read APPS table
- *
- * Read the apps table and store data inside the structure.
- *
- * @param maps_per_core whether values must be read from all cores.
- */
-static void read_shm_apps_table(int maps_per_core)
-{
- netdata_publish_shm_t *cv = shm_vector;
- uint32_t key;
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
- int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
- size_t length = sizeof(netdata_publish_shm_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- while (pids) {
- key = pids->pid;
-
- if (bpf_map_lookup_elem(fd, &key, cv)) {
- pids = pids->next;
- continue;
- }
-
- shm_apps_accumulator(cv, maps_per_core);
-
- shm_fill_pid(key, cv);
-
- // now that we've consumed the value, zero it out in the map.
- memset(cv, 0, length);
- bpf_map_update_elem(fd, &key, cv, BPF_EXIST);
-
- pids = pids->next;
- }
-}
-
-/**
- * Send global charts to netdata agent.
- */
-static void shm_send_global()
-{
- ebpf_write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, NETDATA_SHM_GLOBAL_CHART, "");
- write_chart_dimension(
- shm_publish_aggregated[NETDATA_KEY_SHMGET_CALL].dimension,
- (long long) shm_hash_values[NETDATA_KEY_SHMGET_CALL]
- );
- write_chart_dimension(
- shm_publish_aggregated[NETDATA_KEY_SHMAT_CALL].dimension,
- (long long) shm_hash_values[NETDATA_KEY_SHMAT_CALL]
- );
- write_chart_dimension(
- shm_publish_aggregated[NETDATA_KEY_SHMDT_CALL].dimension,
- (long long) shm_hash_values[NETDATA_KEY_SHMDT_CALL]
- );
- write_chart_dimension(
- shm_publish_aggregated[NETDATA_KEY_SHMCTL_CALL].dimension,
- (long long) shm_hash_values[NETDATA_KEY_SHMCTL_CALL]
- );
- ebpf_write_end_chart();
-}
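
The begin/dimension/end helpers used here write Netdata's plain-text external-plugin protocol to stdout. Assuming NETDATA_EBPF_SYSTEM_GROUP resolves to "system" (the chart id "shared_memory_calls" is defined in the header below) and with illustrative counter values, one iteration of this function emits roughly:

    BEGIN system.shared_memory_calls
    SET get = 42
    SET at = 42
    SET dt = 40
    SET ctl = 3
    END
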
-
-/**
- * Read global counter
- *
- * Read the table with the number of calls for all functions.
- *
- * @param stats vector used to read data from the control table.
- * @param maps_per_core whether values must be read from all cores.
- */
-static void ebpf_shm_read_global_table(netdata_idx_t *stats, int maps_per_core)
-{
- ebpf_read_global_table_stats(shm_hash_values,
- shm_values,
- shm_maps[NETDATA_SHM_GLOBAL_TABLE].map_fd,
- maps_per_core,
- NETDATA_KEY_SHMGET_CALL,
- NETDATA_SHM_END);
-
- ebpf_read_global_table_stats(stats,
- shm_values,
- shm_maps[NETDATA_SHM_CONTROLLER].map_fd,
- maps_per_core,
- NETDATA_CONTROLLER_PID_TABLE_ADD,
- NETDATA_CONTROLLER_END);
-}
-
-/**
- * Sum values for all targets.
- */
-static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct ebpf_pid_on_target *root)
-{
- while (root) {
- int32_t pid = root->pid;
- netdata_publish_shm_t *w = shm_pid[pid];
- if (w) {
- shm->get += w->get;
- shm->at += w->at;
- shm->dt += w->dt;
- shm->ctl += w->ctl;
-
- // reset for next collection.
- w->get = 0;
- w->at = 0;
- w->dt = 0;
- w->ctl = 0;
- }
- root = root->next;
- }
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param root the target list.
-*/
-void ebpf_shm_send_apps_data(struct ebpf_target *root)
-{
- struct ebpf_target *w;
- for (w = root; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SHM_IDX))))
- continue;
-
- ebpf_shm_sum_pids(&w->shm, w->root_pid);
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_shmget_call");
- write_chart_dimension("calls", (long long) w->shm.get);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_shmat_call");
- write_chart_dimension("calls", (long long) w->shm.at);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_shmdt_call");
- write_chart_dimension("calls", (long long) w->shm.dt);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_shmctl_call");
- write_chart_dimension("calls", (long long) w->shm.ctl);
- ebpf_write_end_chart();
- }
-}
-
-/**
- * Sum values for all targets.
- */
-static void ebpf_shm_sum_cgroup_pids(netdata_publish_shm_t *shm, struct pid_on_target2 *root)
-{
- netdata_publish_shm_t shmv;
- memset(&shmv, 0, sizeof(shmv));
- while (root) {
- netdata_publish_shm_t *w = &root->shm;
- shmv.get += w->get;
- shmv.at += w->at;
- shmv.dt += w->dt;
- shmv.ctl += w->ctl;
-
- root = root->next;
- }
-
- memcpy(shm, &shmv, sizeof(shmv));
-}
-
-/**
- * Create specific shared memory charts
- *
- * Create charts for cgroup/application.
- *
- * @param type the chart type.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_specific_shm_charts(char *type, int update_every)
-{
- ebpf_create_chart(type, NETDATA_SHMGET_CHART,
- "Calls to syscall shmget(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_CGROUP_SHM_GET_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5800,
- ebpf_create_global_dimension,
- &shm_publish_aggregated[NETDATA_KEY_SHMGET_CALL],
- 1,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SHM);
-
- ebpf_create_chart(type, NETDATA_SHMAT_CHART,
- "Calls to syscall shmat(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_CGROUP_SHM_AT_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5801,
- ebpf_create_global_dimension,
- &shm_publish_aggregated[NETDATA_KEY_SHMAT_CALL],
- 1,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SHM);
-
- ebpf_create_chart(type, NETDATA_SHMDT_CHART,
- "Calls to syscall shmdt(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_CGROUP_SHM_DT_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5802,
- ebpf_create_global_dimension,
- &shm_publish_aggregated[NETDATA_KEY_SHMDT_CALL],
- 1,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SHM);
-
- ebpf_create_chart(type, NETDATA_SHMCTL_CHART,
- "Calls to syscall shmctl(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_CGROUP_SHM_CTL_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5803,
- ebpf_create_global_dimension,
- &shm_publish_aggregated[NETDATA_KEY_SHMCTL_CALL],
- 1,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SHM);
-}
-
-/**
- * Obsolete specific shared memory charts
- *
- * Obsolete charts for cgroup/application.
- *
- * @param type the chart type.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_obsolete_specific_shm_charts(char *type, int update_every)
-{
- ebpf_write_chart_obsolete(type, NETDATA_SHMGET_CHART,
- "",
- "Calls to syscall shmget(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_GET_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5800, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_SHMAT_CHART,
- "",
- "Calls to syscall shmat(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_AT_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5801, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_SHMDT_CHART,
- "",
- "Calls to syscall shmdt(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_DT_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5802, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_SHMCTL_CHART,
- "",
- "Calls to syscall shmctl(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_CTL_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5803, update_every);
-}
-
-/**
- * Create Systemd Shared Memory Charts
- *
- * Create charts when systemd is enabled
- *
- * @param update_every value to overwrite the update frequency set by the server.
- **/
-static void ebpf_create_systemd_shm_charts(int update_every)
-{
- ebpf_create_charts_on_systemd(NETDATA_SHMGET_CHART,
- "Calls to syscall shmget(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20191,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_SHM_GET_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SHMAT_CHART,
- "Calls to syscall shmat(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20192,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_SHM_AT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SHMDT_CHART,
- "Calls to syscall shmdt(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20193,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_SHM_DT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SHMCTL_CHART,
- "Calls to syscall shmctl(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20194,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_SHM_CTL_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
-}
-
-/**
- * Send Systemd charts
- *
- * Send collected data to Netdata.
- */
-static void ebpf_send_systemd_shm_charts()
-{
- ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMGET_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_shm.get);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMAT_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_shm.at);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMDT_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_shm.dt);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMCTL_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_shm.ctl);
- }
- }
- ebpf_write_end_chart();
-}
-
-/**
- * Send Specific Shared memory data
- *
- * Send data for specific cgroup/apps.
- *
- * @param type chart type
- * @param values structure with values that will be sent to netdata
- */
-static void ebpf_send_specific_shm_data(char *type, netdata_publish_shm_t *values)
-{
- ebpf_write_begin_chart(type, NETDATA_SHMGET_CHART, "");
- write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMGET_CALL].name, (long long)values->get);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_SHMAT_CHART, "");
- write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMAT_CALL].name, (long long)values->at);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_SHMDT_CHART, "");
- write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMDT_CALL].name, (long long)values->dt);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_SHMCTL_CHART, "");
- write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMCTL_CALL].name, (long long)values->ctl);
- ebpf_write_end_chart();
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param update_every value to overwrite the update frequency set by the server.
-*/
-void ebpf_shm_send_cgroup_data(int update_every)
-{
- if (!ebpf_cgroup_pids)
- return;
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- ebpf_shm_sum_cgroup_pids(&ect->publish_shm, ect->pids);
- }
-
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
- if (send_cgroup_chart) {
- ebpf_create_systemd_shm_charts(update_every);
- }
-
- ebpf_send_systemd_shm_charts();
- }
-
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_SHM_CHART) && ect->updated) {
- ebpf_create_specific_shm_charts(ect->name, update_every);
- ect->flags |= NETDATA_EBPF_CGROUP_HAS_SHM_CHART;
- }
-
- if (ect->flags & NETDATA_EBPF_CGROUP_HAS_SHM_CHART) {
- if (ect->updated) {
- ebpf_send_specific_shm_data(ect->name, &ect->publish_shm);
- } else {
- ebpf_obsolete_specific_shm_charts(ect->name, update_every);
- ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_SHM_CHART;
- }
- }
- }
-
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Main loop for this collector.
- */
-static void shm_collector(ebpf_module_t *em)
-{
- int cgroups = em->cgroup_charts;
- int update_every = em->update_every;
- heartbeat_t hb;
- heartbeat_init(&hb);
- int counter = update_every - 1;
- int maps_per_core = em->maps_per_core;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- netdata_idx_t *stats = em->hash_table_stats;
- memset(stats, 0, sizeof(em->hash_table_stats));
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_shm_read_global_table(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps) {
- read_shm_apps_table(maps_per_core);
- }
-
- if (cgroups) {
- ebpf_update_shm_cgroup(maps_per_core);
- }
-
- pthread_mutex_lock(&lock);
-
- shm_send_global();
-
- if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
- ebpf_shm_send_apps_data(apps_groups_root_target);
- }
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_shm_pid)
- ebpf_send_data_aral_chart(ebpf_aral_shm_pid, em);
-#endif
-
- if (cgroups) {
- ebpf_shm_send_cgroup_data(update_every);
- }
-
- pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
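
The pacing idiom in this loop is worth isolating: the heartbeat ticks once per second, collection only runs every update_every ticks, and starting the counter at update_every - 1 makes the very first tick collect immediately. A reduced sketch, where exit_requested and collect_and_publish are hypothetical stand-ins for the plugin's real exit flag and collection body:

    heartbeat_t hb;
    heartbeat_init(&hb);
    int counter = update_every - 1;        /* first tick triggers a collection */

    while (!exit_requested) {
        heartbeat_next(&hb, USEC_PER_SEC); /* sleep to the next 1s boundary */
        if (++counter != update_every)
            continue;                      /* not our tick yet */

        counter = 0;
        collect_and_publish();             /* runs once every update_every seconds */
    }
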
-
-/*****************************************************************
- * INITIALIZE THREAD
- *****************************************************************/
-
-/**
- * Create apps charts
- *
- * Call ebpf_create_chart to create the charts on apps submenu.
- *
- * @param em a pointer to the structure with the default values.
- */
-void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr)
-{
- struct ebpf_target *root = ptr;
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = root; w; w = w->next) {
- if (unlikely(!w->exposed))
- continue;
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_shmget_call",
- "Calls to syscall shmget(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_shmget_call",
- 20191,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SHM);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_shmat_call",
- "Calls to syscall shmat(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_shmat_call",
- 20192,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SHM);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_shmdt_call",
- "Calls to syscall shmdt(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_shmdt_call",
- 20193,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SHM);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_shmctl_call",
- "Calls to syscall shmctl(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_shmctl_call",
- 20194,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SHM);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- w->charts_created |= 1<<EBPF_MODULE_SHM_IDX;
- }
-
- em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED;
-}
-
-/**
- * Allocate vectors used with this thread.
- *
- * We do not check the return value, because callocz already does this and shuts
- * the software down when allocation is not possible.
- *
- * @param apps whether apps charts are enabled.
- */
-static void ebpf_shm_allocate_global_vectors(int apps)
-{
- if (apps) {
- ebpf_shm_aral_init();
- shm_pid = callocz((size_t)pid_max, sizeof(netdata_publish_shm_t *));
- shm_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_shm_t));
- }
-
- shm_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
-
- memset(shm_hash_values, 0, sizeof(shm_hash_values));
-}
-
-/*****************************************************************
- * MAIN THREAD
- *****************************************************************/
-
-/**
- * Create global charts
- *
- * Call ebpf_create_chart to create the charts for the collector.
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_shm_charts(int update_every)
-{
- ebpf_create_chart(
- NETDATA_EBPF_SYSTEM_GROUP,
- NETDATA_SHM_GLOBAL_CHART,
- "Calls to shared memory system calls",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SYSTEM_IPC_SHM_SUBMENU,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_CALLS,
- ebpf_create_global_dimension,
- shm_publish_aggregated,
- NETDATA_SHM_END,
- update_every, NETDATA_EBPF_MODULE_NAME_SHM
- );
-
- fflush(stdout);
-}
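
On stdout, ebpf_create_chart produces a CHART definition line followed by one DIMENSION line per metric, in the same DIMENSION format as the fprintf calls in ebpf_shm_create_apps_charts above. With placeholders for the fields that come from macros not shown here, the definition for this global chart is shaped approximately like:

    CHART system.shared_memory_calls '' 'Calls to shared memory system calls' <units> <family> '' line <priority> <update_every> '' 'ebpf.plugin' 'shm'
    DIMENSION get '' incremental 1 1
    DIMENSION at '' incremental 1 1
    DIMENSION dt '' incremental 1 1
    DIMENSION ctl '' incremental 1 1
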
-
-/**
- * Load BPF
- *
- * Load BPF files.
- *
- * @param em the structure with configuration
- *
- * @return it returns 0 on success and -1 otherwise
- */
-static int ebpf_shm_load_bpf(ebpf_module_t *em)
-{
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
-#endif
-
- int ret = 0;
-
- ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_KEY_SHMGET_CALL].mode);
- if (em->load & EBPF_LOAD_LEGACY) {
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- ret = -1;
- }
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- shm_bpf_obj = shm_bpf__open();
- if (!shm_bpf_obj)
- ret = -1;
- else
- ret = ebpf_shm_load_and_attach(shm_bpf_obj, em);
- }
-#endif
-
-
- if (ret)
- netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name);
-
- return ret;
-}
-
-/**
- * Shared memory thread.
- *
- * @param ptr a pointer to `struct ebpf_module`
- * @return It always returns NULL.
- */
-void *ebpf_shm_thread(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_shm_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = shm_maps;
-
- ebpf_update_pid_table(&shm_maps[NETDATA_PID_SHM_TABLE], em);
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_shm_load_bpf(em)) {
- goto endshm;
- }
-
- ebpf_shm_allocate_global_vectors(em->apps_charts);
-
- int algorithms[NETDATA_SHM_END] = {
- NETDATA_EBPF_INCREMENTAL_IDX,
- NETDATA_EBPF_INCREMENTAL_IDX,
- NETDATA_EBPF_INCREMENTAL_IDX,
- NETDATA_EBPF_INCREMENTAL_IDX
- };
- ebpf_global_labels(
- shm_aggregated_data,
- shm_publish_aggregated,
- shm_dimension_name,
- shm_dimension_name,
- algorithms,
- NETDATA_SHM_END
- );
-
- pthread_mutex_lock(&lock);
- ebpf_create_shm_charts(em->update_every);
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_shm_pid)
- shm_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_SHM_ARAL_NAME, em);
-#endif
-
- pthread_mutex_unlock(&lock);
-
- shm_collector(em);
-
-endshm:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_shm.h b/collectors/ebpf.plugin/ebpf_shm.h
deleted file mode 100644
index a415006e6..000000000
--- a/collectors/ebpf.plugin/ebpf_shm.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_SHM_H
-#define NETDATA_EBPF_SHM_H 1
-
-// Module name & description
-#define NETDATA_EBPF_MODULE_NAME_SHM "shm"
-#define NETDATA_EBPF_SHM_MODULE_DESC "Show calls to syscalls shmget(2), shmat(2), shmdt(2) and shmctl(2). This thread is integrated with apps and cgroup."
-
-// charts
-#define NETDATA_SHM_GLOBAL_CHART "shared_memory_calls"
-#define NETDATA_SHMGET_CHART "shmget_call"
-#define NETDATA_SHMAT_CHART "shmat_call"
-#define NETDATA_SHMDT_CHART "shmdt_call"
-#define NETDATA_SHMCTL_CHART "shmctl_call"
-
-// configuration file
-#define NETDATA_DIRECTORY_SHM_CONFIG_FILE "shm.conf"
-
-// Contexts
-#define NETDATA_CGROUP_SHM_GET_CONTEXT "cgroup.shmget"
-#define NETDATA_CGROUP_SHM_AT_CONTEXT "cgroup.shmat"
-#define NETDATA_CGROUP_SHM_DT_CONTEXT "cgroup.shmdt"
-#define NETDATA_CGROUP_SHM_CTL_CONTEXT "cgroup.shmctl"
-
-#define NETDATA_SYSTEMD_SHM_GET_CONTEXT "services.shmget"
-#define NETDATA_SYSTEMD_SHM_AT_CONTEXT "services.shmat"
-#define NETDATA_SYSTEMD_SHM_DT_CONTEXT "services.shmdt"
-#define NETDATA_SYSTEMD_SHM_CTL_CONTEXT "services.shmctl"
-
-// ARAL name
-#define NETDATA_EBPF_SHM_ARAL_NAME "ebpf_shm"
-
-typedef struct netdata_publish_shm {
- uint64_t get;
- uint64_t at;
- uint64_t dt;
- uint64_t ctl;
-} netdata_publish_shm_t;
-
-enum shm_tables {
- NETDATA_PID_SHM_TABLE,
- NETDATA_SHM_CONTROLLER,
- NETDATA_SHM_GLOBAL_TABLE
-};
-
-enum shm_counters {
- NETDATA_KEY_SHMGET_CALL,
- NETDATA_KEY_SHMAT_CALL,
- NETDATA_KEY_SHMDT_CALL,
- NETDATA_KEY_SHMCTL_CALL,
-
- // Keep this as the last entry and don't skip numbers, as it is used as an element counter
- NETDATA_SHM_END
-};
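
The comment on NETDATA_SHM_END reflects a common C idiom: a sentinel enumerator doubles as the element count, so arrays and loops stay in sync with the enum automatically. For instance:

    static uint64_t counters[NETDATA_SHM_END];          /* one slot per syscall */

    for (int i = NETDATA_KEY_SHMGET_CALL; i < NETDATA_SHM_END; i++)
        counters[i] = 0;   /* adding a call before the sentinel grows both for free */
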
-
-void *ebpf_shm_thread(void *ptr);
-void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr);
-void ebpf_shm_release(netdata_publish_shm_t *stat);
-extern netdata_ebpf_targets_t shm_targets[];
-
-extern struct config shm_config;
-
-#endif
diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c
deleted file mode 100644
index bbb5dca1b..000000000
--- a/collectors/ebpf.plugin/ebpf_socket.c
+++ /dev/null
@@ -1,2895 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include <sys/resource.h>
-
-#include "ebpf.h"
-#include "ebpf_socket.h"
-
-/*****************************************************************
- *
- * GLOBAL VARIABLES
- *
- *****************************************************************/
-
-static char *socket_dimension_names[NETDATA_MAX_SOCKET_VECTOR] = { "received", "sent", "close",
- "received", "sent", "retransmitted",
- "connected_V4", "connected_V6", "connected_tcp",
- "connected_udp"};
-static char *socket_id_names[NETDATA_MAX_SOCKET_VECTOR] = { "tcp_cleanup_rbuf", "tcp_sendmsg", "tcp_close",
- "udp_recvmsg", "udp_sendmsg", "tcp_retransmit_skb",
- "tcp_connect_v4", "tcp_connect_v6", "inet_csk_accept_tcp",
- "inet_csk_accept_udp" };
-
-static ebpf_local_maps_t socket_maps[] = {{.name = "tbl_global_sock",
- .internal_input = NETDATA_SOCKET_COUNTER,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "tbl_lports",
- .internal_input = NETDATA_SOCKET_COUNTER,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = "tbl_nd_socket",
- .internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED,
- .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED,
- .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = "tbl_nv_udp",
- .internal_input = NETDATA_COMPILED_UDP_CONNECTIONS_ALLOWED,
- .user_input = NETDATA_MAXIMUM_UDP_CONNECTIONS_ALLOWED,
- .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = "socket_ctrl", .internal_input = NETDATA_CONTROLLER_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-static netdata_idx_t *socket_hash_values = NULL;
-static netdata_syscall_stat_t socket_aggregated_data[NETDATA_MAX_SOCKET_VECTOR];
-static netdata_publish_syscall_t socket_publish_aggregated[NETDATA_MAX_SOCKET_VECTOR];
-
-netdata_socket_t *socket_values;
-
-ebpf_network_viewer_port_list_t *listen_ports = NULL;
-ebpf_addresses_t tcp_v6_connect_address = {.function = "tcp_v6_connect", .hash = 0, .addr = 0, .type = 0};
-
-struct config socket_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-netdata_ebpf_targets_t socket_targets[] = { {.name = "inet_csk_accept", .mode = EBPF_LOAD_PROBE},
- {.name = "tcp_retransmit_skb", .mode = EBPF_LOAD_PROBE},
- {.name = "tcp_cleanup_rbuf", .mode = EBPF_LOAD_PROBE},
- {.name = "tcp_close", .mode = EBPF_LOAD_PROBE},
- {.name = "udp_recvmsg", .mode = EBPF_LOAD_PROBE},
- {.name = "tcp_sendmsg", .mode = EBPF_LOAD_PROBE},
- {.name = "udp_sendmsg", .mode = EBPF_LOAD_PROBE},
- {.name = "tcp_v4_connect", .mode = EBPF_LOAD_PROBE},
- {.name = "tcp_v6_connect", .mode = EBPF_LOAD_PROBE},
- {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-
-struct netdata_static_thread ebpf_read_socket = {
- .name = "EBPF_READ_SOCKET",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
-ARAL *aral_socket_table = NULL;
-
-#ifdef NETDATA_DEV_MODE
-int socket_disable_priority;
-#endif
-
-#ifdef LIBBPF_MAJOR_VERSION
-/**
- * Disable Probe
- *
- * Disable probes to use trampoline.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_socket_disable_probes(struct socket_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_inet_csk_accept_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_retransmit_skb_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_cleanup_rbuf_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_close_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_udp_recvmsg_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_udp_recvmsg_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_kprobe, false);
-}
-
-/**
- * Disable Trampoline
- *
- * Disable trampolines to use probes.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_socket_disable_trampoline(struct socket_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_inet_csk_accept_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_retransmit_skb_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_cleanup_rbuf_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_close_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_udp_recvmsg_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_udp_recvmsg_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_fexit, false);
-}
-
-/**
- * Set trampoline target.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_set_trampoline_target(struct socket_bpf *obj)
-{
- bpf_program__set_attach_target(obj->progs.netdata_inet_csk_accept_fexit, 0,
- socket_targets[NETDATA_FCNT_INET_CSK_ACCEPT].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_tcp_v4_connect_fentry, 0,
- socket_targets[NETDATA_FCNT_TCP_V4_CONNECT].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_tcp_v4_connect_fexit, 0,
- socket_targets[NETDATA_FCNT_TCP_V4_CONNECT].name);
-
- if (tcp_v6_connect_address.type == 'T') {
- bpf_program__set_attach_target(
- obj->progs.netdata_tcp_v6_connect_fentry, 0, socket_targets[NETDATA_FCNT_TCP_V6_CONNECT].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_tcp_v6_connect_fexit, 0,
- socket_targets[NETDATA_FCNT_TCP_V6_CONNECT].name);
- }
-
- bpf_program__set_attach_target(obj->progs.netdata_tcp_retransmit_skb_fentry, 0,
- socket_targets[NETDATA_FCNT_TCP_RETRANSMIT].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_tcp_cleanup_rbuf_fentry, 0,
- socket_targets[NETDATA_FCNT_CLEANUP_RBUF].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_tcp_close_fentry, 0,
- socket_targets[NETDATA_FCNT_TCP_CLOSE].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_udp_recvmsg_fentry, 0,
- socket_targets[NETDATA_FCNT_UDP_RECEVMSG].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_udp_recvmsg_fexit, 0,
- socket_targets[NETDATA_FCNT_UDP_RECEVMSG].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_tcp_sendmsg_fentry, 0,
- socket_targets[NETDATA_FCNT_TCP_SENDMSG].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_tcp_sendmsg_fexit, 0,
- socket_targets[NETDATA_FCNT_TCP_SENDMSG].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_udp_sendmsg_fentry, 0,
- socket_targets[NETDATA_FCNT_UDP_SENDMSG].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_udp_sendmsg_fexit, 0,
- socket_targets[NETDATA_FCNT_UDP_SENDMSG].name);
-}
-
-
-/**
- * Disable specific trampoline
- *
- * Disable specific trampolines to match the user's selection.
- *
- * @param obj is the main structure for bpf objects.
- * @param sel option selected by user.
- */
-static inline void ebpf_socket_disable_specific_trampoline(struct socket_bpf *obj, netdata_run_mode_t sel)
-{
- if (sel == MODE_RETURN) {
- bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_fentry, false);
- } else {
- bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_fexit, false);
- }
-}
-
-/**
- * Disable specific probe
- *
- * Disable specific probes to match the user's selection.
- *
- * @param obj is the main structure for bpf objects.
- * @param sel option selected by user.
- */
-static inline void ebpf_socket_disable_specific_probe(struct socket_bpf *obj, netdata_run_mode_t sel)
-{
- if (sel == MODE_RETURN) {
- bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_kprobe, false);
- } else {
- bpf_program__set_autoload(obj->progs.netdata_tcp_sendmsg_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v4_connect_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_udp_sendmsg_kretprobe, false);
- }
-}
-
-/**
- * Attach probes
- *
- * Attach probes to targets.
- *
- * @param obj is the main structure for bpf objects.
- * @param sel option selected by user.
- */
-static long ebpf_socket_attach_probes(struct socket_bpf *obj, netdata_run_mode_t sel)
-{
- obj->links.netdata_inet_csk_accept_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_inet_csk_accept_kretprobe,
- true,
- socket_targets[NETDATA_FCNT_INET_CSK_ACCEPT].name);
- long ret = libbpf_get_error(obj->links.netdata_inet_csk_accept_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_tcp_retransmit_skb_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_retransmit_skb_kprobe,
- false,
- socket_targets[NETDATA_FCNT_TCP_RETRANSMIT].name);
- ret = libbpf_get_error(obj->links.netdata_tcp_retransmit_skb_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_tcp_cleanup_rbuf_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_cleanup_rbuf_kprobe,
- false,
- socket_targets[NETDATA_FCNT_CLEANUP_RBUF].name);
- ret = libbpf_get_error(obj->links.netdata_tcp_cleanup_rbuf_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_tcp_close_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_close_kprobe,
- false,
- socket_targets[NETDATA_FCNT_TCP_CLOSE].name);
- ret = libbpf_get_error(obj->links.netdata_tcp_close_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_udp_recvmsg_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_udp_recvmsg_kprobe,
- false,
- socket_targets[NETDATA_FCNT_UDP_RECEVMSG].name);
- ret = libbpf_get_error(obj->links.netdata_udp_recvmsg_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_udp_recvmsg_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_udp_recvmsg_kretprobe,
- true,
- socket_targets[NETDATA_FCNT_UDP_RECEVMSG].name);
- ret = libbpf_get_error(obj->links.netdata_udp_recvmsg_kretprobe);
- if (ret)
- return -1;
-
- if (sel == MODE_RETURN) {
- obj->links.netdata_tcp_sendmsg_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_sendmsg_kretprobe,
- true,
- socket_targets[NETDATA_FCNT_TCP_SENDMSG].name);
- ret = libbpf_get_error(obj->links.netdata_tcp_sendmsg_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_udp_sendmsg_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_udp_sendmsg_kretprobe,
- true,
- socket_targets[NETDATA_FCNT_UDP_SENDMSG].name);
- ret = libbpf_get_error(obj->links.netdata_udp_sendmsg_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_tcp_v4_connect_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_v4_connect_kretprobe,
- true,
- socket_targets[NETDATA_FCNT_TCP_V4_CONNECT].name);
- ret = libbpf_get_error(obj->links.netdata_tcp_v4_connect_kretprobe);
- if (ret)
- return -1;
-
- if (tcp_v6_connect_address.type == 'T') {
- obj->links.netdata_tcp_v6_connect_kretprobe = bpf_program__attach_kprobe(
- obj->progs.netdata_tcp_v6_connect_kretprobe, true, socket_targets[NETDATA_FCNT_TCP_V6_CONNECT].name);
- ret = libbpf_get_error(obj->links.netdata_tcp_v6_connect_kretprobe);
- if (ret)
- return -1;
- }
- } else {
- obj->links.netdata_tcp_sendmsg_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_sendmsg_kprobe,
- false,
- socket_targets[NETDATA_FCNT_TCP_SENDMSG].name);
- ret = libbpf_get_error(obj->links.netdata_tcp_sendmsg_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_udp_sendmsg_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_udp_sendmsg_kprobe,
- false,
- socket_targets[NETDATA_FCNT_UDP_SENDMSG].name);
- ret = libbpf_get_error(obj->links.netdata_udp_sendmsg_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_tcp_v4_connect_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_v4_connect_kprobe,
- false,
- socket_targets[NETDATA_FCNT_TCP_V4_CONNECT].name);
- ret = libbpf_get_error(obj->links.netdata_tcp_v4_connect_kprobe);
- if (ret)
- return -1;
-
- if (tcp_v6_connect_address.type == 'T') {
- obj->links.netdata_tcp_v6_connect_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_tcp_v6_connect_kprobe,
- false,
- socket_targets[NETDATA_FCNT_TCP_V6_CONNECT].name);
- ret = libbpf_get_error(obj->links.netdata_tcp_v6_connect_kprobe);
- if (ret)
- return -1;
- }
- }
-
- return 0;
-}
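
The boolean second argument of bpf_program__attach_kprobe() selects entry versus return instrumentation, which is why MODE_RETURN attaches the *_kretprobe programs above. A minimal sketch with hypothetical program names my_entry and my_return:

    /* entry probe: fires when tcp_sendmsg is called */
    struct bpf_link *in = bpf_program__attach_kprobe(obj->progs.my_entry, false, "tcp_sendmsg");

    /* return probe: fires when tcp_sendmsg returns, exposing the return value */
    struct bpf_link *out = bpf_program__attach_kprobe(obj->progs.my_return, true, "tcp_sendmsg");

    if (libbpf_get_error(in) || libbpf_get_error(out))
        return -1;   /* attach failure */
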
-
-/**
- * Set hash tables
- *
- * Set the map file descriptors according to the values returned by the kernel.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_socket_set_hash_tables(struct socket_bpf *obj)
-{
- socket_maps[NETDATA_SOCKET_GLOBAL].map_fd = bpf_map__fd(obj->maps.tbl_global_sock);
- socket_maps[NETDATA_SOCKET_LPORTS].map_fd = bpf_map__fd(obj->maps.tbl_lports);
- socket_maps[NETDATA_SOCKET_OPEN_SOCKET].map_fd = bpf_map__fd(obj->maps.tbl_nd_socket);
- socket_maps[NETDATA_SOCKET_TABLE_UDP].map_fd = bpf_map__fd(obj->maps.tbl_nv_udp);
- socket_maps[NETDATA_SOCKET_TABLE_CTRL].map_fd = bpf_map__fd(obj->maps.socket_ctrl);
-}
-
-/**
- * Adjust Map Size
- *
- * Resize maps according to input from users.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- */
-static void ebpf_socket_adjust_map(struct socket_bpf *obj, ebpf_module_t *em)
-{
- ebpf_update_map_size(obj->maps.tbl_nd_socket, &socket_maps[NETDATA_SOCKET_OPEN_SOCKET],
- em, bpf_map__name(obj->maps.tbl_nd_socket));
-
- ebpf_update_map_size(obj->maps.tbl_nv_udp, &socket_maps[NETDATA_SOCKET_TABLE_UDP],
- em, bpf_map__name(obj->maps.tbl_nv_udp));
-
- ebpf_update_map_type(obj->maps.tbl_nd_socket, &socket_maps[NETDATA_SOCKET_OPEN_SOCKET]);
- ebpf_update_map_type(obj->maps.tbl_nv_udp, &socket_maps[NETDATA_SOCKET_TABLE_UDP]);
- ebpf_update_map_type(obj->maps.socket_ctrl, &socket_maps[NETDATA_SOCKET_TABLE_CTRL]);
- ebpf_update_map_type(obj->maps.tbl_global_sock, &socket_maps[NETDATA_SOCKET_GLOBAL]);
- ebpf_update_map_type(obj->maps.tbl_lports, &socket_maps[NETDATA_SOCKET_LPORTS]);
-}
-
-/**
- * Disable TCP V6 connect
- */
-static void ebpf_disable_tcp_v6_connect(struct socket_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_tcp_v6_connect_fentry, false);
-}
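
tcp_v6_connect_address.type holds the symbol type read from /proc/kallsyms ('T' marks a global text symbol), so this guard skips the IPv6 programs on kernels where the symbol does not exist. A self-contained sketch of such a lookup (the helper name is hypothetical):

    #include <stdio.h>
    #include <string.h>

    /* Return the kallsyms type character for `name`, or 0 when absent. */
    static char kallsyms_type(const char *name) {
        FILE *fp = fopen("/proc/kallsyms", "r");
        if (!fp) return 0;

        char line[512], type, sym[256];
        unsigned long long addr;
        while (fgets(line, sizeof(line), fp)) {
            if (sscanf(line, "%llx %c %255s", &addr, &type, sym) == 3 &&
                !strcmp(sym, name)) {
                fclose(fp);
                return type;   /* 'T' for tcp_v6_connect on IPv6-enabled kernels */
            }
        }
        fclose(fp);
        return 0;
    }

Usage mirrors the guard in this file: if kallsyms_type("tcp_v6_connect") != 'T', skip the v6 programs, as ebpf_disable_tcp_v6_connect() does.
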
-
-/**
- * Load and attach
- *
- * Load and attach the eBPF code in kernel.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- *
- * @return it returns 0 on success and -1 otherwise
- */
-static inline int ebpf_socket_load_and_attach(struct socket_bpf *obj, ebpf_module_t *em)
-{
- netdata_ebpf_targets_t *mt = em->targets;
- netdata_ebpf_program_loaded_t test = mt[NETDATA_FCNT_INET_CSK_ACCEPT].mode;
-
- if (test == EBPF_LOAD_TRAMPOLINE) {
- ebpf_socket_disable_probes(obj);
-
- ebpf_set_trampoline_target(obj);
- ebpf_socket_disable_specific_trampoline(obj, em->mode);
- } else { // We are not using tracepoints for this thread.
- ebpf_socket_disable_trampoline(obj);
-
- ebpf_socket_disable_specific_probe(obj, em->mode);
- }
-
- ebpf_socket_adjust_map(obj, em);
-
- if (tcp_v6_connect_address.type != 'T') {
- ebpf_disable_tcp_v6_connect(obj);
- }
-
- int ret = socket_bpf__load(obj);
- if (ret) {
- fprintf(stderr, "failed to load BPF object: %d\n", ret);
- return ret;
- }
-
- if (test == EBPF_LOAD_TRAMPOLINE) {
- ret = socket_bpf__attach(obj);
- } else {
- ret = (int)ebpf_socket_attach_probes(obj, em->mode);
- }
-
- if (!ret) {
- ebpf_socket_set_hash_tables(obj);
-
- ebpf_update_controller(socket_maps[NETDATA_SOCKET_TABLE_CTRL].map_fd, em);
- }
-
- return ret;
-}
-#endif
-
-/*****************************************************************
- *
- * FUNCTIONS TO CLOSE THE THREAD
- *
- *****************************************************************/
-
-/**
- * Socket Free
- *
- * Clean up variables after child threads stop.
- *
- * @param em the module structure.
- */
-static void ebpf_socket_free(ebpf_module_t *em)
-{
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/**
- * Obsolete Systemd Socket Charts
- *
- * Obsolete charts when systemd is enabled
- *
- * @param update_every value to overwrite the update frequency set by the server.
- **/
-static void ebpf_obsolete_systemd_socket_charts(int update_every)
-{
- int order = 20080;
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_NET_APPS_CONNECTION_TCP_V4,
- "",
- "Calls to tcp_v4_connection",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_SERVICES_SOCKET_TCP_V4_CONN_CONTEXT,
- order++,
- update_every);
-
- if (tcp_v6_connect_address.type == 'T') {
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_NET_APPS_CONNECTION_TCP_V6,
- "",
- "Calls to tcp_v6_connection",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT,
- order++,
- update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_NET_APPS_BANDWIDTH_RECV,
- "",
- "Bytes received",
- EBPF_COMMON_DIMENSION_BITS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT,
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_NET_APPS_BANDWIDTH_SENT,
- "",
- "Bytes sent",
- EBPF_COMMON_DIMENSION_BITS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT,
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS,
- "",
- "Calls to tcp_cleanup_rbuf.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT,
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS,
- "",
- "Calls to tcp_sendmsg.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT,
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT,
- "",
- "Calls to tcp_retransmit",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT,
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS,
- "",
- "Calls to udp_sendmsg",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT,
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS,
- "",
- "Calls to udp_recvmsg",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT,
- order++,
- update_every);
-}
-
-static void ebpf_obsolete_specific_socket_charts(char *type, int update_every);
-
-/**
- * Obsolete cgroup charts
- *
- * Send obsolete messages for all cgroup charts created before closing.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static inline void ebpf_obsolete_socket_cgroup_charts(ebpf_module_t *em) {
- pthread_mutex_lock(&mutex_cgroup_shm);
-
- ebpf_obsolete_systemd_socket_charts(em->update_every);
-
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- ebpf_obsolete_specific_socket_charts(ect->name, em->update_every);
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Obsolete apps charts
- *
- * Mark the charts on the apps submenu as obsolete.
- *
- * @param em a pointer to the structure with the default values.
- */
-void ebpf_socket_obsolete_apps_charts(struct ebpf_module *em)
-{
- int order = 20130;
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = apps_groups_root_target; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SOCKET_IDX))))
- continue;
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_tcp_v4_connection",
- "Calls to tcp_v4_connection.",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_tcp_v4_connection",
- order++,
- update_every);
-
- if (tcp_v6_connect_address.type == 'T') {
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_tcp_v6_connection",
- "Calls to tcp_v6_connection.",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_tcp_v6_connection",
- order++,
- update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_sock_bytes_sent",
- "Bytes sent.",
- EBPF_COMMON_DIMENSION_BITS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_sock_bytes_sent",
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_sock_bytes_received",
- "Bytes received.",
- EBPF_COMMON_DIMENSION_BITS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_sock_bytes_received",
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_tcp_sendmsg",
- "Calls to tcp_sendmsg.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_tcp_sendmsg",
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_tcp_cleanup_rbuf",
- "Calls to tcp_cleanup_rbuf.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_tcp_cleanup_rbuf",
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_tcp_retransmit",
- "Calls to tcp_retransmit.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_tcp_retransmit",
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_udp_sendmsg",
- "Calls to udp_sendmsg.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_udp_sendmsg",
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_udp_recvmsg",
- "Calls to udp_recvmsg.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_udp_recvmsg",
- order++,
- update_every);
-
- w->charts_created &= ~(1<<EBPF_MODULE_SOCKET_IDX);
- }
-}
-
-/**
- * Obsolete global charts
- *
- * Mark the global charts created by this thread as obsolete.
- *
- * @param em a pointer to the structure with the default values.
- */
-static void ebpf_socket_obsolete_global_charts(ebpf_module_t *em)
-{
- int order = 21070;
- ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY,
- NETDATA_INBOUND_CONNECTIONS,
- "",
- "Inbound connections.",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order++,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY,
- NETDATA_TCP_OUTBOUND_CONNECTIONS,
- "",
- "TCP outbound connections.",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order++,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY,
- NETDATA_TCP_FUNCTION_COUNT,
- "",
- "Calls to internal functions",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order++,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY,
- NETDATA_TCP_FUNCTION_BITS,
- "",
- "TCP bandwidth",
- EBPF_COMMON_DIMENSION_BITS,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order++,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY,
- NETDATA_TCP_FUNCTION_ERROR,
- "",
- "TCP errors",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order++,
- em->update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY,
- NETDATA_TCP_RETRANSMIT,
- "",
- "Packages retransmitted",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order++,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY,
- NETDATA_UDP_FUNCTION_COUNT,
- "",
- "UDP calls",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order++,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY,
- NETDATA_UDP_FUNCTION_BITS,
- "",
- "UDP bandwidth",
- EBPF_COMMON_DIMENSION_BITS,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order++,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_EBPF_IP_FAMILY,
- NETDATA_UDP_FUNCTION_ERROR,
- "",
- "UDP errors",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order++,
- em->update_every);
- }
-
- fflush(stdout);
-}
-
-/**
- * Socket exit
- *
- * Clean up the main thread.
- *
- * @param ptr thread data.
- */
-static void ebpf_socket_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (ebpf_read_socket.thread)
- netdata_thread_cancel(*ebpf_read_socket.thread);
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
-
- if (em->cgroup_charts) {
- ebpf_obsolete_socket_cgroup_charts(em);
- fflush(stdout);
- }
-
- if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
- ebpf_socket_obsolete_apps_charts(em);
- fflush(stdout);
- }
-
- ebpf_socket_obsolete_global_charts(em);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_socket_pid)
- ebpf_statistic_obsolete_aral_chart(em, socket_disable_priority);
-#endif
- pthread_mutex_unlock(&lock);
- }
-
- ebpf_socket_free(em);
-}
-
-/*****************************************************************
- *
- * PROCESS DATA AND SEND TO NETDATA
- *
- *****************************************************************/
-
-/**
- * Update the publish structure before sending data to Netdata.
- *
- * @param publish the first output structure with independent dimensions
- * @param tcp structure to store IO from tcp sockets
- * @param udp structure to store IO from udp sockets
- * @param input the structure with the input data.
- */
-static void ebpf_update_global_publish(
- netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *tcp, netdata_publish_vfs_common_t *udp,
- netdata_syscall_stat_t *input)
-{
- netdata_publish_syscall_t *move = publish;
- while (move) {
- if (input->call != move->pcall) {
- // Skip the first pass: publishing the initial cumulative values would create dimensions far above normal deltas.
- if (move->pcall) {
- move->ncall = (input->call > move->pcall) ? input->call - move->pcall : move->pcall - input->call;
- move->nbyte = (input->bytes > move->pbyte) ? input->bytes - move->pbyte : move->pbyte - input->bytes;
- move->nerr = (input->ecall > move->perr) ? input->ecall - move->perr : move->perr - input->ecall;
- } else {
- move->ncall = 0;
- move->nbyte = 0;
- move->nerr = 0;
- }
-
- move->pcall = input->call;
- move->pbyte = input->bytes;
- move->perr = input->ecall;
- } else {
- move->ncall = 0;
- move->nbyte = 0;
- move->nerr = 0;
- }
-
- input = input->next;
- move = move->next;
- }
-
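- // The write counters are negated so the IO charts can draw sent traffic below zero, opposite the received side.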
- tcp->write = -(long)publish[0].nbyte;
- tcp->read = (long)publish[1].nbyte;
-
- udp->write = -(long)publish[3].nbyte;
- udp->read = (long)publish[4].nbyte;
-}
-
-/**
- * Send global inbound connections
- *
- * Send the number of inbound connections per protocol.
- */
-static void ebpf_socket_send_global_inbound_conn()
-{
- uint64_t udp_conn = 0;
- uint64_t tcp_conn = 0;
- ebpf_network_viewer_port_list_t *move = listen_ports;
- while (move) {
- if (move->protocol == IPPROTO_TCP)
- tcp_conn += move->connections;
- else
- udp_conn += move->connections;
-
- move = move->next;
- }
-
- ebpf_write_begin_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_INBOUND_CONNECTIONS, "");
- write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_INCOMING_CONNECTION_TCP].name, (long long) tcp_conn);
- write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_INCOMING_CONNECTION_UDP].name, (long long) udp_conn);
- ebpf_write_end_chart();
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param em the structure with thread information
- */
-static void ebpf_socket_send_data(ebpf_module_t *em)
-{
- netdata_publish_vfs_common_t common_tcp;
- netdata_publish_vfs_common_t common_udp;
- ebpf_update_global_publish(socket_publish_aggregated, &common_tcp, &common_udp, socket_aggregated_data);
-
- ebpf_socket_send_global_inbound_conn();
- write_count_chart(NETDATA_TCP_OUTBOUND_CONNECTIONS, NETDATA_EBPF_IP_FAMILY,
- &socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V4], 2);
-
- // We read bytes from the function arguments, but the bandwidth charts use kilobits,
- // so we multiply by 8 and divide by BITS_IN_A_KILOBIT for the final value.
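- // Example, assuming BITS_IN_A_KILOBIT is 1000: 125000 bytes/s * 8 / 1000 = 1000 kilobits/s.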
- write_count_chart(NETDATA_TCP_FUNCTION_COUNT, NETDATA_EBPF_IP_FAMILY, socket_publish_aggregated, 3);
- write_io_chart(NETDATA_TCP_FUNCTION_BITS, NETDATA_EBPF_IP_FAMILY, socket_id_names[0],
- common_tcp.read * 8/BITS_IN_A_KILOBIT, socket_id_names[1],
- common_tcp.write * 8/BITS_IN_A_KILOBIT);
- if (em->mode < MODE_ENTRY) {
- write_err_chart(NETDATA_TCP_FUNCTION_ERROR, NETDATA_EBPF_IP_FAMILY, socket_publish_aggregated, 2);
- }
- write_count_chart(NETDATA_TCP_RETRANSMIT, NETDATA_EBPF_IP_FAMILY,
- &socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT], 1);
-
- write_count_chart(NETDATA_UDP_FUNCTION_COUNT, NETDATA_EBPF_IP_FAMILY,
- &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF], 2);
- write_io_chart(NETDATA_UDP_FUNCTION_BITS, NETDATA_EBPF_IP_FAMILY,
- socket_id_names[3], (long long)common_udp.read * 8/BITS_IN_A_KILOBIT,
- socket_id_names[4], (long long)common_udp.write * 8/BITS_IN_A_KILOBIT);
- if (em->mode < MODE_ENTRY) {
- write_err_chart(NETDATA_UDP_FUNCTION_ERROR, NETDATA_EBPF_IP_FAMILY,
- &socket_publish_aggregated[NETDATA_UDP_START], 2);
- }
-}
-
-/**
- * Send apps data to Netdata calling auxiliary functions.
- *
- * @param em the structure with thread information
- * @param root the target list.
- */
-void ebpf_socket_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
-{
- UNUSED(em);
-
- struct ebpf_target *w;
- // This algorithm is improved in https://github.com/netdata/netdata/pull/16030
- collected_number values[9];
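- // Index map: 0 tcp_v4 connections, 1 tcp_v6 connections, 2 bytes sent, 3 bytes received,
- // 4 tcp_sendmsg calls, 5 tcp_cleanup_rbuf calls, 6 retransmits, 7 udp_sendmsg calls, 8 udp_recvmsg calls.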
-
- for (w = root; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SOCKET_IDX))))
- continue;
-
- struct ebpf_pid_on_target *move = w->root_pid;
- // Simplified algorithm; further improvements appear in https://github.com/netdata/netdata/pull/16030
- memset(values, 0, sizeof(values));
- while (move) {
- int32_t pid = move->pid;
- ebpf_socket_publish_apps_t *ws = socket_bandwidth_curr[pid];
- if (ws) {
- values[0] += (collected_number) ws->call_tcp_v4_connection;
- values[1] += (collected_number) ws->call_tcp_v6_connection;
- values[2] += (collected_number) ws->bytes_sent;
- values[3] += (collected_number) ws->bytes_received;
- values[4] += (collected_number) ws->call_tcp_sent;
- values[5] += (collected_number) ws->call_tcp_received;
- values[6] += (collected_number) ws->retransmit;
- values[7] += (collected_number) ws->call_udp_sent;
- values[8] += (collected_number) ws->call_udp_received;
- }
-
- move = move->next;
- }
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_tcp_v4_connection");
- write_chart_dimension("connections", values[0]);
- ebpf_write_end_chart();
-
- if (tcp_v6_connect_address.type == 'T') {
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_call_tcp_v6_connection");
- write_chart_dimension("calls", values[1]);
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_sock_bytes_sent");
- // We read bytes but display kilobits: multiply by 8 and divide by 1000 (i.e. multiply by 0.008).
- write_chart_dimension("bandwidth", ((values[2])*8)/1000);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_sock_bytes_received");
- // We read bytes but display kilobits: multiply by 8 and divide by 1000 (i.e. multiply by 0.008).
- write_chart_dimension("bandwidth", ((values[3])*8)/1000);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_tcp_sendmsg");
- write_chart_dimension("calls", values[4]);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_tcp_cleanup_rbuf");
- write_chart_dimension("calls", values[5]);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_tcp_retransmit");
- write_chart_dimension("calls", values[6]);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_udp_sendmsg");
- write_chart_dimension("calls", values[7]);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_udp_recvmsg");
- write_chart_dimension("calls", values[8]);
- ebpf_write_end_chart();
- }
-}
-
-/*****************************************************************
- *
- * FUNCTIONS TO CREATE CHARTS
- *
- *****************************************************************/
-
-/**
- * Create global charts
- *
- * Call ebpf_create_chart to create the charts for the collector.
- *
- * @param em a pointer to the structure with the default values.
- */
-static void ebpf_socket_create_global_charts(ebpf_module_t *em)
-{
- int order = 21070;
- ebpf_create_chart(NETDATA_EBPF_IP_FAMILY,
- NETDATA_INBOUND_CONNECTIONS,
- "Inbound connections.",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- order++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_INCOMING_CONNECTION_TCP],
- 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- ebpf_create_chart(NETDATA_EBPF_IP_FAMILY,
- NETDATA_TCP_OUTBOUND_CONNECTIONS,
- "TCP outbound connections.",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- order++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V4],
- 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- ebpf_create_chart(NETDATA_EBPF_IP_FAMILY,
- NETDATA_TCP_FUNCTION_COUNT,
- "Calls to internal functions",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- order++,
- ebpf_create_global_dimension,
- socket_publish_aggregated,
- 3, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- ebpf_create_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_TCP_FUNCTION_BITS,
- "TCP bandwidth", EBPF_COMMON_DIMENSION_BITS,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- order++,
- ebpf_create_global_dimension,
- socket_publish_aggregated,
- 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_EBPF_IP_FAMILY,
- NETDATA_TCP_FUNCTION_ERROR,
- "TCP errors",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- order++,
- ebpf_create_global_dimension,
- socket_publish_aggregated,
- 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
- }
-
- ebpf_create_chart(NETDATA_EBPF_IP_FAMILY,
- NETDATA_TCP_RETRANSMIT,
- "Packages retransmitted",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- order++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- ebpf_create_chart(NETDATA_EBPF_IP_FAMILY,
- NETDATA_UDP_FUNCTION_COUNT,
- "UDP calls",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- order++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF],
- 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- ebpf_create_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_UDP_FUNCTION_BITS,
- "UDP bandwidth", EBPF_COMMON_DIMENSION_BITS,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- order++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF],
- 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_EBPF_IP_FAMILY,
- NETDATA_UDP_FUNCTION_ERROR,
- "UDP errors",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_KERNEL_FUNCTIONS,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- order++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF],
- 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
- }
-
- fflush(stdout);
-}
-
-/**
- * Create apps charts
- *
- * Call ebpf_write_chart_cmd to create the charts on the apps submenu.
- *
- * @param em a pointer to the structure with the default values.
- * @param ptr a pointer to the targets
- */
-void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr)
-{
- struct ebpf_target *root = ptr;
- struct ebpf_target *w;
- int order = 20130;
- int update_every = em->update_every;
- for (w = root; w; w = w->next) {
- if (unlikely(!w->exposed))
- continue;
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_tcp_v4_connection",
- "Calls to tcp_v4_connection.",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_tcp_v4_connection",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SOCKET);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION connections '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- if (tcp_v6_connect_address.type == 'T') {
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_tcp_v6_connection",
- "Calls to tcp_v6_connection.",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_tcp_v6_connection",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SOCKET);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION connections '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
- }
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_sock_bytes_sent",
- "Bytes sent.",
- EBPF_COMMON_DIMENSION_BITS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_sock_bytes_sent",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SOCKET);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION bandwidth '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_sock_bytes_received",
- "Bytes received.",
- EBPF_COMMON_DIMENSION_BITS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_sock_bytes_received",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SOCKET);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION bandwidth '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_tcp_sendmsg",
- "Calls to tcp_sendmsg.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_tcp_sendmsg",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SOCKET);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_tcp_cleanup_rbuf",
- "Calls to tcp_cleanup_rbuf.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_tcp_cleanup_rbuf",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SOCKET);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_tcp_retransmit",
- "Calls to tcp_retransmit.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_tcp_retransmit",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SOCKET);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_udp_sendmsg",
- "Calls to udp_sendmsg.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_udp_sendmsg",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SOCKET);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_udp_recvmsg",
- "Calls to udp_recvmsg.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_udp_recvmsg",
- order,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SOCKET);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- w->charts_created |= 1<<EBPF_MODULE_SOCKET_IDX;
- }
- em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED;
-}
-
-/*****************************************************************
- *
- * READ INFORMATION FROM KERNEL RING
- *
- *****************************************************************/
-
-/**
- * Is specific ip inside the range
- *
- * Check if the IP is inside an IP range previously defined.
- * Excluded ranges are tested first; once any list is configured, an IP must
- * match an included range to be accepted.
- *
- * @param cmp the IP to compare
- * @param family the IP family
- *
- * @return It returns 1 if the IP is inside the range and 0 otherwise
- */
-static int ebpf_is_specific_ip_inside_range(union netdata_ip_t *cmp, int family)
-{
- if (!network_viewer_opt.excluded_ips && !network_viewer_opt.included_ips)
- return 1;
-
- uint32_t ipv4_test = htonl(cmp->addr32[0]);
- ebpf_network_viewer_ip_list_t *move = network_viewer_opt.excluded_ips;
- while (move) {
- if (family == AF_INET) {
- if (move->first.addr32[0] <= ipv4_test &&
- ipv4_test <= move->last.addr32[0])
- return 0;
- } else {
- if (memcmp(move->first.addr8, cmp->addr8, sizeof(union netdata_ip_t)) <= 0 &&
- memcmp(move->last.addr8, cmp->addr8, sizeof(union netdata_ip_t)) >= 0) {
- return 0;
- }
- }
- move = move->next;
- }
-
- move = network_viewer_opt.included_ips;
- while (move) {
- if (family == AF_INET && move->ver == AF_INET) {
- if (move->first.addr32[0] <= ipv4_test &&
- move->last.addr32[0] >= ipv4_test)
- return 1;
- } else {
- if (move->ver == AF_INET6 &&
- memcmp(move->first.addr8, cmp->addr8, sizeof(union netdata_ip_t)) <= 0 &&
- memcmp(move->last.addr8, cmp->addr8, sizeof(union netdata_ip_t)) >= 0) {
- return 1;
- }
- }
- move = move->next;
- }
-
- return 0;
-}
-
-/**
- * Is port inside range
- *
- * Verify if the cmp port is inside one of the configured ranges [first, last].
- * The value is expected in network byte order (big endian), matching the stored lists.
- *
- * @param cmp the value to compare
- *
- * @return It returns 1 when cmp is inside and 0 otherwise.
- */
-static int ebpf_is_port_inside_range(uint16_t cmp)
-{
- // We do not have restrictions for ports.
- if (!network_viewer_opt.excluded_port && !network_viewer_opt.included_port)
- return 1;
-
- // Test if port is excluded
- ebpf_network_viewer_port_list_t *move = network_viewer_opt.excluded_port;
- while (move) {
- if (move->cmp_first <= cmp && cmp <= move->cmp_last)
- return 0;
-
- move = move->next;
- }
-
- // Test if the port is inside allowed range
- move = network_viewer_opt.included_port;
- while (move) {
- if (move->cmp_first <= cmp && cmp <= move->cmp_last)
- return 1;
-
- move = move->next;
- }
-
- return 0;
-}
-
-/**
- * Hostname matches pattern
- *
- * @param cmp the value to compare
- *
- * @return It returns 1 when the value matches and zero otherwise.
- */
-int hostname_matches_pattern(char *cmp)
-{
- if (!network_viewer_opt.included_hostnames && !network_viewer_opt.excluded_hostnames)
- return 1;
-
- ebpf_network_viewer_hostname_list_t *move = network_viewer_opt.excluded_hostnames;
- while (move) {
- if (simple_pattern_matches(move->value_pattern, cmp))
- return 0;
-
- move = move->next;
- }
-
- move = network_viewer_opt.included_hostnames;
- while (move) {
- if (simple_pattern_matches(move->value_pattern, cmp))
- return 1;
-
- move = move->next;
- }
-
- return 0;
-}
-
-/**
- * Is socket allowed?
- *
- * Compare destination addresses and destination ports to define the next steps.
- *
- * @param key the socket read from the kernel ring
- * @param data the socket data, also used to refuse some sockets.
- *
- * @return It returns 1 if this socket is inside the ranges and 0 otherwise.
- */
-int ebpf_is_socket_allowed(netdata_socket_idx_t *key, netdata_socket_t *data)
-{
- int ret = 0;
- // If family is not AF_UNSPEC and it is different from the specified one
- if (network_viewer_opt.family && network_viewer_opt.family != data->family)
- goto endsocketallowed;
-
- if (!ebpf_is_port_inside_range(key->dport))
- goto endsocketallowed;
-
- ret = ebpf_is_specific_ip_inside_range(&key->daddr, data->family);
-
-endsocketallowed:
- return ret;
-}
-
-/**
- * Hash accumulator
- *
- * @param values the values used to calculate the data.
- * @param end the number of entries in values.
- */
-static void ebpf_hash_socket_accumulator(netdata_socket_t *values, int end)
-{
- int i;
- uint8_t protocol = values[0].protocol;
- uint64_t ct = values[0].current_timestamp;
- uint64_t ft = values[0].first_timestamp;
- uint16_t family = AF_UNSPEC;
- uint32_t external_origin = values[0].external_origin;
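- // With per-CPU maps, values[i] holds the counters collected on CPU i; fold everything into values[0].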
- for (i = 1; i < end; i++) {
- netdata_socket_t *w = &values[i];
-
- values[0].tcp.call_tcp_sent += w->tcp.call_tcp_sent;
- values[0].tcp.call_tcp_received += w->tcp.call_tcp_received;
- values[0].tcp.tcp_bytes_received += w->tcp.tcp_bytes_received;
- values[0].tcp.tcp_bytes_sent += w->tcp.tcp_bytes_sent;
- values[0].tcp.close += w->tcp.close;
- values[0].tcp.retransmit += w->tcp.retransmit;
- values[0].tcp.ipv4_connect += w->tcp.ipv4_connect;
- values[0].tcp.ipv6_connect += w->tcp.ipv6_connect;
-
- if (!protocol)
- protocol = w->protocol;
-
- if (family == AF_UNSPEC)
- family = w->family;
-
- if (w->current_timestamp > ct)
- ct = w->current_timestamp;
-
- if (!ft)
- ft = w->first_timestamp;
-
- if (w->external_origin)
- external_origin = NETDATA_EBPF_SRC_IP_ORIGIN_EXTERNAL;
- }
-
- values[0].protocol = (!protocol) ? IPPROTO_TCP : protocol;
- values[0].current_timestamp = ct;
- values[0].first_timestamp = ft;
- values[0].external_origin = external_origin;
-}
-
-/**
- * Translate socket
- *
- * Convert socket address to string
- *
- * @param dst the structure where we will store the result
- * @param key the socket address
- */
-static void ebpf_socket_translate(netdata_socket_plus_t *dst, netdata_socket_idx_t *key)
-{
- uint32_t resolve = network_viewer_opt.service_resolution_enabled;
- char service[NI_MAXSERV];
- int ret;
- if (dst->data.family == AF_INET) {
- struct sockaddr_in ipv4_addr = { };
- ipv4_addr.sin_port = 0;
- ipv4_addr.sin_addr.s_addr = key->saddr.addr32[0];
- ipv4_addr.sin_family = AF_INET;
- if (resolve) {
- // NI_NAMEREQD is not used: it is too slow.
- ret = getnameinfo((struct sockaddr *) &ipv4_addr, sizeof(ipv4_addr), dst->socket_string.src_ip,
- INET6_ADDRSTRLEN, service, NI_MAXSERV, NI_NUMERICHOST | NI_NUMERICSERV);
- if (ret) {
- collector_error("Cannot resolve name: %s", gai_strerror(ret));
- resolve = 0;
- } else {
- ipv4_addr.sin_addr.s_addr = key->daddr.addr32[0];
-
- ipv4_addr.sin_port = key->dport;
- ret = getnameinfo((struct sockaddr *) &ipv4_addr, sizeof(ipv4_addr), dst->socket_string.dst_ip,
- INET6_ADDRSTRLEN, dst->socket_string.dst_port, NI_MAXSERV,
- NI_NUMERICHOST);
- if (ret) {
- collector_error("Cannot resolve name: %s", gai_strerror(ret));
- resolve = 0;
- }
- }
- }
-
- // When resolution fails, we fall back to the raw addresses.
- if (!resolve) {
- ipv4_addr.sin_addr.s_addr = key->saddr.addr32[0];
-
- if (!inet_ntop(AF_INET, &ipv4_addr.sin_addr, dst->socket_string.src_ip, INET6_ADDRSTRLEN))
- netdata_log_info("Cannot convert IP %u.", ipv4_addr.sin_addr.s_addr);
-
- ipv4_addr.sin_addr.s_addr = key->daddr.addr32[0];
-
- if (!inet_ntop(AF_INET, &ipv4_addr.sin_addr, dst->socket_string.dst_ip, INET6_ADDRSTRLEN))
- netdata_log_info("Cannot convert IP %u.", ipv4_addr.sin_addr.s_addr);
- snprintfz(dst->socket_string.dst_port, NI_MAXSERV, "%u", ntohs(key->dport));
- }
- } else {
- struct sockaddr_in6 ipv6_addr = { };
- memcpy(&ipv6_addr.sin6_addr, key->saddr.addr8, sizeof(key->saddr.addr8));
- ipv6_addr.sin6_family = AF_INET6;
- if (resolve) {
- ret = getnameinfo((struct sockaddr *) &ipv6_addr, sizeof(ipv6_addr), dst->socket_string.src_ip,
- INET6_ADDRSTRLEN, service, NI_MAXSERV, NI_NUMERICHOST | NI_NUMERICSERV);
- if (ret) {
- collector_error("Cannot resolve name: %s", gai_strerror(ret));
- resolve = 0;
- } else {
- memcpy(&ipv6_addr.sin6_addr, key->daddr.addr8, sizeof(key->daddr.addr8));
- ret = getnameinfo((struct sockaddr *) &ipv6_addr, sizeof(ipv6_addr), dst->socket_string.dst_ip,
- INET6_ADDRSTRLEN, dst->socket_string.dst_port, NI_MAXSERV,
- NI_NUMERICHOST);
- if (ret) {
- collector_error("Cannot resolve name: %s", gai_strerror(ret));
- resolve = 0;
- }
- }
- }
-
- if (!resolve) {
- memcpy(&ipv6_addr.sin6_addr, key->saddr.addr8, sizeof(key->saddr.addr8));
- if (!inet_ntop(AF_INET6, &ipv6_addr.sin6_addr, dst->socket_string.src_ip, INET6_ADDRSTRLEN))
- netdata_log_info("Cannot convert IPv6 address.");
-
- memcpy(&ipv6_addr.sin6_addr, key->daddr.addr8, sizeof(key->daddr.addr8));
- if (!inet_ntop(AF_INET6, &ipv6_addr.sin6_addr, dst->socket_string.dst_ip, INET6_ADDRSTRLEN))
- netdata_log_info("Cannot convert IPv6 address.");
- snprintfz(dst->socket_string.dst_port, NI_MAXSERV, "%u", ntohs(key->dport));
- }
- }
- dst->pid = key->pid;
-
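- // If resolution left the destination port as the literal "0", fall back to the numeric value.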
- if (!strcmp(dst->socket_string.dst_port, "0"))
- snprintfz(dst->socket_string.dst_port, NI_MAXSERV, "%u", ntohs(key->dport));
-#ifdef NETDATA_DEV_MODE
- collector_info("New socket: { ORIGIN IP: %s, ORIGIN : %u, DST IP:%s, DST PORT: %s, PID: %u, PROTO: %d, FAMILY: %d}",
- dst->socket_string.src_ip,
- dst->data.external_origin,
- dst->socket_string.dst_ip,
- dst->socket_string.dst_port,
- dst->pid,
- dst->data.protocol,
- dst->data.family
- );
-#endif
-}
-
-/**
- * Update array vectors
- *
- * Read data from hash table and update vectors.
- *
- * @param em the structure with configuration
- */
-static void ebpf_update_array_vectors(ebpf_module_t *em)
-{
- netdata_thread_disable_cancelability();
- netdata_socket_idx_t key = {};
- netdata_socket_idx_t next_key = {};
-
- int maps_per_core = em->maps_per_core;
- int fd = em->maps[NETDATA_SOCKET_OPEN_SOCKET].map_fd;
-
- netdata_socket_t *values = socket_values;
- size_t length = sizeof(netdata_socket_t);
- int test, end;
- if (maps_per_core) {
- length *= ebpf_nprocs;
- end = ebpf_nprocs;
- } else
- end = 1;
-
- // We need to reset the values when we are working on kernel 4.15 or newer, because the kernel does not create
- // values for a specific processor unless it is used to store data. As a result of this behavior, the next socket
- // can inherit values from the previous one.
- memset(values, 0, length);
- time_t update_time = time(NULL);
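- // update_time is compared against last_update below to age out sockets not refreshed within one interval.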
- while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
- test = bpf_map_lookup_elem(fd, &key, values);
- if (test < 0) {
- goto end_socket_loop;
- }
-
- if (key.pid > (uint32_t)pid_max) {
- goto end_socket_loop;
- }
-
- ebpf_hash_socket_accumulator(values, end);
- ebpf_socket_fill_publish_apps(key.pid, values);
-
- // We update UDP to show info with charts, but we do not show them with functions
- /*
- if (key.dport == NETDATA_EBPF_UDP_PORT && values[0].protocol == IPPROTO_UDP) {
- bpf_map_delete_elem(fd, &key);
- goto end_socket_loop;
- }
- */
-
- // Discard non-bind sockets
- if (!key.daddr.addr64[0] && !key.daddr.addr64[1] && !key.saddr.addr64[0] && !key.saddr.addr64[1]) {
- bpf_map_delete_elem(fd, &key);
- goto end_socket_loop;
- }
-
- // When a socket is not allowed, we do not append it to the table, but we still keep it to accumulate data.
- if (!ebpf_is_socket_allowed(&key, values)) {
- goto end_socket_loop;
- }
-
- // Get PID structure
- rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock);
- PPvoid_t judy_array = &ebpf_judy_pid.index.JudyLArray;
- netdata_ebpf_judy_pid_stats_t *pid_ptr = ebpf_get_pid_from_judy_unsafe(judy_array, key.pid);
- if (!pid_ptr) {
- goto end_socket_loop;
- }
-
- // Get Socket structure
- rw_spinlock_write_lock(&pid_ptr->socket_stats.rw_spinlock);
- netdata_socket_plus_t **socket_pptr = (netdata_socket_plus_t **)ebpf_judy_insert_unsafe(
- &pid_ptr->socket_stats.JudyLArray, values[0].first_timestamp);
- netdata_socket_plus_t *socket_ptr = *socket_pptr;
- bool translate = false;
- if (likely(*socket_pptr == NULL)) {
- *socket_pptr = aral_mallocz(aral_socket_table);
-
- socket_ptr = *socket_pptr;
-
- translate = true;
- }
- uint64_t prev_period = socket_ptr->data.current_timestamp;
- memcpy(&socket_ptr->data, &values[0], sizeof(netdata_socket_t));
- if (translate)
- ebpf_socket_translate(socket_ptr, &key);
- else { // Check socket was updated
- if (prev_period) {
- if (values[0].current_timestamp > prev_period) // Socket updated
- socket_ptr->last_update = update_time;
- else if ((update_time - socket_ptr->last_update) > em->update_every) {
- // Socket was not updated since last read
- JudyLDel(&pid_ptr->socket_stats.JudyLArray, values[0].first_timestamp, PJE0);
- aral_freez(aral_socket_table, socket_ptr);
- }
- } else // First time
- socket_ptr->last_update = update_time;
- }
-
- rw_spinlock_write_unlock(&pid_ptr->socket_stats.rw_spinlock);
- rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
-
-end_socket_loop:
- memset(values, 0, length);
- memcpy(&key, &next_key, sizeof(key));
- }
- netdata_thread_enable_cancelability();
-}
-
-/**
- * Socket thread
- *
- * Thread used to read socket data from the kernel hash tables.
- *
- * @param ptr a pointer to `struct ebpf_module`
- *
- * @return It always returns NULL
- */
-void *ebpf_read_socket_thread(void *ptr)
-{
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- ebpf_update_array_vectors(em);
-
- int update_every = em->update_every;
- int counter = update_every - 1;
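- // counter starts at update_every - 1 so the first heartbeat after the initial read already triggers a refresh.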
-
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- usec_t period = update_every * USEC_PER_SEC;
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, period);
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- ebpf_update_array_vectors(em);
-
- counter = 0;
- }
-
- return NULL;
-}
-
-/**
- * Fill Network Viewer Port list
- *
- * Fill the structure with values read from /proc or the hash table.
- *
- * @param out the structure where we will store data.
- * @param value the port we are listening on.
- * @param proto the protocol used for this connection.
- * @param in the structure with values read from different sources.
- */
-static inline void fill_nv_port_list(ebpf_network_viewer_port_list_t *out, uint16_t value, uint16_t proto,
- netdata_passive_connection_t *in)
-{
- out->first = value;
- out->protocol = proto;
- out->pid = in->pid;
- out->tgid = in->tgid;
- out->connections = in->counter;
-}
-
-/**
- * Update listen table
- *
- * Update the linked list when necessary.
- *
- * @param value the port we are listening on.
- * @param proto the protocol used with the port connection.
- * @param in the structure with values read from different sources.
- */
-void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_connection_t *in)
-{
- ebpf_network_viewer_port_list_t *w;
- if (likely(listen_ports)) {
- ebpf_network_viewer_port_list_t *move = listen_ports, *store = listen_ports;
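- // Walk the list and update in place when the (protocol, port) pair is already tracked.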
- while (move) {
- if (move->protocol == proto && move->first == value) {
- move->pid = in->pid;
- move->tgid = in->tgid;
- move->connections = in->counter;
- return;
- }
-
- store = move;
- move = move->next;
- }
-
- w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
- store->next = w;
- } else {
- w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
-
- listen_ports = w;
- }
- fill_nv_port_list(w, value, proto, in);
-
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("The network viewer is monitoring inbound connections for port %u", ntohs(value));
-#endif
-}
-
-/**
- * Read listen table
- *
- * Read the table with all ports the host is listening on.
- */
-static void read_listen_table()
-{
- netdata_passive_connection_idx_t key = {};
- netdata_passive_connection_idx_t next_key = {};
-
- int fd = socket_maps[NETDATA_SOCKET_LPORTS].map_fd;
- netdata_passive_connection_t value = {};
- while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
- int test = bpf_map_lookup_elem(fd, &key, &value);
- if (test < 0) {
- key = next_key;
- continue;
- }
-
- // The correct protocol must come from kernel
- update_listen_table(key.port, key.protocol, &value);
-
- key = next_key;
- memset(&value, 0, sizeof(value));
- }
-
- if (next_key.port && value.pid) {
- // The correct protocol must come from kernel
- update_listen_table(next_key.port, next_key.protocol, &value);
- }
-}
-
-/**
- * Read the hash table and store data in the allocated vectors.
- *
- * @param stats vector used to read data from control table.
- * @param maps_per_core do I need to read all cores?
- */
-static void ebpf_socket_read_hash_global_tables(netdata_idx_t *stats, int maps_per_core)
-{
- netdata_idx_t res[NETDATA_SOCKET_COUNTER];
- ebpf_read_global_table_stats(res,
- socket_hash_values,
- socket_maps[NETDATA_SOCKET_GLOBAL].map_fd,
- maps_per_core,
- NETDATA_KEY_CALLS_TCP_SENDMSG,
- NETDATA_SOCKET_COUNTER);
-
- ebpf_read_global_table_stats(stats,
- socket_hash_values,
- socket_maps[NETDATA_SOCKET_TABLE_CTRL].map_fd,
- maps_per_core,
- NETDATA_CONTROLLER_PID_TABLE_ADD,
- NETDATA_CONTROLLER_END);
-
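- // Map the raw kernel counters onto the aggregated vector indexes consumed by the charts.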
- socket_aggregated_data[NETDATA_IDX_TCP_SENDMSG].call = res[NETDATA_KEY_CALLS_TCP_SENDMSG];
- socket_aggregated_data[NETDATA_IDX_TCP_CLEANUP_RBUF].call = res[NETDATA_KEY_CALLS_TCP_CLEANUP_RBUF];
- socket_aggregated_data[NETDATA_IDX_TCP_CLOSE].call = res[NETDATA_KEY_CALLS_TCP_CLOSE];
- socket_aggregated_data[NETDATA_IDX_UDP_RECVBUF].call = res[NETDATA_KEY_CALLS_UDP_RECVMSG];
- socket_aggregated_data[NETDATA_IDX_UDP_SENDMSG].call = res[NETDATA_KEY_CALLS_UDP_SENDMSG];
- socket_aggregated_data[NETDATA_IDX_TCP_RETRANSMIT].call = res[NETDATA_KEY_TCP_RETRANSMIT];
- socket_aggregated_data[NETDATA_IDX_TCP_CONNECTION_V4].call = res[NETDATA_KEY_CALLS_TCP_CONNECT_IPV4];
- socket_aggregated_data[NETDATA_IDX_TCP_CONNECTION_V6].call = res[NETDATA_KEY_CALLS_TCP_CONNECT_IPV6];
-
- socket_aggregated_data[NETDATA_IDX_TCP_SENDMSG].ecall = res[NETDATA_KEY_ERROR_TCP_SENDMSG];
- socket_aggregated_data[NETDATA_IDX_TCP_CLEANUP_RBUF].ecall = res[NETDATA_KEY_ERROR_TCP_CLEANUP_RBUF];
- socket_aggregated_data[NETDATA_IDX_UDP_RECVBUF].ecall = res[NETDATA_KEY_ERROR_UDP_RECVMSG];
- socket_aggregated_data[NETDATA_IDX_UDP_SENDMSG].ecall = res[NETDATA_KEY_ERROR_UDP_SENDMSG];
- socket_aggregated_data[NETDATA_IDX_TCP_CONNECTION_V4].ecall = res[NETDATA_KEY_ERROR_TCP_CONNECT_IPV4];
- socket_aggregated_data[NETDATA_IDX_TCP_CONNECTION_V6].ecall = res[NETDATA_KEY_ERROR_TCP_CONNECT_IPV6];
-
- socket_aggregated_data[NETDATA_IDX_TCP_SENDMSG].bytes = res[NETDATA_KEY_BYTES_TCP_SENDMSG];
- socket_aggregated_data[NETDATA_IDX_TCP_CLEANUP_RBUF].bytes = res[NETDATA_KEY_BYTES_TCP_CLEANUP_RBUF];
- socket_aggregated_data[NETDATA_IDX_UDP_RECVBUF].bytes = res[NETDATA_KEY_BYTES_UDP_RECVMSG];
- socket_aggregated_data[NETDATA_IDX_UDP_SENDMSG].bytes = res[NETDATA_KEY_BYTES_UDP_SENDMSG];
-}
-
-/**
- * Fill publish apps when necessary.
- *
- * @param current_pid the PID that I am updating
- * @param ns the structure with data read from memory.
- */
-void ebpf_socket_fill_publish_apps(uint32_t current_pid, netdata_socket_t *ns)
-{
- ebpf_socket_publish_apps_t *curr = socket_bandwidth_curr[current_pid];
- if (!curr) {
- curr = ebpf_socket_stat_get();
- socket_bandwidth_curr[current_pid] = curr;
- }
-
- curr->bytes_sent += ns->tcp.tcp_bytes_sent;
- curr->bytes_received += ns->tcp.tcp_bytes_received;
- curr->call_tcp_sent += ns->tcp.call_tcp_sent;
- curr->call_tcp_received += ns->tcp.call_tcp_received;
- curr->retransmit += ns->tcp.retransmit;
- curr->call_close += ns->tcp.close;
- curr->call_tcp_v4_connection += ns->tcp.ipv4_connect;
- curr->call_tcp_v6_connection += ns->tcp.ipv6_connect;
-
- curr->call_udp_sent += ns->udp.call_udp_sent;
- curr->call_udp_received += ns->udp.call_udp_received;
-}
-
-/**
- * Update cgroup
- *
- * Update cgroup data based on PIDs.
- */
-static void ebpf_update_socket_cgroup()
-{
- ebpf_cgroup_target_t *ect;
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- struct pid_on_target2 *pids;
- for (pids = ect->pids; pids; pids = pids->next) {
- int pid = pids->pid;
- ebpf_socket_publish_apps_t *publish = &ect->publish_socket;
- if (likely(socket_bandwidth_curr) && socket_bandwidth_curr[pid]) {
- ebpf_socket_publish_apps_t *in = socket_bandwidth_curr[pid];
-
- publish->bytes_sent = in->bytes_sent;
- publish->bytes_received = in->bytes_received;
- publish->call_tcp_sent = in->call_tcp_sent;
- publish->call_tcp_received = in->call_tcp_received;
- publish->retransmit = in->retransmit;
- publish->call_udp_sent = in->call_udp_sent;
- publish->call_udp_received = in->call_udp_received;
- publish->call_close = in->call_close;
- publish->call_tcp_v4_connection = in->call_tcp_v4_connection;
- publish->call_tcp_v6_connection = in->call_tcp_v6_connection;
- }
- }
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Sum PIDs
- *
- * Sum values for all targets.
- *
- * @param socket structure used to store data
- * @param pids input data
- */
-static void ebpf_socket_sum_cgroup_pids(ebpf_socket_publish_apps_t *socket, struct pid_on_target2 *pids)
-{
- ebpf_socket_publish_apps_t accumulator;
- memset(&accumulator, 0, sizeof(accumulator));
-
- while (pids) {
- netdata_socket_t *w = &pids->socket;
-
- accumulator.bytes_received += w->tcp.tcp_bytes_received;
- accumulator.bytes_sent += w->tcp.tcp_bytes_sent;
- accumulator.call_tcp_received += w->tcp.call_tcp_received;
- accumulator.call_tcp_sent += w->tcp.call_tcp_sent;
- accumulator.retransmit += w->tcp.retransmit;
- accumulator.call_close += w->tcp.close;
- accumulator.call_tcp_v4_connection += w->tcp.ipv4_connect;
- accumulator.call_tcp_v6_connection += w->tcp.ipv6_connect;
- accumulator.call_udp_received += w->udp.call_udp_received;
- accumulator.call_udp_sent += w->udp.call_udp_sent;
-
- pids = pids->next;
- }
-
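- // The per-cgroup counters are monotonic: keep the previous value when the sum went backwards (e.g. a PID exited).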
- socket->bytes_sent = (accumulator.bytes_sent >= socket->bytes_sent) ? accumulator.bytes_sent : socket->bytes_sent;
- socket->bytes_received = (accumulator.bytes_received >= socket->bytes_received) ? accumulator.bytes_received : socket->bytes_received;
- socket->call_tcp_sent = (accumulator.call_tcp_sent >= socket->call_tcp_sent) ? accumulator.call_tcp_sent : socket->call_tcp_sent;
- socket->call_tcp_received = (accumulator.call_tcp_received >= socket->call_tcp_received) ? accumulator.call_tcp_received : socket->call_tcp_received;
- socket->retransmit = (accumulator.retransmit >= socket->retransmit) ? accumulator.retransmit : socket->retransmit;
- socket->call_udp_sent = (accumulator.call_udp_sent >= socket->call_udp_sent) ? accumulator.call_udp_sent : socket->call_udp_sent;
- socket->call_udp_received = (accumulator.call_udp_received >= socket->call_udp_received) ? accumulator.call_udp_received : socket->call_udp_received;
- socket->call_close = (accumulator.call_close >= socket->call_close) ? accumulator.call_close : socket->call_close;
- socket->call_tcp_v4_connection = (accumulator.call_tcp_v4_connection >= socket->call_tcp_v4_connection) ?
- accumulator.call_tcp_v4_connection : socket->call_tcp_v4_connection;
- socket->call_tcp_v6_connection = (accumulator.call_tcp_v6_connection >= socket->call_tcp_v6_connection) ?
- accumulator.call_tcp_v6_connection : socket->call_tcp_v6_connection;
-}
-
-/**
- * Create specific socket charts
- *
- * Create charts for cgroup/application.
- *
- * @param type the chart type.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_specific_socket_charts(char *type, int update_every)
-{
- int order_basis = 5300;
- ebpf_create_chart(type, NETDATA_NET_APPS_CONNECTION_TCP_V4,
- "Calls to tcp_v4_connection",
- EBPF_COMMON_DIMENSION_CONNECTIONS, NETDATA_CGROUP_NET_GROUP,
- NETDATA_CGROUP_TCP_V4_CONN_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V4], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- if (tcp_v6_connect_address.type == 'T') {
- ebpf_create_chart(type,
- NETDATA_NET_APPS_CONNECTION_TCP_V6,
- "Calls to tcp_v6_connection",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_CGROUP_NET_GROUP,
- NETDATA_CGROUP_TCP_V6_CONN_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V6],
- 1,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SOCKET);
- }
-
- ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_RECV,
- "Bytes received",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_CGROUP_NET_GROUP,
- NETDATA_CGROUP_SOCKET_BYTES_RECV_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_SENT,
- "Bytes sent",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_CGROUP_NET_GROUP,
- NETDATA_CGROUP_SOCKET_BYTES_SEND_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++,
- ebpf_create_global_dimension,
- socket_publish_aggregated, 1,
- update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS,
- "Calls to tcp_cleanup_rbuf.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP,
- NETDATA_CGROUP_SOCKET_TCP_RECV_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS,
- "Calls to tcp_sendmsg.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP,
- NETDATA_CGROUP_SOCKET_TCP_SEND_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++,
- ebpf_create_global_dimension,
- socket_publish_aggregated, 1,
- update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT,
- "Calls to tcp_retransmit.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP,
- NETDATA_CGROUP_SOCKET_TCP_RETRANSMIT_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS,
- "Calls to udp_sendmsg",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP,
- NETDATA_CGROUP_SOCKET_UDP_SEND_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_UDP_SENDMSG], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-
- ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS,
- "Calls to udp_recvmsg",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP,
- NETDATA_CGROUP_SOCKET_UDP_RECV_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++,
- ebpf_create_global_dimension,
- &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
-}
-
-/**
- * Obsolete specific socket charts
- *
- * Obsolete charts for cgroup/application.
- *
- * @param type the chart type.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_obsolete_specific_socket_charts(char *type, int update_every)
-{
- int order_basis = 5300;
- ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_CONNECTION_TCP_V4, "", "Calls to tcp_v4_connection",
- EBPF_COMMON_DIMENSION_CONNECTIONS, NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_V4_CONN_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every);
-
- if (tcp_v6_connect_address.type == 'T') {
- ebpf_write_chart_obsolete(type,
- NETDATA_NET_APPS_CONNECTION_TCP_V6,
- "",
- "Calls to tcp_v6_connection",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++,
- update_every);
- }
-
- ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_RECV, "", "Bytes received",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_SENT, "", "Bytes sent",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, "", "Calls to tcp_cleanup_rbuf.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, "", "Calls to tcp_sendmsg.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, "", "Calls to tcp_retransmit.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, "", "Calls to udp_sendmsg",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, "", "Calls to udp_recvmsg",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + order_basis++, update_every);
-}
-
-/**
- * Send specific socket data
- *
- * Send data for a specific cgroup/apps.
- *
- * @param type chart type
- * @param values structure with values that will be sent to netdata
- */
-static void ebpf_send_specific_socket_data(char *type, ebpf_socket_publish_apps_t *values)
-{
- ebpf_write_begin_chart(type, NETDATA_NET_APPS_CONNECTION_TCP_V4, "");
- write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V4].name,
- (long long) values->call_tcp_v4_connection);
- ebpf_write_end_chart();
-
- if (tcp_v6_connect_address.type == 'T') {
- ebpf_write_begin_chart(type, NETDATA_NET_APPS_CONNECTION_TCP_V6, "");
- write_chart_dimension(
- socket_publish_aggregated[NETDATA_IDX_TCP_CONNECTION_V6].name, (long long)values->call_tcp_v6_connection);
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_SENT, "");
- write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_SENDMSG].name,
- (long long) values->bytes_sent);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_RECV, "");
- write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF].name,
- (long long) values->bytes_received);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, "");
- write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_SENDMSG].name,
- (long long) values->call_tcp_sent);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, "");
- write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF].name,
- (long long) values->call_tcp_received);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, "");
- write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT].name,
- (long long) values->retransmit);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, "");
- write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_UDP_SENDMSG].name,
- (long long) values->call_udp_sent);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, "");
- write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF].name,
- (long long) values->call_udp_received);
- ebpf_write_end_chart();
-}
-
-/**
- * Create Systemd Socket Charts
- *
- * Create charts when systemd is enabled
- *
- * @param update_every value to overwrite the update frequency set by the server.
- **/
-static void ebpf_create_systemd_socket_charts(int update_every)
-{
- int order = 20080;
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_CONNECTION_TCP_V4,
- "Calls to tcp_v4_connection", EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_TCP_V4_CONN_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
-
- if (tcp_v6_connect_address.type == 'T') {
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_CONNECTION_TCP_V6,
- "Calls to tcp_v6_connection",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
- }
-
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_RECV,
- "Bytes received", EBPF_COMMON_DIMENSION_BITS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_SENT,
- "Bytes sent", EBPF_COMMON_DIMENSION_BITS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS,
- "Calls to tcp_cleanup_rbuf.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS,
- "Calls to tcp_sendmsg.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT,
- "Calls to tcp_retransmit",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS,
- "Calls to udp_sendmsg",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS,
- "Calls to udp_recvmsg",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
-}
-
-/**
- * Send Systemd charts
- *
- * Send collected data to Netdata.
- */
-static void ebpf_send_systemd_socket_charts()
-{
- ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_CONNECTION_TCP_V4, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_v4_connection);
- }
- }
- ebpf_write_end_chart();
-
- if (tcp_v6_connect_address.type == 'T') {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_CONNECTION_TCP_V6, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_v6_connection);
- }
- }
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_SENT, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.bytes_sent);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_RECV, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.bytes_received);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_sent);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_received);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.retransmit);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.call_udp_sent);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.call_udp_received);
- }
- }
- ebpf_write_end_chart();
-}
-
-/**
- * Update Cgroup algorithm
- *
- * Change algorithm from absolute to incremental
- */
-void ebpf_socket_update_cgroup_algorithm()
-{
- int i;
- for (i = 0; i < NETDATA_MAX_SOCKET_VECTOR; i++) {
- netdata_publish_syscall_t *ptr = &socket_publish_aggregated[i];
- ptr->algorithm = ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX];
- }
-}
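-
-/*
- * Note: with the "absolute" algorithm Netdata plots the raw counter value,
- * while "incremental" plots the per-second difference between consecutive
- * collections. The cgroup socket values are monotonically growing counters,
- * so the switch above effectively turns them into rates.
- */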
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param update_every value to overwrite the update frequency set by the server.
-*/
-static void ebpf_socket_send_cgroup_data(int update_every)
-{
- if (!ebpf_cgroup_pids)
- return;
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- ebpf_socket_sum_cgroup_pids(&ect->publish_socket, ect->pids);
- }
-
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
- if (send_cgroup_chart) {
- ebpf_create_systemd_socket_charts(update_every);
- }
- ebpf_send_systemd_socket_charts();
- }
-
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART)) {
- ebpf_create_specific_socket_charts(ect->name, update_every);
- ect->flags |= NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART;
- }
-
- if (ect->flags & NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART && ect->updated) {
- ebpf_send_specific_socket_data(ect->name, &ect->publish_socket);
- } else {
- ebpf_obsolete_specific_socket_charts(ect->name, update_every);
- ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART;
- }
- }
-
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/*****************************************************************
- *
- * FUNCTIONS WITH THE MAIN LOOP
- *
- *****************************************************************/
-
-/**
- * Main loop for this collector.
- *
- * @param em the structure with thread information
- */
-static void socket_collector(ebpf_module_t *em)
-{
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- int cgroups = em->cgroup_charts;
- if (cgroups)
- ebpf_socket_update_cgroup_algorithm();
-
- int socket_global_enabled = em->global_charts;
- int update_every = em->update_every;
- int maps_per_core = em->maps_per_core;
- int counter = update_every - 1;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- netdata_idx_t *stats = em->hash_table_stats;
- memset(stats, 0, sizeof(em->hash_table_stats));
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- netdata_apps_integration_flags_t socket_apps_enabled = em->apps_charts;
- if (socket_global_enabled) {
- read_listen_table();
- ebpf_socket_read_hash_global_tables(stats, maps_per_core);
- }
-
- pthread_mutex_lock(&collect_data_mutex);
- if (cgroups)
- ebpf_update_socket_cgroup();
-
- pthread_mutex_lock(&lock);
- if (socket_global_enabled)
- ebpf_socket_send_data(em);
-
- if (socket_apps_enabled & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
- ebpf_socket_send_apps_data(em, apps_groups_root_target);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_socket_pid)
- ebpf_send_data_aral_chart(ebpf_aral_socket_pid, em);
-#endif
-
- if (cgroups)
- ebpf_socket_send_cgroup_data(update_every);
-
- fflush(stdout);
-
- pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
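-
-/*
- * Note: heartbeat_next() wakes the loop above once per second; `counter`
- * throttles the actual collection to one pass every `update_every` seconds,
- * so the heartbeat period stays fixed while the chart granularity follows
- * the module configuration.
- */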
-
-/*****************************************************************
- *
- * FUNCTIONS TO START THREAD
- *
- *****************************************************************/
-
-/**
- * Initialize vectors used with this thread.
- *
- * We do not test the return value, because callocz already does this and
- * shuts down the software when the allocation is not possible.
- */
-static void ebpf_socket_initialize_global_vectors()
-{
- memset(socket_aggregated_data, 0 ,NETDATA_MAX_SOCKET_VECTOR * sizeof(netdata_syscall_stat_t));
- memset(socket_publish_aggregated, 0 ,NETDATA_MAX_SOCKET_VECTOR * sizeof(netdata_publish_syscall_t));
- socket_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
-
- ebpf_socket_aral_init();
- socket_bandwidth_curr = callocz((size_t)pid_max, sizeof(ebpf_socket_publish_apps_t *));
-
- aral_socket_table = ebpf_allocate_pid_aral(NETDATA_EBPF_SOCKET_ARAL_TABLE_NAME,
- sizeof(netdata_socket_plus_t));
-
- socket_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_socket_t));
-
- ebpf_load_addresses(&tcp_v6_connect_address, -1);
-}
-
-/*****************************************************************
- *
- * EBPF SOCKET THREAD
- *
- *****************************************************************/
-
-/**
- * Link dimension name
- *
- * Link user-specified names inside a linked list.
- *
- * @param port the port number associated with the dimension name.
- * @param hash the calculated hash for the dimension name.
- * @param value the dimension name.
- */
-static void ebpf_link_dimension_name(char *port, uint32_t hash, char *value)
-{
- int test = str2i(port);
- if (test < NETDATA_MINIMUM_PORT_VALUE || test > NETDATA_MAXIMUM_PORT_VALUE){
- netdata_log_error("The dimension given (%s = %s) has an invalid value and it will be ignored.", port, value);
- return;
- }
-
- ebpf_network_viewer_dim_name_t *w;
- w = callocz(1, sizeof(ebpf_network_viewer_dim_name_t));
-
- w->name = strdupz(value);
- w->hash = hash;
-
- w->port = (uint16_t) htons(test);
-
- ebpf_network_viewer_dim_name_t *names = network_viewer_opt.names;
- if (unlikely(!names)) {
- network_viewer_opt.names = w;
- } else {
- for (; names->next; names = names->next) {
- if (names->port == w->port) {
- netdata_log_info("Duplicated definition for a service, the name %s will be ignored. ", names->name);
- freez(names->name);
- names->name = w->name;
- names->hash = w->hash;
- freez(w);
- return;
- }
- }
- names->next = w;
- }
-
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("Adding values %s( %u) to dimension name list used on network viewer", w->name, htons(w->port));
-#endif
-}
-
-/**
- * Parse service Name section.
- *
- * This function gets the values that will be used to overwrite dimensions.
- *
- * @param cfg the configuration structure
- */
-void ebpf_parse_service_name_section(struct config *cfg)
-{
- struct section *co = appconfig_get_section(cfg, EBPF_SERVICE_NAME_SECTION);
- if (co) {
- struct config_option *cv;
- for (cv = co->values; cv ; cv = cv->next) {
- ebpf_link_dimension_name(cv->name, cv->hash, cv->value);
- }
- }
-
- // Always associate the default port with Netdata
- ebpf_network_viewer_dim_name_t *names = network_viewer_opt.names;
- if (names) {
- uint16_t default_port = htons(19999);
- while (names) {
- if (names->port == default_port)
- return;
-
- names = names->next;
- }
- }
-
- char *port_string = getenv("NETDATA_LISTEN_PORT");
- if (port_string) {
- // if variable has an invalid value, we assume netdata is using 19999
- int default_port = str2i(port_string);
- if (default_port > 0 && default_port < 65536)
- ebpf_link_dimension_name(port_string, simple_hash(port_string), "Netdata");
- }
-}
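-
-/*
- * Illustrative `network.conf` snippet for the section parsed above (the port
- * numbers and names below are placeholders, not defaults):
- *
- *   [service name]
- *       19999 = Netdata
- *       8080 = webserver
- *
- * Each key must be a port between 1 and 65535; the associated value replaces
- * the port number as the dimension name shown by the network viewer.
- */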
-
-/**
- * Parse table size options
- *
- * @param cfg configuration options read from user file.
- */
-void parse_table_size_options(struct config *cfg)
-{
- socket_maps[NETDATA_SOCKET_OPEN_SOCKET].user_input = (uint32_t) appconfig_get_number(cfg,
- EBPF_GLOBAL_SECTION,
- EBPF_CONFIG_SOCKET_MONITORING_SIZE,
- NETDATA_MAXIMUM_CONNECTIONS_ALLOWED);
-
- socket_maps[NETDATA_SOCKET_TABLE_UDP].user_input = (uint32_t) appconfig_get_number(cfg,
- EBPF_GLOBAL_SECTION,
- EBPF_CONFIG_UDP_SIZE, NETDATA_MAXIMUM_UDP_CONNECTIONS_ALLOWED);
-}
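-
-/*
- * Illustrative `network.conf` snippet for the options read above, assuming
- * EBPF_GLOBAL_SECTION maps to a `[global]` section as in the stock ebpf.d
- * configuration files (the values shown are the code defaults):
- *
- *   [global]
- *       socket monitoring table size = 16384
- *       udp connection table size = 4096
- */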
-
-/**
- * Load BPF
- *
- * Load BPF files.
- *
- * @param em the structure with configuration
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_socket_load_bpf(ebpf_module_t *em)
-{
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
-#endif
-
- int ret = 0;
-
- if (em->load & EBPF_LOAD_LEGACY) {
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- ret = -1;
- }
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- socket_bpf_obj = socket_bpf__open();
- if (!socket_bpf_obj)
- ret = -1;
- else
- ret = ebpf_socket_load_and_attach(socket_bpf_obj, em);
- }
-#endif
-
- if (ret) {
- netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name);
- }
-
- return ret;
-}
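-
-/*
- * Note: the legacy branch loads a pre-compiled, kernel-specific object file
- * shipped with the plugin, while the LIBBPF_MAJOR_VERSION branch opens a
- * libbpf skeleton (socket_bpf__open) that relies on CO-RE relocations and
- * therefore needs a kernel built with BTF information.
- */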
-
-/**
- * Socket thread
- *
- * Thread used to generate socket charts.
- *
- * @param ptr a pointer to `struct ebpf_module`
- *
- * @return It always returns NULL.
- */
-void *ebpf_socket_thread(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_socket_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- if (em->enabled > NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- collector_error("There is already a thread %s running", em->info.thread_name);
- return NULL;
- }
-
- em->maps = socket_maps;
-
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- // It was not enabled from main config file (ebpf.d.conf)
- if (!network_viewer_opt.enabled)
- network_viewer_opt.enabled = appconfig_get_boolean(&socket_config, EBPF_NETWORK_VIEWER_SECTION, "enabled",
- CONFIG_BOOLEAN_YES);
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
-
- parse_table_size_options(&socket_config);
-
- ebpf_socket_initialize_global_vectors();
-
- if (running_on_kernel < NETDATA_EBPF_KERNEL_5_0)
- em->mode = MODE_ENTRY;
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_socket_load_bpf(em)) {
- pthread_mutex_unlock(&lock);
- goto endsocket;
- }
-
- int algorithms[NETDATA_MAX_SOCKET_VECTOR] = {
- NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
- NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
- NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_INCREMENTAL_IDX,
- NETDATA_EBPF_INCREMENTAL_IDX
- };
- ebpf_global_labels(
- socket_aggregated_data, socket_publish_aggregated, socket_dimension_names, socket_id_names,
- algorithms, NETDATA_MAX_SOCKET_VECTOR);
-
- ebpf_read_socket.thread = mallocz(sizeof(netdata_thread_t));
- netdata_thread_create(ebpf_read_socket.thread,
- ebpf_read_socket.name,
- NETDATA_THREAD_OPTION_DEFAULT,
- ebpf_read_socket_thread,
- em);
-
- pthread_mutex_lock(&lock);
- ebpf_socket_create_global_charts(em);
-
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_socket_pid)
- socket_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_SOCKET_ARAL_NAME, em);
-#endif
-
- pthread_mutex_unlock(&lock);
-
- socket_collector(em);
-
-endsocket:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_socket.h b/collectors/ebpf.plugin/ebpf_socket.h
deleted file mode 100644
index a6d3e03b6..000000000
--- a/collectors/ebpf.plugin/ebpf_socket.h
+++ /dev/null
@@ -1,348 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-#ifndef NETDATA_EBPF_SOCKET_H
-#define NETDATA_EBPF_SOCKET_H 1
-#include <stdint.h>
-#include "libnetdata/avl/avl.h"
-
-#include <sys/socket.h>
-#ifdef HAVE_NETDB_H
-#include <netdb.h>
-#endif
-
-// Module name & description
-#define NETDATA_EBPF_MODULE_NAME_SOCKET "socket"
-#define NETDATA_EBPF_SOCKET_MODULE_DESC "Monitors TCP and UDP bandwidth. This thread is integrated with apps and cgroups."
-
-// Vector indexes
-#define NETDATA_UDP_START 3
-
-// config file
-#define NETDATA_NETWORK_CONFIG_FILE "network.conf"
-#define EBPF_NETWORK_VIEWER_SECTION "network connections"
-#define EBPF_SERVICE_NAME_SECTION "service name"
-#define EBPF_CONFIG_RESOLVE_HOSTNAME "resolve hostnames"
-#define EBPF_CONFIG_RESOLVE_SERVICE "resolve service names"
-#define EBPF_CONFIG_PORTS "ports"
-#define EBPF_CONFIG_HOSTNAMES "hostnames"
-#define EBPF_CONFIG_SOCKET_MONITORING_SIZE "socket monitoring table size"
-#define EBPF_CONFIG_UDP_SIZE "udp connection table size"
-
-enum ebpf_socket_table_list {
- NETDATA_SOCKET_GLOBAL,
- NETDATA_SOCKET_LPORTS,
- NETDATA_SOCKET_OPEN_SOCKET,
- NETDATA_SOCKET_TABLE_UDP,
- NETDATA_SOCKET_TABLE_CTRL
-};
-
-enum ebpf_socket_publish_index {
- NETDATA_IDX_TCP_SENDMSG,
- NETDATA_IDX_TCP_CLEANUP_RBUF,
- NETDATA_IDX_TCP_CLOSE,
- NETDATA_IDX_UDP_RECVBUF,
- NETDATA_IDX_UDP_SENDMSG,
- NETDATA_IDX_TCP_RETRANSMIT,
- NETDATA_IDX_TCP_CONNECTION_V4,
- NETDATA_IDX_TCP_CONNECTION_V6,
- NETDATA_IDX_INCOMING_CONNECTION_TCP,
- NETDATA_IDX_INCOMING_CONNECTION_UDP,
-
- // Keep this as last and don't skip numbers as it is used as element counter
- NETDATA_MAX_SOCKET_VECTOR
-};
-
-enum socket_functions {
- NETDATA_FCNT_INET_CSK_ACCEPT,
- NETDATA_FCNT_TCP_RETRANSMIT,
- NETDATA_FCNT_CLEANUP_RBUF,
- NETDATA_FCNT_TCP_CLOSE,
- NETDATA_FCNT_UDP_RECEVMSG,
- NETDATA_FCNT_TCP_SENDMSG,
- NETDATA_FCNT_UDP_SENDMSG,
- NETDATA_FCNT_TCP_V4_CONNECT,
- NETDATA_FCNT_TCP_V6_CONNECT
-};
-
-typedef enum ebpf_socket_idx {
- NETDATA_KEY_CALLS_TCP_SENDMSG,
- NETDATA_KEY_ERROR_TCP_SENDMSG,
- NETDATA_KEY_BYTES_TCP_SENDMSG,
-
- NETDATA_KEY_CALLS_TCP_CLEANUP_RBUF,
- NETDATA_KEY_ERROR_TCP_CLEANUP_RBUF,
- NETDATA_KEY_BYTES_TCP_CLEANUP_RBUF,
-
- NETDATA_KEY_CALLS_TCP_CLOSE,
-
- NETDATA_KEY_CALLS_UDP_RECVMSG,
- NETDATA_KEY_ERROR_UDP_RECVMSG,
- NETDATA_KEY_BYTES_UDP_RECVMSG,
-
- NETDATA_KEY_CALLS_UDP_SENDMSG,
- NETDATA_KEY_ERROR_UDP_SENDMSG,
- NETDATA_KEY_BYTES_UDP_SENDMSG,
-
- NETDATA_KEY_TCP_RETRANSMIT,
-
- NETDATA_KEY_CALLS_TCP_CONNECT_IPV4,
- NETDATA_KEY_ERROR_TCP_CONNECT_IPV4,
-
- NETDATA_KEY_CALLS_TCP_CONNECT_IPV6,
- NETDATA_KEY_ERROR_TCP_CONNECT_IPV6,
-
- // Keep this as last and don't skip numbers as it is used as element counter
- NETDATA_SOCKET_COUNTER
-} ebpf_socket_index_t;
-
-#define NETDATA_SOCKET_KERNEL_FUNCTIONS "kernel"
-#define NETDATA_NETWORK_CONNECTIONS_GROUP "network connections"
-#define NETDATA_CGROUP_NET_GROUP "network (eBPF)"
-
-// Global chart name
-#define NETDATA_TCP_OUTBOUND_CONNECTIONS "tcp_outbound_conn"
-#define NETDATA_INBOUND_CONNECTIONS "inbound_conn"
-#define NETDATA_TCP_FUNCTION_COUNT "tcp_functions"
-#define NETDATA_TCP_FUNCTION_BITS "total_tcp_bandwidth"
-#define NETDATA_TCP_FUNCTION_ERROR "tcp_error"
-#define NETDATA_TCP_RETRANSMIT "tcp_retransmit"
-#define NETDATA_UDP_FUNCTION_COUNT "udp_functions"
-#define NETDATA_UDP_FUNCTION_BITS "total_udp_bandwidth"
-#define NETDATA_UDP_FUNCTION_ERROR "udp_error"
-
-// Charts created on Apps submenu
-#define NETDATA_NET_APPS_CONNECTION_TCP_V4 "outbound_conn_v4"
-#define NETDATA_NET_APPS_CONNECTION_TCP_V6 "outbound_conn_v6"
-#define NETDATA_NET_APPS_BANDWIDTH_SENT "total_bandwidth_sent"
-#define NETDATA_NET_APPS_BANDWIDTH_RECV "total_bandwidth_recv"
-#define NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS "bandwidth_tcp_send"
-#define NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS "bandwidth_tcp_recv"
-#define NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT "bandwidth_tcp_retransmit"
-#define NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS "bandwidth_udp_send"
-#define NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS "bandwidth_udp_recv"
-
-// Port range
-#define NETDATA_MINIMUM_PORT_VALUE 1
-#define NETDATA_MAXIMUM_PORT_VALUE 65535
-#define NETDATA_COMPILED_CONNECTIONS_ALLOWED 65535U
-#define NETDATA_MAXIMUM_CONNECTIONS_ALLOWED 16384U
-#define NETDATA_COMPILED_UDP_CONNECTIONS_ALLOWED 8192U
-#define NETDATA_MAXIMUM_UDP_CONNECTIONS_ALLOWED 4096U
-
-#define NETDATA_MINIMUM_IPV4_CIDR 0
-#define NETDATA_MAXIMUM_IPV4_CIDR 32
-
-// Contexts
-#define NETDATA_CGROUP_TCP_V4_CONN_CONTEXT "cgroup.net_conn_ipv4"
-#define NETDATA_CGROUP_TCP_V6_CONN_CONTEXT "cgroup.net_conn_ipv6"
-#define NETDATA_CGROUP_SOCKET_BYTES_RECV_CONTEXT "cgroup.net_bytes_recv"
-#define NETDATA_CGROUP_SOCKET_BYTES_SEND_CONTEXT "cgroup.net_bytes_send"
-#define NETDATA_CGROUP_SOCKET_TCP_RECV_CONTEXT "cgroup.net_tcp_recv"
-#define NETDATA_CGROUP_SOCKET_TCP_SEND_CONTEXT "cgroup.net_tcp_send"
-#define NETDATA_CGROUP_SOCKET_TCP_RETRANSMIT_CONTEXT "cgroup.net_retransmit"
-#define NETDATA_CGROUP_SOCKET_UDP_RECV_CONTEXT "cgroup.net_udp_recv"
-#define NETDATA_CGROUP_SOCKET_UDP_SEND_CONTEXT "cgroup.net_udp_send"
-
-#define NETDATA_SERVICES_SOCKET_TCP_V4_CONN_CONTEXT "services.net_conn_ipv4"
-#define NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT "services.net_conn_ipv6"
-#define NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT "services.net_bytes_recv"
-#define NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT "services.net_bytes_send"
-#define NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT "services.net_tcp_recv"
-#define NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT "services.net_tcp_send"
-#define NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT "services.net_retransmit"
-#define NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT "services.net_udp_recv"
-#define NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT "services.net_udp_send"
-
-// ARAL name
-#define NETDATA_EBPF_SOCKET_ARAL_NAME "ebpf_socket"
-#define NETDATA_EBPF_PID_SOCKET_ARAL_TABLE_NAME "ebpf_pid_socket"
-#define NETDATA_EBPF_SOCKET_ARAL_TABLE_NAME "ebpf_socket_tbl"
-
-typedef struct ebpf_socket_publish_apps {
- // Data read
- uint64_t bytes_sent; // Bytes sent
- uint64_t bytes_received; // Bytes received
- uint64_t call_tcp_sent; // Number of times tcp_sendmsg was called
- uint64_t call_tcp_received; // Number of times tcp_cleanup_rbuf was called
- uint64_t retransmit; // Number of times tcp_retransmit was called
- uint64_t call_udp_sent; // Number of times udp_sendmsg was called
- uint64_t call_udp_received; // Number of times udp_recvmsg was called
- uint64_t call_close; // Number of times tcp_close was called
- uint64_t call_tcp_v4_connection;// Number of times tcp_v4_connect was called
- uint64_t call_tcp_v6_connection;// Number of times tcp_v6_connect was called
-} ebpf_socket_publish_apps_t;
-
-typedef struct ebpf_network_viewer_dimension_names {
- char *name;
- uint32_t hash;
-
- uint16_t port;
-
- struct ebpf_network_viewer_dimension_names *next;
-} ebpf_network_viewer_dim_name_t ;
-
-typedef struct ebpf_network_viewer_port_list {
- char *value;
- uint32_t hash;
-
- uint16_t first;
- uint16_t last;
-
- uint16_t cmp_first;
- uint16_t cmp_last;
-
- uint16_t protocol;
- uint32_t pid;
- uint32_t tgid;
- uint64_t connections;
- struct ebpf_network_viewer_port_list *next;
-} ebpf_network_viewer_port_list_t;
-
-typedef struct netdata_passive_connection {
- uint32_t tgid;
- uint32_t pid;
- uint64_t counter;
-} netdata_passive_connection_t;
-
-typedef struct netdata_passive_connection_idx {
- uint16_t protocol;
- uint16_t port;
-} netdata_passive_connection_idx_t;
-
-/**
- * Union used to store ip addresses
- */
-union netdata_ip_t {
- uint8_t addr8[16];
- uint16_t addr16[8];
- uint32_t addr32[4];
- uint64_t addr64[2];
-};
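-
-/*
- * Note: the union lets the same 16 bytes hold either an IPv4 or an IPv6
- * address, and range comparisons can be done on the two addr64 words instead
- * of byte by byte.
- */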
-
-typedef struct ebpf_network_viewer_ip_list {
- char *value; // IP value
- uint32_t hash; // IP hash
-
- uint8_t ver; // IP version
-
- union netdata_ip_t first; // First IP address of the range (equals `last` for a single IP)
- union netdata_ip_t last; // Last IP address of the range
-
- struct ebpf_network_viewer_ip_list *next;
-} ebpf_network_viewer_ip_list_t;
-
-typedef struct ebpf_network_viewer_hostname_list {
- char *value; // Hostname value
- uint32_t hash; // Hostname hash
-
- SIMPLE_PATTERN *value_pattern;
-
- struct ebpf_network_viewer_hostname_list *next;
-} ebpf_network_viewer_hostname_list_t;
-
-typedef struct ebpf_network_viewer_options {
- RW_SPINLOCK rw_spinlock;
-
- uint32_t enabled;
- uint32_t family; // AF_INET, AF_INET6 or AF_UNSPEC (both)
-
- uint32_t hostname_resolution_enabled;
- uint32_t service_resolution_enabled;
-
- ebpf_network_viewer_port_list_t *excluded_port;
- ebpf_network_viewer_port_list_t *included_port;
-
- ebpf_network_viewer_dim_name_t *names;
-
- ebpf_network_viewer_ip_list_t *excluded_ips;
- ebpf_network_viewer_ip_list_t *included_ips;
-
- ebpf_network_viewer_hostname_list_t *excluded_hostnames;
- ebpf_network_viewer_hostname_list_t *included_hostnames;
-
- ebpf_network_viewer_ip_list_t *ipv4_local_ip;
- ebpf_network_viewer_ip_list_t *ipv6_local_ip;
-} ebpf_network_viewer_options_t;
-
-extern ebpf_network_viewer_options_t network_viewer_opt;
-
-/**
- * Structure to store socket information
- */
-typedef struct netdata_socket {
- // Timestamp
- uint64_t first_timestamp;
- uint64_t current_timestamp;
- // Socket additional info
- uint16_t protocol;
- uint16_t family;
- uint32_t external_origin;
- struct {
- uint32_t call_tcp_sent;
- uint32_t call_tcp_received;
- uint64_t tcp_bytes_sent;
- uint64_t tcp_bytes_received;
- uint32_t close; //It is never used with UDP
- uint32_t retransmit; //It is never used with UDP
- uint32_t ipv4_connect;
- uint32_t ipv6_connect;
- } tcp;
-
- struct {
- uint32_t call_udp_sent;
- uint32_t call_udp_received;
- uint64_t udp_bytes_sent;
- uint64_t udp_bytes_received;
- } udp;
-} netdata_socket_t;
-
-typedef enum netdata_socket_flags {
- NETDATA_SOCKET_FLAGS_ALREADY_OPEN = (1<<0)
-} netdata_socket_flags_t;
-
-typedef enum netdata_socket_src_ip_origin {
- NETDATA_EBPF_SRC_IP_ORIGIN_LOCAL,
- NETDATA_EBPF_SRC_IP_ORIGIN_EXTERNAL
-} netdata_socket_src_ip_origin_t;
-
-typedef struct netata_socket_plus {
- netdata_socket_t data; // Data read from database
- uint32_t pid;
- time_t last_update;
- netdata_socket_flags_t flags;
-
- struct {
- char src_ip[INET6_ADDRSTRLEN + 1];
- // uint16_t src_port;
- char dst_ip[INET6_ADDRSTRLEN+ 1];
- char dst_port[NI_MAXSERV + 1];
- } socket_string;
-} netdata_socket_plus_t;
-
-extern ARAL *aral_socket_table;
-
-/**
- * Index used together with the previous structure.
- */
-typedef struct netdata_socket_idx {
- union netdata_ip_t saddr;
- //uint16_t sport;
- union netdata_ip_t daddr;
- uint16_t dport;
- uint32_t pid;
-} netdata_socket_idx_t;
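-
-/*
- * Note: this structure is the key of the open-socket hash table. The source
- * port is commented out above, presumably so that connections differing only
- * by the ephemeral source port collapse into a single tracked socket.
- */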
-
-void ebpf_clean_port_structure(ebpf_network_viewer_port_list_t **clean);
-extern ebpf_network_viewer_port_list_t *listen_ports;
-void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_connection_t *values);
-void ebpf_fill_ip_list_unsafe(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table);
-void ebpf_parse_service_name_section(struct config *cfg);
-void ebpf_parse_ips_unsafe(char *ptr);
-void ebpf_parse_ports(char *ptr);
-void ebpf_socket_read_open_connections(BUFFER *buf, struct ebpf_module *em);
-void ebpf_socket_fill_publish_apps(uint32_t current_pid, netdata_socket_t *ns);
-
-
-extern struct config socket_config;
-extern netdata_ebpf_targets_t socket_targets[];
-
-#endif
diff --git a/collectors/ebpf.plugin/ebpf_softirq.c b/collectors/ebpf.plugin/ebpf_softirq.c
deleted file mode 100644
index 106ff4f29..000000000
--- a/collectors/ebpf.plugin/ebpf_softirq.c
+++ /dev/null
@@ -1,286 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_softirq.h"
-
-struct config softirq_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-#define SOFTIRQ_MAP_LATENCY 0
-static ebpf_local_maps_t softirq_maps[] = {
- {
- .name = "tbl_softirq",
- .internal_input = NETDATA_SOFTIRQ_MAX_IRQS,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- /* end */
- {
- .name = NULL,
- .internal_input = 0,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }
-};
-
-#define SOFTIRQ_TP_CLASS_IRQ "irq"
-static ebpf_tracepoint_t softirq_tracepoints[] = {
- {.enabled = false, .class = SOFTIRQ_TP_CLASS_IRQ, .event = "softirq_entry"},
- {.enabled = false, .class = SOFTIRQ_TP_CLASS_IRQ, .event = "softirq_exit"},
- /* end */
- {.enabled = false, .class = NULL, .event = NULL}
-};
-
-// these must be in the order defined by the kernel:
-// https://elixir.bootlin.com/linux/v5.12.19/source/include/trace/events/irq.h#L13
-static softirq_val_t softirq_vals[] = {
- {.name = "HI", .latency = 0},
- {.name = "TIMER", .latency = 0},
- {.name = "NET_TX", .latency = 0},
- {.name = "NET_RX", .latency = 0},
- {.name = "BLOCK", .latency = 0},
- {.name = "IRQ_POLL", .latency = 0},
- {.name = "TASKLET", .latency = 0},
- {.name = "SCHED", .latency = 0},
- {.name = "HRTIMER", .latency = 0},
- {.name = "RCU", .latency = 0},
-};
-
-// tmp store for soft IRQ values we get from a per-CPU eBPF map.
-static softirq_ebpf_val_t *softirq_ebpf_vals = NULL;
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_softirq_global(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_EBPF_SYSTEM_GROUP,
- "softirq_latency",
- "",
- "Software IRQ latency",
- EBPF_COMMON_DIMENSION_MILLISECONDS,
- "softirqs",
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS+1,
- em->update_every);
-}
-
-/**
- * Cleanup
- *
- * Clean up allocated memory.
- *
- * @param ptr thread data.
- */
-static void softirq_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
-
- ebpf_obsolete_softirq_global(em);
-
- pthread_mutex_unlock(&lock);
- fflush(stdout);
- }
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
-
- if (em->objects) {
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- for (int i = 0; softirq_tracepoints[i].class != NULL; i++) {
- ebpf_disable_tracepoint(&softirq_tracepoints[i]);
- }
- freez(softirq_ebpf_vals);
- softirq_ebpf_vals = NULL;
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/*****************************************************************
- * MAIN LOOP
- *****************************************************************/
-
-/**
- * Read Latency Map
- *
- * Read data from the kernel map to plot for users.
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void softirq_read_latency_map(int maps_per_core)
-{
- int fd = softirq_maps[SOFTIRQ_MAP_LATENCY].map_fd;
- int i;
- size_t length = sizeof(softirq_ebpf_val_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
- int test = bpf_map_lookup_elem(fd, &i, softirq_ebpf_vals);
- if (unlikely(test < 0)) {
- continue;
- }
-
- uint64_t total_latency = 0;
- int cpu_i;
- int end = (maps_per_core) ? ebpf_nprocs : 1;
- for (cpu_i = 0; cpu_i < end; cpu_i++) {
- total_latency += softirq_ebpf_vals[cpu_i].latency/1000;
- }
-
- softirq_vals[i].latency = total_latency;
- memset(softirq_ebpf_vals, 0, length);
- }
-}
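-
-/*
- * Note: for BPF_MAP_TYPE_PERCPU_ARRAY maps, bpf_map_lookup_elem() fills one
- * softirq_ebpf_val_t per possible CPU, which is why softirq_ebpf_vals is
- * allocated with ebpf_nprocs elements and summed over cpu_i above.
- */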
-
-static void softirq_create_charts(int update_every)
-{
- ebpf_create_chart(
- NETDATA_EBPF_SYSTEM_GROUP,
- "softirq_latency",
- "Software IRQ latency",
- EBPF_COMMON_DIMENSION_MILLISECONDS,
- "softirqs",
- NULL,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS+1,
- NULL, NULL, 0, update_every,
- NETDATA_EBPF_MODULE_NAME_SOFTIRQ
- );
-
- fflush(stdout);
-}
-
-static void softirq_create_dims()
-{
- uint32_t i;
- for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
- ebpf_write_global_dimension(
- softirq_vals[i].name, softirq_vals[i].name,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]
- );
- }
-}
-
-static inline void softirq_write_dims()
-{
- uint32_t i;
- for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
- write_chart_dimension(softirq_vals[i].name, softirq_vals[i].latency);
- }
-}
-
-/**
- * Main loop for this collector.
- *
- * @param em the structure with thread information
- */
-static void softirq_collector(ebpf_module_t *em)
-{
- softirq_ebpf_vals = callocz(ebpf_nprocs, sizeof(softirq_ebpf_val_t));
-
- // create chart and static dims.
- pthread_mutex_lock(&lock);
- softirq_create_charts(em->update_every);
- softirq_create_dims();
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
- pthread_mutex_unlock(&lock);
-
- // loop and read from published data until ebpf plugin is closed.
- heartbeat_t hb;
- heartbeat_init(&hb);
- int update_every = em->update_every;
- int counter = update_every - 1;
- int maps_per_core = em->maps_per_core;
- // This will be cancelled by its parent.
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- softirq_read_latency_map(maps_per_core);
- pthread_mutex_lock(&lock);
-
- // write dims now for all hitherto discovered IRQs.
- ebpf_write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "softirq_latency", "");
- softirq_write_dims();
- ebpf_write_end_chart();
-
- pthread_mutex_unlock(&lock);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
-
-/*****************************************************************
- * EBPF SOFTIRQ THREAD
- *****************************************************************/
-
-/**
- * Soft IRQ latency thread.
- *
- * @param ptr a `ebpf_module_t *`.
- * @return always NULL.
- */
-void *ebpf_softirq_thread(void *ptr)
-{
- netdata_thread_cleanup_push(softirq_cleanup, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = softirq_maps;
-
- if (ebpf_enable_tracepoints(softirq_tracepoints) == 0) {
- goto endsoftirq;
- }
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
-#endif
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- goto endsoftirq;
- }
-
- softirq_collector(em);
-
-endsoftirq:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
-
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_softirq.h b/collectors/ebpf.plugin/ebpf_softirq.h
deleted file mode 100644
index 4ef36775a..000000000
--- a/collectors/ebpf.plugin/ebpf_softirq.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_SOFTIRQ_H
-#define NETDATA_EBPF_SOFTIRQ_H 1
-
-// Module observation
-#define NETDATA_EBPF_SOFTIRQ_MODULE_DESC "Show time spent servicing individual software interrupt requests (soft IRQs)."
-
-/*****************************************************************
- * copied from kernel-collectors repo, with modifications needed
- * for inclusion here.
- *****************************************************************/
-
-#define NETDATA_SOFTIRQ_MAX_IRQS 10
-
-typedef struct softirq_ebpf_val {
- uint64_t latency;
- uint64_t ts;
-} softirq_ebpf_val_t;
-
-/*****************************************************************
- * below this is eBPF plugin-specific code.
- *****************************************************************/
-
-#define NETDATA_EBPF_MODULE_NAME_SOFTIRQ "softirq"
-#define NETDATA_SOFTIRQ_CONFIG_FILE "softirq.conf"
-
-typedef struct sofirq_val {
- uint64_t latency;
- char *name;
-} softirq_val_t;
-
-extern struct config softirq_config;
-void *ebpf_softirq_thread(void *ptr);
-
-#endif /* NETDATA_EBPF_SOFTIRQ_H */
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c
deleted file mode 100644
index fb007f928..000000000
--- a/collectors/ebpf.plugin/ebpf_swap.c
+++ /dev/null
@@ -1,1030 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_swap.h"
-
-static char *swap_dimension_name[NETDATA_SWAP_END] = { "read", "write" };
-static netdata_syscall_stat_t swap_aggregated_data[NETDATA_SWAP_END];
-static netdata_publish_syscall_t swap_publish_aggregated[NETDATA_SWAP_END];
-
-static netdata_idx_t swap_hash_values[NETDATA_SWAP_END];
-static netdata_idx_t *swap_values = NULL;
-
-netdata_publish_swap_t *swap_vector = NULL;
-
-struct config swap_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-static ebpf_local_maps_t swap_maps[] = {{.name = "tbl_pid_swap", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = "swap_ctrl", .internal_input = NETDATA_CONTROLLER_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "tbl_swap", .internal_input = NETDATA_SWAP_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-netdata_ebpf_targets_t swap_targets[] = { {.name = "swap_readpage", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "swap_writepage", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-
-#ifdef LIBBPF_MAJOR_VERSION
-/**
- * Disable probe
- *
- * Disable all probes to use exclusively another method.
- *
- * @param obj is the main structure for bpf objects
- */
-static void ebpf_swap_disable_probe(struct swap_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_swap_readpage_probe, false);
- bpf_program__set_autoload(obj->progs.netdata_swap_writepage_probe, false);
-}
-
-/**
- * Disable trampoline
- *
- * Disable all trampolines to use exclusively another method.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_swap_disable_trampoline(struct swap_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_swap_readpage_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_swap_writepage_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false);
-}
-
-/**
- * Set trampoline target
- *
- * Set the targets we will monitor.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_swap_set_trampoline_target(struct swap_bpf *obj)
-{
- bpf_program__set_attach_target(obj->progs.netdata_swap_readpage_fentry, 0,
- swap_targets[NETDATA_KEY_SWAP_READPAGE_CALL].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_swap_writepage_fentry, 0,
- swap_targets[NETDATA_KEY_SWAP_WRITEPAGE_CALL].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_release_task_fentry, 0,
- EBPF_COMMON_FNCT_CLEAN_UP);
-}
-
-/**
- * Attach kprobes
- *
- * Attach probes to the monitored targets.
- *
- * @param obj is the main structure for bpf objects.
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_swap_attach_kprobe(struct swap_bpf *obj)
-{
- obj->links.netdata_swap_readpage_probe = bpf_program__attach_kprobe(obj->progs.netdata_swap_readpage_probe,
- false,
- swap_targets[NETDATA_KEY_SWAP_READPAGE_CALL].name);
- int ret = libbpf_get_error(obj->links.netdata_swap_readpage_probe);
- if (ret)
- return -1;
-
- obj->links.netdata_swap_writepage_probe = bpf_program__attach_kprobe(obj->progs.netdata_swap_writepage_probe,
- false,
- swap_targets[NETDATA_KEY_SWAP_WRITEPAGE_CALL].name);
- ret = libbpf_get_error(obj->links.netdata_swap_writepage_probe);
- if (ret)
- return -1;
-
- return 0;
-}
-
-/**
- * Set hash tables
- *
- * Set the values for maps according to the values given by the kernel.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_swap_set_hash_tables(struct swap_bpf *obj)
-{
- swap_maps[NETDATA_PID_SWAP_TABLE].map_fd = bpf_map__fd(obj->maps.tbl_pid_swap);
- swap_maps[NETDATA_SWAP_CONTROLLER].map_fd = bpf_map__fd(obj->maps.swap_ctrl);
- swap_maps[NETDATA_SWAP_GLOBAL_TABLE].map_fd = bpf_map__fd(obj->maps.tbl_swap);
-}
-
-/**
- * Adjust Map
- *
- * Resize maps according to input from users.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- */
-static void ebpf_swap_adjust_map(struct swap_bpf *obj, ebpf_module_t *em)
-{
- ebpf_update_map_size(obj->maps.tbl_pid_swap, &swap_maps[NETDATA_PID_SWAP_TABLE],
- em, bpf_map__name(obj->maps.tbl_pid_swap));
-
- ebpf_update_map_type(obj->maps.tbl_pid_swap, &swap_maps[NETDATA_PID_SWAP_TABLE]);
- ebpf_update_map_type(obj->maps.tbl_swap, &swap_maps[NETDATA_SWAP_GLOBAL_TABLE]);
- ebpf_update_map_type(obj->maps.swap_ctrl, &swap_maps[NETDATA_SWAP_CONTROLLER]);
-}
-
-/**
- * Disable Release Task
- *
- * Disable release task when apps are not enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_swap_disable_release_task(struct swap_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false);
-}
-
-/**
- * Load and attach
- *
- * Load and attach the eBPF code in kernel.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- *
- * @return it returns 0 on success and -1 otherwise
- */
-static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t *em)
-{
- netdata_ebpf_targets_t *mt = em->targets;
- netdata_ebpf_program_loaded_t test = mt[NETDATA_KEY_SWAP_READPAGE_CALL].mode;
-
- if (test == EBPF_LOAD_TRAMPOLINE) {
- ebpf_swap_disable_probe(obj);
-
- ebpf_swap_set_trampoline_target(obj);
- } else {
- ebpf_swap_disable_trampoline(obj);
- }
-
- ebpf_swap_adjust_map(obj, em);
-
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_swap_disable_release_task(obj);
-
- int ret = swap_bpf__load(obj);
- if (ret) {
- return ret;
- }
-
- ret = (test == EBPF_LOAD_TRAMPOLINE) ? swap_bpf__attach(obj) : ebpf_swap_attach_kprobe(obj);
- if (!ret) {
- ebpf_swap_set_hash_tables(obj);
-
- ebpf_update_controller(swap_maps[NETDATA_SWAP_CONTROLLER].map_fd, em);
- }
-
- return ret;
-}
-#endif
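-
-/*
- * Note: fentry/fexit trampolines avoid most of the kprobe overhead but need
- * a BTF-enabled kernel (roughly 5.5+ on x86_64); the kprobe branch above is
- * kept as a fallback for older kernels.
- */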
-
-/*****************************************************************
- *
- * FUNCTIONS TO CLOSE THE THREAD
- *
- *****************************************************************/
-
-static void ebpf_obsolete_specific_swap_charts(char *type, int update_every);
-
-/**
- * Obsolete services
- *
- * Obsolete all service charts created
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_swap_services(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_MEM_SWAP_READ_CHART,
- "",
- "Calls to function swap_readpage.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CGROUP_SWAP_READ_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5100,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_MEM_SWAP_WRITE_CHART,
- "",
- "Calls to function swap_writepage.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CGROUP_SWAP_WRITE_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5101,
- em->update_every);
-}
-
-/**
- * Obsolete cgroup chart
- *
- * Send obsolete flags for all charts created, before closing.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static inline void ebpf_obsolete_swap_cgroup_charts(ebpf_module_t *em) {
- pthread_mutex_lock(&mutex_cgroup_shm);
-
- ebpf_obsolete_swap_services(em);
-
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- ebpf_obsolete_specific_swap_charts(ect->name, em->update_every);
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Obsolete apps charts
- *
- * Obsolete apps charts.
- *
- * @param em a pointer to the structure with the default values.
- */
-void ebpf_obsolete_swap_apps_charts(struct ebpf_module *em)
-{
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = apps_groups_root_target; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SWAP_IDX))))
- continue;
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_swap_readpage",
- "Calls to function swap_readpage.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_swap_readpage",
- 20070,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_swap_writepage",
- "Calls to function swap_writepage.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_swap_writepage",
- 20071,
- update_every);
- w->charts_created &= ~(1<<EBPF_MODULE_SWAP_IDX);
- }
-}
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_swap_global(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_MEM_SWAP_CHART,
- "",
- "Calls to access swap memory",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_SWAP_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_MEM_SWAP_CALLS,
- em->update_every);
-}
-
-/**
- * Swap exit
- *
- * Cancel thread and exit.
- *
- * @param ptr thread data.
- */
-static void ebpf_swap_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
- if (em->cgroup_charts) {
- ebpf_obsolete_swap_cgroup_charts(em);
- fflush(stdout);
- }
-
- if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
- ebpf_obsolete_swap_apps_charts(em);
- }
-
- ebpf_obsolete_swap_global(em);
-
- fflush(stdout);
- pthread_mutex_unlock(&lock);
- }
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
-
-#ifdef LIBBPF_MAJOR_VERSION
- if (bpf_obj) {
- swap_bpf__destroy(bpf_obj);
- bpf_obj = NULL;
- }
-#endif
- if (em->objects) {
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/*****************************************************************
- *
- * COLLECTOR THREAD
- *
- *****************************************************************/
-
-/**
- * Apps Accumulator
- *
- * Sum all values read from the kernel and store them in the first element.
- *
- * @param out the vector with read values.
- * @param maps_per_core do I need to read all cores?
- */
-static void swap_apps_accumulator(netdata_publish_swap_t *out, int maps_per_core)
-{
- int i, end = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_publish_swap_t *total = &out[0];
- for (i = 1; i < end; i++) {
- netdata_publish_swap_t *w = &out[i];
- total->write += w->write;
- total->read += w->read;
- }
-}
-
-/**
- * Fill PID
- *
- * Fill PID structures
- *
- * @param current_pid the pid for which we are collecting data
- * @param publish values read from hash tables
- */
-static void swap_fill_pid(uint32_t current_pid, netdata_publish_swap_t *publish)
-{
- netdata_publish_swap_t *curr = swap_pid[current_pid];
- if (!curr) {
- curr = callocz(1, sizeof(netdata_publish_swap_t));
- swap_pid[current_pid] = curr;
- }
-
- memcpy(curr, publish, sizeof(netdata_publish_swap_t));
-}
-
-/**
- * Update cgroup
- *
- * Update cgroup data based on the values read from the hash tables.
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void ebpf_update_swap_cgroup(int maps_per_core)
-{
- ebpf_cgroup_target_t *ect ;
- netdata_publish_swap_t *cv = swap_vector;
- int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
- size_t length = sizeof(netdata_publish_swap_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
- pthread_mutex_lock(&mutex_cgroup_shm);
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- struct pid_on_target2 *pids;
- for (pids = ect->pids; pids; pids = pids->next) {
- int pid = pids->pid;
- netdata_publish_swap_t *out = &pids->swap;
- if (likely(swap_pid) && swap_pid[pid]) {
- netdata_publish_swap_t *in = swap_pid[pid];
-
- memcpy(out, in, sizeof(netdata_publish_swap_t));
- } else {
- memset(cv, 0, length);
- if (!bpf_map_lookup_elem(fd, &pid, cv)) {
- swap_apps_accumulator(cv, maps_per_core);
-
- memcpy(out, cv, sizeof(netdata_publish_swap_t));
-
- // We are cleaning to avoid passing data read from one process to other.
- memset(cv, 0, length);
- }
- }
- }
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Read APPS table
- *
- * Read the apps table and store data inside the structure.
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void read_swap_apps_table(int maps_per_core)
-{
- netdata_publish_swap_t *cv = swap_vector;
- uint32_t key;
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
- int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
- size_t length = sizeof(netdata_publish_swap_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
- while (pids) {
- key = pids->pid;
-
- if (bpf_map_lookup_elem(fd, &key, cv)) {
- pids = pids->next;
- continue;
- }
-
- swap_apps_accumulator(cv, maps_per_core);
-
- swap_fill_pid(key, cv);
-
- // We are cleaning to avoid passing data read from one process to other.
- memset(cv, 0, length);
-
- pids = pids->next;
- }
-}
-
-/**
-* Send global
-*
-* Send global charts to Netdata
-*/
-static void swap_send_global()
-{
- write_io_chart(NETDATA_MEM_SWAP_CHART, NETDATA_EBPF_MEMORY_GROUP,
- swap_publish_aggregated[NETDATA_KEY_SWAP_WRITEPAGE_CALL].dimension,
- (long long) swap_hash_values[NETDATA_KEY_SWAP_WRITEPAGE_CALL],
- swap_publish_aggregated[NETDATA_KEY_SWAP_READPAGE_CALL].dimension,
- (long long) swap_hash_values[NETDATA_KEY_SWAP_READPAGE_CALL]);
-}
-
-/**
- * Read global counter
- *
- * Read the table with the number of calls to all functions.
- *
- * @param stats vector used to read data from control table.
- * @param maps_per_core do I need to read all cores?
- */
-static void ebpf_swap_read_global_table(netdata_idx_t *stats, int maps_per_core)
-{
- ebpf_read_global_table_stats(swap_hash_values,
- swap_values,
- swap_maps[NETDATA_SWAP_GLOBAL_TABLE].map_fd,
- maps_per_core,
- NETDATA_KEY_SWAP_READPAGE_CALL,
- NETDATA_SWAP_END);
-
- ebpf_read_global_table_stats(stats,
- swap_values,
- swap_maps[NETDATA_SWAP_CONTROLLER].map_fd,
- maps_per_core,
- NETDATA_CONTROLLER_PID_TABLE_ADD,
- NETDATA_CONTROLLER_END);
-}
-
-/**
- * Sum PIDs
- *
- * Sum values for all targets.
- *
- * @param swap structure where the sums are stored
- * @param root linked list of PIDs associated with the target
- */
-static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct ebpf_pid_on_target *root)
-{
- uint64_t local_read = 0;
- uint64_t local_write = 0;
-
- while (root) {
- int32_t pid = root->pid;
- netdata_publish_swap_t *w = swap_pid[pid];
- if (w) {
- local_write += w->write;
- local_read += w->read;
- }
- root = root->next;
- }
-
- // These conditions keep the published values monotonic: with the incremental
- // algorithm a decrease would make Netdata compute a negative rate.
- swap->write = (local_write >= swap->write) ? local_write : swap->write;
- swap->read = (local_read >= swap->read) ? local_read : swap->read;
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param root the target list.
-*/
-void ebpf_swap_send_apps_data(struct ebpf_target *root)
-{
- struct ebpf_target *w;
- for (w = root; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SWAP_IDX))))
- continue;
-
- ebpf_swap_sum_pids(&w->swap, w->root_pid);
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_swap_readpage");
- write_chart_dimension("calls", (long long) w->swap.read);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_swap_writepage");
- write_chart_dimension("calls", (long long) w->swap.write);
- ebpf_write_end_chart();
- }
-}
-
-/**
- * Sum PIDs
- *
- * Sum values for all targets.
- *
- * @param swap structure where the sums are stored
- * @param pids linked list of PIDs associated with the cgroup target
- */
-static void ebpf_swap_sum_cgroup_pids(netdata_publish_swap_t *swap, struct pid_on_target2 *pids)
-{
- uint64_t local_read = 0;
- uint64_t local_write = 0;
-
- while (pids) {
- netdata_publish_swap_t *w = &pids->swap;
- local_write += w->write;
- local_read += w->read;
-
- pids = pids->next;
- }
-
- // These conditions keep the published values monotonic: with the incremental
- // algorithm a decrease would make Netdata compute a negative rate.
- swap->write = (local_write >= swap->write) ? local_write : swap->write;
- swap->read = (local_read >= swap->read) ? local_read : swap->read;
-}
-
-/**
- * Send Systemd charts
- *
- * Send collected data to Netdata.
- */
-static void ebpf_send_systemd_swap_charts()
-{
- ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_READ_CHART, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long) ect->publish_systemd_swap.read);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_WRITE_CHART, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long) ect->publish_systemd_swap.write);
- }
- }
- ebpf_write_end_chart();
-}
-
-/**
- * Create specific swap charts
- *
- * Create charts for cgroup/application.
- *
- * @param type the chart type.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_specific_swap_charts(char *type, int update_every)
-{
- ebpf_create_chart(type, NETDATA_MEM_SWAP_READ_CHART,
- "Calls to function swap_readpage.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
- NETDATA_CGROUP_SWAP_READ_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5100,
- ebpf_create_global_dimension,
- swap_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
-
- ebpf_create_chart(type, NETDATA_MEM_SWAP_WRITE_CHART,
- "Calls to function swap_writepage.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
- NETDATA_CGROUP_SWAP_WRITE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5101,
- ebpf_create_global_dimension,
- &swap_publish_aggregated[NETDATA_KEY_SWAP_WRITEPAGE_CALL], 1,
- update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
-}
-
-/**
- * Obsolete specific swap charts
- *
- * Mark cgroup/application charts as obsolete.
- *
- * @param type the chart type.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_obsolete_specific_swap_charts(char *type, int update_every)
-{
- ebpf_write_chart_obsolete(type, NETDATA_MEM_SWAP_READ_CHART, "", "Calls to function swap_readpage.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SWAP_READ_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5100, update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_MEM_SWAP_WRITE_CHART, "", "Calls to function swap_writepage.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SWAP_WRITE_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5101, update_every);
-}
-
-/**
- * Send Specific Swap data
- *
- * Send data for specific cgroup/apps.
- *
- * @param type chart type
- * @param values structure with values that will be sent to netdata
- */
-static void ebpf_send_specific_swap_data(char *type, netdata_publish_swap_t *values)
-{
- ebpf_write_begin_chart(type, NETDATA_MEM_SWAP_READ_CHART, "");
- write_chart_dimension(swap_publish_aggregated[NETDATA_KEY_SWAP_READPAGE_CALL].name, (long long) values->read);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_MEM_SWAP_WRITE_CHART, "");
- write_chart_dimension(swap_publish_aggregated[NETDATA_KEY_SWAP_WRITEPAGE_CALL].name, (long long) values->write);
- ebpf_write_end_chart();
-}
-
-/**
- * Create Systemd Swap Charts
- *
- * Create charts when systemd is enabled
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_systemd_swap_charts(int update_every)
-{
- ebpf_create_charts_on_systemd(NETDATA_MEM_SWAP_READ_CHART,
- "Calls to swap_readpage.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20191,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_SWAP_READ_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_SWAP, update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_MEM_SWAP_WRITE_CHART,
- "Calls to function swap_writepage.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20192,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_SWAP_WRITE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_SWAP, update_every);
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-void ebpf_swap_send_cgroup_data(int update_every)
-{
- if (!ebpf_cgroup_pids)
- return;
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- ebpf_swap_sum_cgroup_pids(&ect->publish_systemd_swap, ect->pids);
- }
-
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
-
- if (has_systemd) {
- if (send_cgroup_chart) {
- ebpf_create_systemd_swap_charts(update_every);
- fflush(stdout);
- }
- ebpf_send_systemd_swap_charts();
- }
-
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_SWAP_CHART) && ect->updated) {
- ebpf_create_specific_swap_charts(ect->name, update_every);
- ect->flags |= NETDATA_EBPF_CGROUP_HAS_SWAP_CHART;
- }
-
- if (ect->flags & NETDATA_EBPF_CGROUP_HAS_SWAP_CHART) {
- if (ect->updated) {
- ebpf_send_specific_swap_data(ect->name, &ect->publish_systemd_swap);
- } else {
- ebpf_obsolete_specific_swap_charts(ect->name, update_every);
- ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_SWAP_CHART;
- }
- }
- }
-
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Main loop for this collector.
- */
-static void swap_collector(ebpf_module_t *em)
-{
- int cgroup = em->cgroup_charts;
- int update_every = em->update_every;
- heartbeat_t hb;
- heartbeat_init(&hb);
- int counter = update_every - 1;
- int maps_per_core = em->maps_per_core;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- netdata_idx_t *stats = em->hash_table_stats;
- memset(stats, 0, sizeof(em->hash_table_stats));
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_swap_read_global_table(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps)
- read_swap_apps_table(maps_per_core);
-
- if (cgroup)
- ebpf_update_swap_cgroup(maps_per_core);
-
- pthread_mutex_lock(&lock);
-
- swap_send_global();
-
- if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
- ebpf_swap_send_apps_data(apps_groups_root_target);
-
- if (cgroup)
- ebpf_swap_send_cgroup_data(update_every);
-
- pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
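
The loop above wakes once per second but only collects every update_every
ticks; a minimal, standalone illustration of the counter pattern (plain C, no
Netdata types), showing why initializing the counter to update_every - 1 makes
the very first tick collect immediately:

    #include <stdio.h>

    int main(void)
    {
        int update_every = 5;
        int counter = update_every - 1; /* first tick collects immediately */

        for (int tick = 0; tick < 12; tick++) {
            if (++counter != update_every)
                continue;               /* wake, but skip this second */

            counter = 0;
            printf("collect at tick %d\n", tick); /* ticks 0, 5 and 10 */
        }
        return 0;
    }
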
-
-/*****************************************************************
- *
- * INITIALIZE THREAD
- *
- *****************************************************************/
-
-/**
- * Create apps charts
- *
- * Call ebpf_write_chart_cmd to create the charts on the apps submenu.
- *
- * @param em a pointer to the structure with the default values.
- * @param ptr a pointer to the targets list (struct ebpf_target).
- */
-void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr)
-{
- struct ebpf_target *root = ptr;
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = root; w; w = w->next) {
- if (unlikely(!w->exposed))
- continue;
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_swap_readpage",
- "Calls to function swap_readpage.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_swap_readpage",
- 20070,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SWAP);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_swap_writepage",
- "Calls to function swap_writepage.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_swap_writepage",
- 20071,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SWAP);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- w->charts_created |= 1<<EBPF_MODULE_SWAP_IDX;
- }
- em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED;
-}
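
For reference, assuming ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX] resolves
to the string "incremental", each DIMENSION command printed above reaches the
daemon on stdout as:

    DIMENSION calls '' incremental 1 1

with ebpf_write_chart_cmd presumably having emitted the matching CHART
definition line just before it.
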
-
-/**
- * Allocate vectors used with this thread.
- *
- * We do not test the return value, because callocz already does that and shuts
- * down the software when an allocation is not possible.
- *
- * @param apps is apps enabled?
- */
-static void ebpf_swap_allocate_global_vectors(int apps)
-{
- if (apps)
- swap_pid = callocz((size_t)pid_max, sizeof(netdata_publish_swap_t *));
-
- swap_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_swap_t));
-
- swap_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
-
- memset(swap_hash_values, 0, sizeof(swap_hash_values));
-}
-
-/*****************************************************************
- *
- * MAIN THREAD
- *
- *****************************************************************/
-
-/**
- * Create global charts
- *
- * Call ebpf_create_chart to create the charts for the collector.
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_swap_charts(int update_every)
-{
- ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_MEM_SWAP_CHART,
- "Calls to access swap memory",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_SWAP_SUBMENU,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_MEM_SWAP_CALLS,
- ebpf_create_global_dimension,
- swap_publish_aggregated, NETDATA_SWAP_END,
- update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
-
- fflush(stdout);
-}
-
-/**
- * Load BPF
- *
- * Load BPF files.
- *
- * @param em the structure with configuration
- *
- * @return 0 on success and -1 otherwise.
- */
-static int ebpf_swap_load_bpf(ebpf_module_t *em)
-{
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
-#endif
-
- int ret = 0;
- ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_KEY_SWAP_READPAGE_CALL].mode);
- if (em->load & EBPF_LOAD_LEGACY) {
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- ret = -1;
- }
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- bpf_obj = swap_bpf__open();
- if (!bpf_obj)
- ret = -1;
- else
- ret = ebpf_swap_load_and_attach(bpf_obj, em);
- }
-#endif
-
- if (ret)
- netdata_log_error("%s %s", EBPF_DEFAULT_ERROR_MSG, em->info.thread_name);
-
- return ret;
-}
-
-/**
- * SWAP thread
- *
- * Thread that runs the swap collector.
- *
- * @param ptr a pointer to `struct ebpf_module`
- *
- * @return It always returns NULL.
- */
-void *ebpf_swap_thread(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_swap_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = swap_maps;
-
- ebpf_update_pid_table(&swap_maps[NETDATA_PID_SWAP_TABLE], em);
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_swap_load_bpf(em)) {
- goto endswap;
- }
-
- ebpf_swap_allocate_global_vectors(em->apps_charts);
-
- int algorithms[NETDATA_SWAP_END] = { NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX };
- ebpf_global_labels(swap_aggregated_data, swap_publish_aggregated, swap_dimension_name, swap_dimension_name,
- algorithms, NETDATA_SWAP_END);
-
- pthread_mutex_lock(&lock);
- ebpf_create_swap_charts(em->update_every);
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
- pthread_mutex_unlock(&lock);
-
- swap_collector(em);
-
-endswap:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_swap.h b/collectors/ebpf.plugin/ebpf_swap.h
deleted file mode 100644
index 79e9a01ac..000000000
--- a/collectors/ebpf.plugin/ebpf_swap.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_SWAP_H
-#define NETDATA_EBPF_SWAP_H 1
-
-// Module name & description
-#define NETDATA_EBPF_MODULE_NAME_SWAP "swap"
-#define NETDATA_EBPF_SWAP_MODULE_DESC "Monitor swap space usage. This thread is integrated with apps and cgroup."
-
-#define NETDATA_SWAP_SLEEP_MS 850000ULL
-
-// charts
-#define NETDATA_MEM_SWAP_CHART "swapcalls"
-#define NETDATA_MEM_SWAP_READ_CHART "swap_read_call"
-#define NETDATA_MEM_SWAP_WRITE_CHART "swap_write_call"
-#define NETDATA_SWAP_SUBMENU "swap"
-
-// configuration file
-#define NETDATA_DIRECTORY_SWAP_CONFIG_FILE "swap.conf"
-
-// Contexts
-#define NETDATA_CGROUP_SWAP_READ_CONTEXT "cgroup.swap_read"
-#define NETDATA_CGROUP_SWAP_WRITE_CONTEXT "cgroup.swap_write"
-#define NETDATA_SYSTEMD_SWAP_READ_CONTEXT "services.swap_read"
-#define NETDATA_SYSTEMD_SWAP_WRITE_CONTEXT "services.swap_write"
-
-typedef struct netdata_publish_swap {
- uint64_t read;
- uint64_t write;
-} netdata_publish_swap_t;
-
-enum swap_tables {
- NETDATA_PID_SWAP_TABLE,
- NETDATA_SWAP_CONTROLLER,
- NETDATA_SWAP_GLOBAL_TABLE
-};
-
-enum swap_counters {
- NETDATA_KEY_SWAP_READPAGE_CALL,
- NETDATA_KEY_SWAP_WRITEPAGE_CALL,
-
- // Keep this as last and don't skip numbers as it is used as element counter
- NETDATA_SWAP_END
-};
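
A compact, standalone illustration of the element-counter idiom described in
the comment above (hypothetical names): keeping the *_END entry last and
gap-free lets it size arrays and bound loops.

    #include <stdio.h>

    enum demo_counters {
        DEMO_READPAGE,
        DEMO_WRITEPAGE,
        DEMO_END                /* == 2: the number of real entries */
    };

    int main(void)
    {
        unsigned long long values[DEMO_END] = { 0 }; /* one slot per counter */
        int i;
        for (i = 0; i < DEMO_END; i++)               /* DEMO_END bounds loops */
            printf("counter %d = %llu\n", i, values[i]);
        return 0;
    }
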
-
-void *ebpf_swap_thread(void *ptr);
-void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr);
-
-extern struct config swap_config;
-extern netdata_ebpf_targets_t swap_targets[];
-
-#endif
diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/collectors/ebpf.plugin/ebpf_sync.c
deleted file mode 100644
index a16318107..000000000
--- a/collectors/ebpf.plugin/ebpf_sync.c
+++ /dev/null
@@ -1,739 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_sync.h"
-
-static char *sync_counter_dimension_name[NETDATA_SYNC_IDX_END] = { "sync", "syncfs", "msync", "fsync", "fdatasync",
- "sync_file_range" };
-static netdata_syscall_stat_t sync_counter_aggregated_data[NETDATA_SYNC_IDX_END];
-static netdata_publish_syscall_t sync_counter_publish_aggregated[NETDATA_SYNC_IDX_END];
-
-static netdata_idx_t sync_hash_values[NETDATA_SYNC_IDX_END];
-
-ebpf_local_maps_t sync_maps[] = {{.name = "tbl_sync", .internal_input = NETDATA_SYNC_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-ebpf_local_maps_t syncfs_maps[] = {{.name = "tbl_syncfs", .internal_input = NETDATA_SYNC_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-ebpf_local_maps_t msync_maps[] = {{.name = "tbl_msync", .internal_input = NETDATA_SYNC_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-ebpf_local_maps_t fsync_maps[] = {{.name = "tbl_fsync", .internal_input = NETDATA_SYNC_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-ebpf_local_maps_t fdatasync_maps[] = {{.name = "tbl_fdatasync", .internal_input = NETDATA_SYNC_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-ebpf_local_maps_t sync_file_range_maps[] = {{.name = "tbl_syncfr", .internal_input = NETDATA_SYNC_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-struct config sync_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-netdata_ebpf_targets_t sync_targets[] = { {.name = NETDATA_SYSCALLS_SYNC, .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NETDATA_SYSCALLS_SYNCFS, .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NETDATA_SYSCALLS_MSYNC, .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NETDATA_SYSCALLS_FSYNC, .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NETDATA_SYSCALLS_FDATASYNC, .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NETDATA_SYSCALLS_SYNC_FILE_RANGE, .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-
-#ifdef LIBBPF_MAJOR_VERSION
-/*****************************************************************
- *
- * BTF FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * Disable probe
- *
- * Disable kprobe to use another method.
- *
- * @param obj is the main structure for bpf objects.
- */
-static inline void ebpf_sync_disable_probe(struct sync_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_sync_kprobe, false);
-}
-
-/**
- * Disable trampoline
- *
- * Disable trampoline to use another method.
- *
- * @param obj is the main structure for bpf objects.
- */
-static inline void ebpf_sync_disable_trampoline(struct sync_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_sync_fentry, false);
-}
-
-/**
- * Disable tracepoint
- *
- * Disable tracepoints according to the information given.
- *
- * @param obj object loaded
- * @param idx the syscall whose tracepoint will not be disabled
- */
-void ebpf_sync_disable_tracepoints(struct sync_bpf *obj, sync_syscalls_index_t idx)
-{
- if (idx != NETDATA_SYNC_SYNC_IDX)
- bpf_program__set_autoload(obj->progs.netdata_sync_entry, false);
-
- if (idx != NETDATA_SYNC_SYNCFS_IDX)
- bpf_program__set_autoload(obj->progs.netdata_syncfs_entry, false);
-
- if (idx != NETDATA_SYNC_MSYNC_IDX)
- bpf_program__set_autoload(obj->progs.netdata_msync_entry, false);
-
- if (idx != NETDATA_SYNC_FSYNC_IDX)
- bpf_program__set_autoload(obj->progs.netdata_fsync_entry, false);
-
- if (idx != NETDATA_SYNC_FDATASYNC_IDX)
- bpf_program__set_autoload(obj->progs.netdata_fdatasync_entry, false);
-
- if (idx != NETDATA_SYNC_SYNC_FILE_RANGE_IDX)
- bpf_program__set_autoload(obj->progs.netdata_sync_file_range_entry, false);
-}
-
-/**
- * Set hash tables
- *
- * Set the map file descriptor according to the value given by the kernel.
- *
- * @param map the map loaded.
- * @param obj the main structure for bpf objects.
- */
-static void ebpf_sync_set_hash_tables(ebpf_local_maps_t *map, struct sync_bpf *obj)
-{
- map->map_fd = bpf_map__fd(obj->maps.tbl_sync);
-}
-
-/**
- * Load and attach
- *
- * Load and attach the eBPF code in kernel.
- *
- * @param obj is the main structure for bpf objects.
- * @param em the structure with configuration
- * @param target the syscall to which we are attaching a tracer.
- * @param idx the index for the main structure
- *
- * @return it returns 0 on success and -1 otherwise
- */
-static inline int ebpf_sync_load_and_attach(struct sync_bpf *obj, ebpf_module_t *em, char *target,
- sync_syscalls_index_t idx)
-{
- netdata_ebpf_targets_t *synct = em->targets;
- netdata_ebpf_program_loaded_t test = synct[NETDATA_SYNC_SYNC_IDX].mode;
-
- if (test == EBPF_LOAD_TRAMPOLINE) {
- ebpf_sync_disable_probe(obj);
- ebpf_sync_disable_tracepoints(obj, NETDATA_SYNC_IDX_END);
-
- bpf_program__set_attach_target(obj->progs.netdata_sync_fentry, 0,
- target);
- } else if (test == EBPF_LOAD_PROBE ||
- test == EBPF_LOAD_RETPROBE) {
- ebpf_sync_disable_tracepoints(obj, NETDATA_SYNC_IDX_END);
- ebpf_sync_disable_trampoline(obj);
- } else {
- ebpf_sync_disable_probe(obj);
- ebpf_sync_disable_trampoline(obj);
-
- ebpf_sync_disable_tracepoints(obj, idx);
- }
-
- ebpf_update_map_type(obj->maps.tbl_sync, &em->maps[NETDATA_SYNC_GLOBAL_TABLE]);
-
- int ret = sync_bpf__load(obj);
- if (!ret) {
- if (test != EBPF_LOAD_PROBE && test != EBPF_LOAD_RETPROBE) {
- ret = sync_bpf__attach(obj);
- } else {
- obj->links.netdata_sync_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_sync_kprobe,
- false, target);
- ret = (int)libbpf_get_error(obj->links.netdata_sync_kprobe);
- }
-
- if (!ret)
- ebpf_sync_set_hash_tables(&em->maps[NETDATA_SYNC_GLOBAL_TABLE], obj);
- }
-
- return ret;
-}
-#endif
-
-/*****************************************************************
- *
- * CLEANUP THREAD
- *
- *****************************************************************/
-
-/**
- * Cleanup Objects
- *
- * Clean up objects loaded during thread initialization.
- */
-void ebpf_sync_cleanup_objects()
-{
- int i;
- for (i = 0; local_syscalls[i].syscall; i++) {
- ebpf_sync_syscalls_t *w = &local_syscalls[i];
-#ifdef LIBBPF_MAJOR_VERSION
- if (w->sync_obj) {
- sync_bpf__destroy(w->sync_obj);
- w->sync_obj = NULL;
- }
-#endif
- if (w->probe_links) {
- ebpf_unload_legacy_code(w->objects, w->probe_links);
- w->objects = NULL;
- w->probe_links = NULL;
- }
- }
-}
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_sync_global(ebpf_module_t *em)
-{
- if (local_syscalls[NETDATA_SYNC_FSYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].enabled)
- ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_FILE_SYNC_CHART,
- "",
- "Monitor calls to fsync(2) and fdatasync(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_EBPF_SYNC_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21300,
- em->update_every);
-
- if (local_syscalls[NETDATA_SYNC_MSYNC_IDX].enabled)
- ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_MSYNC_CHART,
- "",
- "Monitor calls to msync(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_EBPF_SYNC_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21301,
- em->update_every);
-
- if (local_syscalls[NETDATA_SYNC_SYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_SYNCFS_IDX].enabled)
- ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_SYNC_CHART,
- "",
- "Monitor calls to sync(2) and syncfs(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_EBPF_SYNC_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21302,
- em->update_every);
-
- if (local_syscalls[NETDATA_SYNC_SYNC_FILE_RANGE_IDX].enabled)
- ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_FILE_SEGMENT_CHART,
- "",
- "Monitor calls to sync_file_range(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_EBPF_SYNC_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- 21303,
- em->update_every);
-}
-
-/**
- * Exit
- *
- * Clean up the main thread.
- *
- * @param ptr thread data.
- */
-static void ebpf_sync_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
- ebpf_obsolete_sync_global(em);
- pthread_mutex_unlock(&lock);
- }
-
- ebpf_sync_cleanup_objects();
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
-
-/*****************************************************************
- *
- * INITIALIZE THREAD
- *
- *****************************************************************/
-
-/**
- * Load Legacy
- *
- * Load legacy code.
- *
- * @param w is the sync output structure with pointers to objects loaded.
- * @param em is the structure with configuration
- *
- * @return 0 on success and -1 otherwise.
- */
-static int ebpf_sync_load_legacy(ebpf_sync_syscalls_t *w, ebpf_module_t *em)
-{
- em->info.thread_name = w->syscall;
- if (!w->probe_links) {
- w->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &w->objects);
- if (!w->probe_links) {
- return -1;
- }
- }
-
- return 0;
-}
-
-/**
- * Initialize Syscalls
- *
- * Load the eBPF programs to monitor syscalls.
- *
- * @param em the structure with configuration
- *
- * @return 0 on success and -1 otherwise.
- */
-static int ebpf_sync_initialize_syscall(ebpf_module_t *em)
-{
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(sync_maps, em->maps_per_core, running_on_kernel);
- ebpf_define_map_type(syncfs_maps, em->maps_per_core, running_on_kernel);
- ebpf_define_map_type(msync_maps, em->maps_per_core, running_on_kernel);
- ebpf_define_map_type(fsync_maps, em->maps_per_core, running_on_kernel);
- ebpf_define_map_type(fdatasync_maps, em->maps_per_core, running_on_kernel);
- ebpf_define_map_type(sync_file_range_maps, em->maps_per_core, running_on_kernel);
-#endif
-
- int i;
- const char *saved_name = em->info.thread_name;
- int errors = 0;
- for (i = 0; local_syscalls[i].syscall; i++) {
- ebpf_sync_syscalls_t *w = &local_syscalls[i];
- w->sync_maps = local_syscalls[i].sync_maps;
- em->maps = local_syscalls[i].sync_maps;
- if (w->enabled) {
- if (em->load & EBPF_LOAD_LEGACY) {
- if (ebpf_sync_load_legacy(w, em))
- errors++;
-
- em->info.thread_name = saved_name;
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- char syscall[NETDATA_EBPF_MAX_SYSCALL_LENGTH];
- ebpf_select_host_prefix(syscall, NETDATA_EBPF_MAX_SYSCALL_LENGTH, w->syscall, running_on_kernel);
- if (ebpf_is_function_inside_btf(default_btf, syscall)) {
- w->sync_obj = sync_bpf__open();
- if (!w->sync_obj) {
- w->enabled = false;
- errors++;
- } else {
- if (ebpf_sync_load_and_attach(w->sync_obj, em, syscall, i)) {
- w->enabled = false;
- errors++;
- }
- }
- } else {
- netdata_log_info("Cannot find syscall %s we are not going to monitor it.", syscall);
- w->enabled = false;
- }
-
- em->info.thread_name = saved_name;
- }
-#endif
- }
- }
- em->info.thread_name = saved_name;
-
- memset(sync_counter_aggregated_data, 0, NETDATA_SYNC_IDX_END * sizeof(netdata_syscall_stat_t));
- memset(sync_counter_publish_aggregated, 0, NETDATA_SYNC_IDX_END * sizeof(netdata_publish_syscall_t));
- memset(sync_hash_values, 0, NETDATA_SYNC_IDX_END * sizeof(netdata_idx_t));
-
- return (errors) ? -1 : 0;
-}
-
-/*****************************************************************
- *
- * DATA THREAD
- *
- *****************************************************************/
-
-/**
- * Read global table
- *
- * Read the table with number of calls for all functions
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void ebpf_sync_read_global_table(int maps_per_core)
-{
- netdata_idx_t stored[NETDATA_MAX_PROCESSOR];
- uint32_t idx = NETDATA_SYNC_CALL;
- int i;
- for (i = 0; local_syscalls[i].syscall; i++) {
- ebpf_sync_syscalls_t *w = &local_syscalls[i];
- if (w->enabled) {
- int fd = w->sync_maps[NETDATA_SYNC_GLOBAL_TABLE].map_fd;
- if (!bpf_map_lookup_elem(fd, &idx, &stored)) {
- int j, end = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_idx_t total = 0;
- for (j = 0; j < end; j++)
- total += stored[j];
-
- sync_hash_values[i] = total;
- }
- }
- }
-}
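
A condensed, self-contained sketch of the per-CPU lookup pattern used above
(the map fd, key and CPU count are hypothetical): on a per-CPU map,
bpf_map_lookup_elem() returns one value per possible CPU, which the collector
sums; when the map was not created per-core, only slot 0 is meaningful.

    #include <bpf/bpf.h>    /* bpf_map_lookup_elem() from libbpf */
    #include <stdint.h>

    #define MAX_CPUS 512    /* must be >= the number of possible CPUs */

    static uint64_t sum_percpu_slot(int map_fd, uint32_t key,
                                    int ncpus, int maps_per_core)
    {
        uint64_t stored[MAX_CPUS] = { 0 };
        uint64_t total = 0;

        if (bpf_map_lookup_elem(map_fd, &key, stored))
            return 0;       /* key absent or lookup failed */

        int end = maps_per_core ? ncpus : 1;
        for (int i = 0; i < end; i++)
            total += stored[i];

        return total;
    }
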
-
-/**
- * Send Sync chart
- *
- * Write the enabled dimensions for the chart with the given id.
- *
- * @param id chart id
- * @param idx the first index with data.
- * @param end the last index with data.
- */
-static void ebpf_send_sync_chart(char *id,
- int idx,
- int end)
-{
- ebpf_write_begin_chart(NETDATA_EBPF_MEMORY_GROUP, id, "");
-
- netdata_publish_syscall_t *move = &sync_counter_publish_aggregated[idx];
-
- while (move && idx <= end) {
- if (local_syscalls[idx].enabled)
- write_chart_dimension(move->name, (long long)sync_hash_values[idx]);
-
- move = move->next;
- idx++;
- }
-
- ebpf_write_end_chart();
-}
-
-/**
- * Send data
- *
- * Send global charts to Netdata
- */
-static void sync_send_data()
-{
- if (local_syscalls[NETDATA_SYNC_FSYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].enabled) {
- ebpf_send_sync_chart(NETDATA_EBPF_FILE_SYNC_CHART, NETDATA_SYNC_FSYNC_IDX, NETDATA_SYNC_FDATASYNC_IDX);
- }
-
- if (local_syscalls[NETDATA_SYNC_MSYNC_IDX].enabled)
- ebpf_one_dimension_write_charts(NETDATA_EBPF_MEMORY_GROUP, NETDATA_EBPF_MSYNC_CHART,
- sync_counter_publish_aggregated[NETDATA_SYNC_MSYNC_IDX].dimension,
- sync_hash_values[NETDATA_SYNC_MSYNC_IDX]);
-
- if (local_syscalls[NETDATA_SYNC_SYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_SYNCFS_IDX].enabled) {
- ebpf_send_sync_chart(NETDATA_EBPF_SYNC_CHART, NETDATA_SYNC_SYNC_IDX, NETDATA_SYNC_SYNCFS_IDX);
- }
-
- if (local_syscalls[NETDATA_SYNC_SYNC_FILE_RANGE_IDX].enabled)
- ebpf_one_dimension_write_charts(NETDATA_EBPF_MEMORY_GROUP, NETDATA_EBPF_FILE_SEGMENT_CHART,
- sync_counter_publish_aggregated[NETDATA_SYNC_SYNC_FILE_RANGE_IDX].dimension,
- sync_hash_values[NETDATA_SYNC_SYNC_FILE_RANGE_IDX]);
-}
-
-/**
- * Main loop for this collector.
- */
-static void sync_collector(ebpf_module_t *em)
-{
- heartbeat_t hb;
- heartbeat_init(&hb);
- int update_every = em->update_every;
- int counter = update_every - 1;
- int maps_per_core = em->maps_per_core;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- ebpf_sync_read_global_table(maps_per_core);
- pthread_mutex_lock(&lock);
-
- sync_send_data();
-
- pthread_mutex_unlock(&lock);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
-
-/*****************************************************************
- *
- * MAIN THREAD
- *
- *****************************************************************/
-
-/**
- * Create Sync charts
- *
- * Create charts and dimensions according to user input.
- *
- * @param id chart id
- * @param title chart title
- * @param order order number of the specified chart
- * @param idx the first index with data.
- * @param end the last index with data.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_sync_chart(char *id,
- char *title,
- int order,
- int idx,
- int end,
- int update_every)
-{
- ebpf_write_chart_cmd(NETDATA_EBPF_MEMORY_GROUP, id, "", title, EBPF_COMMON_DIMENSION_CALL,
- NETDATA_EBPF_SYNC_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NULL, order,
- update_every,
- NETDATA_EBPF_MODULE_NAME_SYNC);
-
- netdata_publish_syscall_t *move = &sync_counter_publish_aggregated[idx];
-
- while (move && idx <= end) {
- if (local_syscalls[idx].enabled)
- ebpf_write_global_dimension(move->name, move->dimension, move->algorithm);
-
- move = move->next;
- idx++;
- }
-}
-
-/**
- * Create global charts
- *
- * Call ebpf_create_chart to create the charts for the collector.
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_sync_charts(int update_every)
-{
- if (local_syscalls[NETDATA_SYNC_FSYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].enabled)
- ebpf_create_sync_chart(NETDATA_EBPF_FILE_SYNC_CHART,
- "Monitor calls to fsync(2) and fdatasync(2).", 21300,
- NETDATA_SYNC_FSYNC_IDX, NETDATA_SYNC_FDATASYNC_IDX, update_every);
-
- if (local_syscalls[NETDATA_SYNC_MSYNC_IDX].enabled)
- ebpf_create_sync_chart(NETDATA_EBPF_MSYNC_CHART,
- "Monitor calls to msync(2).", 21301,
- NETDATA_SYNC_MSYNC_IDX, NETDATA_SYNC_MSYNC_IDX, update_every);
-
- if (local_syscalls[NETDATA_SYNC_SYNC_IDX].enabled && local_syscalls[NETDATA_SYNC_SYNCFS_IDX].enabled)
- ebpf_create_sync_chart(NETDATA_EBPF_SYNC_CHART,
- "Monitor calls to sync(2) and syncfs(2).", 21302,
- NETDATA_SYNC_SYNC_IDX, NETDATA_SYNC_SYNCFS_IDX, update_every);
-
- if (local_syscalls[NETDATA_SYNC_SYNC_FILE_RANGE_IDX].enabled)
- ebpf_create_sync_chart(NETDATA_EBPF_FILE_SEGMENT_CHART,
- "Monitor calls to sync_file_range(2).", 21303,
- NETDATA_SYNC_SYNC_FILE_RANGE_IDX, NETDATA_SYNC_SYNC_FILE_RANGE_IDX, update_every);
-
- fflush(stdout);
-}
-
-/**
- * Parse Syscalls
- *
- * Parse syscall options available inside ebpf.d/sync.conf
- */
-static void ebpf_sync_parse_syscalls()
-{
- int i;
- for (i = 0; local_syscalls[i].syscall; i++) {
- local_syscalls[i].enabled = appconfig_get_boolean(&sync_config, NETDATA_SYNC_CONFIG_NAME,
- local_syscalls[i].syscall, CONFIG_BOOLEAN_YES);
- }
-}
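
An illustrative ebpf.d/sync.conf fragment matching the parser above: the
section name is NETDATA_SYNC_CONFIG_NAME ("syscalls"), the keys are the
syscall names, and every option defaults to yes.

    # ebpf.d/sync.conf (illustrative)
    [syscalls]
        sync = yes
        syncfs = yes
        msync = no
        fsync = yes
        fdatasync = yes
        sync_file_range = yes
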
-
-/**
- * Set sync maps
- *
- * When the thread is initialized the sync_maps pointers are NULL;
- * this function fills them before use.
- */
-static void ebpf_set_sync_maps()
-{
- local_syscalls[NETDATA_SYNC_SYNC_IDX].sync_maps = sync_maps;
- local_syscalls[NETDATA_SYNC_SYNCFS_IDX].sync_maps = syncfs_maps;
- local_syscalls[NETDATA_SYNC_MSYNC_IDX].sync_maps = msync_maps;
- local_syscalls[NETDATA_SYNC_FSYNC_IDX].sync_maps = fsync_maps;
- local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].sync_maps = fdatasync_maps;
- local_syscalls[NETDATA_SYNC_SYNC_FILE_RANGE_IDX].sync_maps = sync_file_range_maps;
-}
-
-/**
- * Sync thread
- *
- * Thread that runs the sync collector.
- *
- * @param ptr a pointer to `struct ebpf_module`
- *
- * @return It always returns NULL.
- */
-void *ebpf_sync_thread(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_sync_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- ebpf_set_sync_maps();
- ebpf_sync_parse_syscalls();
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_sync_initialize_syscall(em)) {
- goto endsync;
- }
-
- int algorithms[NETDATA_SYNC_IDX_END] = { NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX,
- NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX,
- NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX };
- ebpf_global_labels(sync_counter_aggregated_data, sync_counter_publish_aggregated,
- sync_counter_dimension_name, sync_counter_dimension_name,
- algorithms, NETDATA_SYNC_IDX_END);
-
- pthread_mutex_lock(&lock);
- ebpf_create_sync_charts(em->update_every);
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&lock);
-
- sync_collector(em);
-
-endsync:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_sync.h b/collectors/ebpf.plugin/ebpf_sync.h
deleted file mode 100644
index bd1bb78b0..000000000
--- a/collectors/ebpf.plugin/ebpf_sync.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_SYNC_H
-#define NETDATA_EBPF_SYNC_H 1
-
-#ifdef LIBBPF_MAJOR_VERSION
-#include "includes/sync.skel.h"
-#endif
-
-// Module name & description
-#define NETDATA_EBPF_MODULE_NAME_SYNC "sync"
-#define NETDATA_EBPF_SYNC_MODULE_DESC "Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2)."
-
-// charts
-#define NETDATA_EBPF_SYNC_CHART "sync"
-#define NETDATA_EBPF_MSYNC_CHART "memory_map"
-#define NETDATA_EBPF_FILE_SYNC_CHART "file_sync"
-#define NETDATA_EBPF_FILE_SEGMENT_CHART "file_segment"
-#define NETDATA_EBPF_SYNC_SUBMENU "synchronization (eBPF)"
-
-#define NETDATA_SYSCALLS_SYNC "sync"
-#define NETDATA_SYSCALLS_SYNCFS "syncfs"
-#define NETDATA_SYSCALLS_MSYNC "msync"
-#define NETDATA_SYSCALLS_FSYNC "fsync"
-#define NETDATA_SYSCALLS_FDATASYNC "fdatasync"
-#define NETDATA_SYSCALLS_SYNC_FILE_RANGE "sync_file_range"
-
-#define NETDATA_EBPF_SYNC_SLEEP_MS 800000ULL
-
-// configuration file
-#define NETDATA_SYNC_CONFIG_FILE "sync.conf"
-#define NETDATA_SYNC_CONFIG_NAME "syscalls"
-
-typedef enum sync_syscalls_index {
- NETDATA_SYNC_SYNC_IDX,
- NETDATA_SYNC_SYNCFS_IDX,
- NETDATA_SYNC_MSYNC_IDX,
- NETDATA_SYNC_FSYNC_IDX,
- NETDATA_SYNC_FDATASYNC_IDX,
- NETDATA_SYNC_SYNC_FILE_RANGE_IDX,
-
- NETDATA_SYNC_IDX_END
-} sync_syscalls_index_t;
-
-enum netdata_sync_charts {
- NETDATA_SYNC_CALL,
-
- // Keep this as last and don't skip numbers as it is used as element counter
- NETDATA_SYNC_END
-};
-
-enum netdata_sync_table {
- NETDATA_SYNC_GLOBAL_TABLE
-};
-
-void *ebpf_sync_thread(void *ptr);
-extern struct config sync_config;
-extern netdata_ebpf_targets_t sync_targets[];
-
-#endif /* NETDATA_EBPF_SYNC_H */
diff --git a/collectors/ebpf.plugin/ebpf_unittest.c b/collectors/ebpf.plugin/ebpf_unittest.c
deleted file mode 100644
index 11b449e03..000000000
--- a/collectors/ebpf.plugin/ebpf_unittest.c
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf_unittest.h"
-
-ebpf_module_t test_em;
-
-/**
- * Initialize structure
- *
- * Initialize structure used to run unittests
- */
-void ebpf_ut_initialize_structure(netdata_run_mode_t mode)
-{
- memset(&test_em, 0, sizeof(ebpf_module_t));
- test_em.info.thread_name = strdupz("process");
- test_em.info.config_name = test_em.info.thread_name;
- test_em.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10 |
- NETDATA_V5_14;
- test_em.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE;
- test_em.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT;
- test_em.mode = mode;
-}
-
-/**
- * Clean Up Memory
- *
- * Clean up data allocated during the unit test.
- */
-void ebpf_ut_cleanup_memory()
-{
- freez((void *)test_em.info.thread_name);
-}
-
-/**
- * Load Binary
- *
- * Test load of legacy eBPF programs.
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_ut_load_binary()
-{
- test_em.probe_links = ebpf_load_program(ebpf_plugin_dir, &test_em, running_on_kernel, isrh, &test_em.objects);
- if (!test_em.probe_links)
- return -1;
-
- ebpf_unload_legacy_code(test_em.objects, test_em.probe_links);
-
- return 0;
-}
-
-/**
- * Load Real Binary
- *
- * Load an existing binary inside the plugin directory.
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-int ebpf_ut_load_real_binary()
-{
- return ebpf_ut_load_binary();
-}
-
-/**
- * Load fake Binary
- *
- * Try to load a binary not generated by netdata.
- *
- * @return It returns 0 on success and 1 otherwise. Success here means the
- * load failed as expected.
- */
-int ebpf_ut_load_fake_binary()
-{
- const char *original = test_em.info.thread_name;
-
- test_em.info.thread_name = strdupz("I_am_not_here");
- int ret = ebpf_ut_load_binary();
-
- ebpf_ut_cleanup_memory();
-
- test_em.info.thread_name = original;
-
- return !ret;
-}
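
A toy illustration of the inverted return above, with a hypothetical
fake_load() standing in for ebpf_ut_load_binary(), which returns -1 when the
load fails:

    #include <stdio.h>

    /* Stands in for ebpf_ut_load_binary(): -1 on failure, 0 on success. */
    static int fake_load(void) { return -1; }

    int main(void)
    {
        int ret = fake_load();
        /* !ret is 0 (pass) when the load failed as expected,
         * and 1 (fail) when it unexpectedly succeeded. */
        printf("test result: %d\n", !ret);
        return !ret;
    }
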
diff --git a/collectors/ebpf.plugin/ebpf_unittest.h b/collectors/ebpf.plugin/ebpf_unittest.h
deleted file mode 100644
index 429cbe628..000000000
--- a/collectors/ebpf.plugin/ebpf_unittest.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef NETDATA_EBPF_PLUGIN_UNITTEST_H_
-# define NETDATA_EBPF_PLUGIN_UNITTEST_H_ 1
-
-#include "ebpf.h"
-
-void ebpf_ut_initialize_structure(netdata_run_mode_t mode);
-int ebpf_ut_load_real_binary();
-int ebpf_ut_load_fake_binary();
-void ebpf_ut_cleanup_memory();
-#endif
diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c
deleted file mode 100644
index 354901c9c..000000000
--- a/collectors/ebpf.plugin/ebpf_vfs.c
+++ /dev/null
@@ -1,2522 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include <sys/resource.h>
-
-#include "ebpf.h"
-#include "ebpf_vfs.h"
-
-static char *vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_END] = { "delete", "read", "write",
- "fsync", "open", "create" };
-static char *vfs_id_names[NETDATA_KEY_PUBLISH_VFS_END] = { "vfs_unlink", "vfs_read", "vfs_write",
- "vfs_fsync", "vfs_open", "vfs_create"};
-
-static netdata_idx_t *vfs_hash_values = NULL;
-static netdata_syscall_stat_t vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_END];
-static netdata_publish_syscall_t vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_END];
-netdata_publish_vfs_t *vfs_vector = NULL;
-
-static ebpf_local_maps_t vfs_maps[] = {{.name = "tbl_vfs_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0, .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_HASH
-#endif
- },
- {.name = "tbl_vfs_stats", .internal_input = NETDATA_VFS_COUNTER,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = "vfs_ctrl", .internal_input = NETDATA_CONTROLLER_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- },
- {.name = NULL, .internal_input = 0, .user_input = 0,
-#ifdef LIBBPF_MAJOR_VERSION
- .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
-#endif
- }};
-
-struct config vfs_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-netdata_ebpf_targets_t vfs_targets[] = { {.name = "vfs_write", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "vfs_writev", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "vfs_read", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "vfs_readv", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "vfs_unlink", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "vfs_fsync", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "vfs_open", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "vfs_create", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = "release_task", .mode = EBPF_LOAD_TRAMPOLINE},
- {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-
-#ifdef NETDATA_DEV_MODE
-int vfs_disable_priority;
-#endif
-
-#ifdef LIBBPF_MAJOR_VERSION
-/**
- * Disable probe
- *
- * Disable all probes so another method can be used exclusively.
- *
- * @param obj is the main structure for bpf objects
- */
-static void ebpf_vfs_disable_probes(struct vfs_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_vfs_write_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_write_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_writev_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_writev_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_read_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_read_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_readv_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_readv_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_unlink_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_unlink_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_fsync_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_fsync_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_open_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_open_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_create_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_create_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_release_task_kprobe, false);
-}
-
-/**
- * Disable trampoline
- *
- * Disable all trampolines so another method can be used exclusively.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_vfs_disable_trampoline(struct vfs_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_vfs_write_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_write_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_writev_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_writev_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_read_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_read_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_readv_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_readv_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_unlink_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_fsync_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_fsync_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_open_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_open_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_create_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_release_task_fentry, false);
-}
-
-/**
- * Set trampoline target
- *
- * Set the targets we will monitor.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_vfs_set_trampoline_target(struct vfs_bpf *obj)
-{
- bpf_program__set_attach_target(obj->progs.netdata_vfs_write_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_WRITE].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_write_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_WRITE].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_writev_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_WRITEV].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_writev_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_WRITEV].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_read_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_READ].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_read_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_READ].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_readv_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_READV].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_readv_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_READV].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_unlink_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_UNLINK].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_fsync_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_FSYNC].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_fsync_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_FSYNC].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_open_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_OPEN].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_open_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_OPEN].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_create_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_CREATE].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_release_task_fentry, 0, EBPF_COMMON_FNCT_CLEAN_UP);
-}
-
-/**
- * Attach Probe
- *
- * Attach probes to target
- *
- * @param obj is the main structure for bpf objects.
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_vfs_attach_probe(struct vfs_bpf *obj)
-{
- obj->links.netdata_vfs_write_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_write_kprobe, false,
- vfs_targets[NETDATA_EBPF_VFS_WRITE].name);
- int ret = libbpf_get_error(obj->links.netdata_vfs_write_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_write_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_write_kretprobe, true,
- vfs_targets[NETDATA_EBPF_VFS_WRITE].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_write_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_writev_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_writev_kprobe, false,
- vfs_targets[NETDATA_EBPF_VFS_WRITEV].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_writev_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_writev_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_writev_kretprobe, true,
- vfs_targets[NETDATA_EBPF_VFS_WRITEV].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_writev_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_read_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_read_kprobe, false,
- vfs_targets[NETDATA_EBPF_VFS_READ].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_read_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_read_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_read_kretprobe, true,
- vfs_targets[NETDATA_EBPF_VFS_READ].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_read_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_readv_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_readv_kprobe, false,
- vfs_targets[NETDATA_EBPF_VFS_READV].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_readv_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_readv_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_readv_kretprobe, true,
- vfs_targets[NETDATA_EBPF_VFS_READV].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_readv_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_unlink_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_unlink_kprobe, false,
- vfs_targets[NETDATA_EBPF_VFS_UNLINK].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_unlink_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_unlink_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_unlink_kretprobe, true,
- vfs_targets[NETDATA_EBPF_VFS_UNLINK].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_unlink_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_fsync_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_fsync_kprobe, false,
- vfs_targets[NETDATA_EBPF_VFS_FSYNC].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_fsync_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_fsync_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_fsync_kretprobe, true,
- vfs_targets[NETDATA_EBPF_VFS_FSYNC].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_fsync_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_open_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_open_kprobe, false,
- vfs_targets[NETDATA_EBPF_VFS_OPEN].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_open_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_open_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_open_kretprobe, true,
- vfs_targets[NETDATA_EBPF_VFS_OPEN].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_open_kretprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_create_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_create_kprobe, false,
- vfs_targets[NETDATA_EBPF_VFS_CREATE].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_create_kprobe);
- if (ret)
- return -1;
-
- obj->links.netdata_vfs_create_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_create_kretprobe, true,
- vfs_targets[NETDATA_EBPF_VFS_CREATE].name);
- ret = libbpf_get_error(obj->links.netdata_vfs_create_kretprobe);
- if (ret)
- return -1;
-
- /* Attach via the kprobe program; the fentry variant is not loaded when
- * probes are selected. */
- obj->links.netdata_vfs_release_task_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_release_task_kprobe,
- true,
- EBPF_COMMON_FNCT_CLEAN_UP);
- ret = libbpf_get_error(obj->links.netdata_vfs_release_task_kprobe);
- if (ret)
- return -1;
-
- return 0;
-}
-
-/**
- * Adjust Size
- *
- * Resize maps according input from users.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- */
-static void ebpf_vfs_adjust_map(struct vfs_bpf *obj, ebpf_module_t *em)
-{
- ebpf_update_map_size(obj->maps.tbl_vfs_pid, &vfs_maps[NETDATA_VFS_PID],
- em, bpf_map__name(obj->maps.tbl_vfs_pid));
-
- ebpf_update_map_type(obj->maps.tbl_vfs_pid, &vfs_maps[NETDATA_VFS_PID]);
- ebpf_update_map_type(obj->maps.tbl_vfs_stats, &vfs_maps[NETDATA_VFS_ALL]);
- ebpf_update_map_type(obj->maps.vfs_ctrl, &vfs_maps[NETDATA_VFS_CTRL]);
-}
-
-/**
- * Set hash tables
- *
- * Set the map file descriptors according to the values given by the kernel.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_vfs_set_hash_tables(struct vfs_bpf *obj)
-{
- vfs_maps[NETDATA_VFS_ALL].map_fd = bpf_map__fd(obj->maps.tbl_vfs_stats);
- vfs_maps[NETDATA_VFS_PID].map_fd = bpf_map__fd(obj->maps.tbl_vfs_pid);
- vfs_maps[NETDATA_VFS_CTRL].map_fd = bpf_map__fd(obj->maps.vfs_ctrl);
-}
-
-/**
- * Disable Release Task
- *
- * Disable release_task tracking when neither apps nor cgroup charts are enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_vfs_disable_release_task(struct vfs_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_vfs_release_task_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_release_task_kprobe, false);
-}
-
-/**
- * Load and attach
- *
- * Load and attach the eBPF code in kernel.
- *
- * @param obj is the main structure for bpf objects.
- * @param em structure with configuration
- *
- * @return it returns 0 on success and -1 otherwise
- */
-static inline int ebpf_vfs_load_and_attach(struct vfs_bpf *obj, ebpf_module_t *em)
-{
- netdata_ebpf_targets_t *mt = em->targets;
- netdata_ebpf_program_loaded_t test = mt[NETDATA_EBPF_VFS_WRITE].mode;
-
- if (test == EBPF_LOAD_TRAMPOLINE) {
- ebpf_vfs_disable_probes(obj);
-
- ebpf_vfs_set_trampoline_target(obj);
- } else {
- ebpf_vfs_disable_trampoline(obj);
- }
-
- ebpf_vfs_adjust_map(obj, em);
-
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_vfs_disable_release_task(obj);
-
- int ret = vfs_bpf__load(obj);
- if (ret) {
- return ret;
- }
-
- ret = (test == EBPF_LOAD_TRAMPOLINE) ? vfs_bpf__attach(obj) : ebpf_vfs_attach_probe(obj);
- if (!ret) {
- ebpf_vfs_set_hash_tables(obj);
-
- ebpf_update_controller(vfs_maps[NETDATA_VFS_CTRL].map_fd, em);
- }
-
- return ret;
-}
-#endif
-
-/*****************************************************************
- *
- * FUNCTIONS TO CLOSE THE THREAD
- *
- *****************************************************************/
-
-static void ebpf_obsolete_specific_vfs_charts(char *type, ebpf_module_t *em);
-
-/**
- * Obsolete services
- *
- * Obsolete all service charts created
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_vfs_services(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_FILE_DELETED,
- "",
- "Files deleted",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20065,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS,
- "",
- "Write to disk",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20066,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR,
- "",
- "Fails to write",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20067,
- em->update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_VFS_READ_CALLS,
- "",
- "Read from disk",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20068,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR,
- "",
- "Fails to read",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20069,
- em->update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES,
- "",
- "Bytes written on disk",
- EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20070,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_VFS_READ_BYTES,
- "",
- "Bytes read from disk",
- EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20071,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_VFS_FSYNC,
- "",
- "Calls to vfs_fsync.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20072,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR,
- "",
- "Sync error",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20073,
- em->update_every);
- }
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_VFS_OPEN,
- "",
- "Calls to vfs_open.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20074,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR,
- "",
- "Open error",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20075,
- em->update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_VFS_CREATE,
- "",
- "Calls to vfs_create.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20076,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
- NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR,
- "",
- "Create error",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- NULL,
- 20077,
- em->update_every);
- }
-}
-
-/**
- * Obsolete cgroup chart
- *
- * Mark all previously created charts as obsolete before closing.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static inline void ebpf_obsolete_vfs_cgroup_charts(ebpf_module_t *em) {
- pthread_mutex_lock(&mutex_cgroup_shm);
-
- ebpf_obsolete_vfs_services(em);
-
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- ebpf_obsolete_specific_vfs_charts(ect->name, em);
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Obsolete apps charts
- *
- * Obsolete apps charts.
- *
- * @param em a pointer to the structure with the default values.
- */
-void ebpf_obsolete_vfs_apps_charts(struct ebpf_module *em)
-{
- int order = 20275;
- struct ebpf_target *w;
- int update_every = em->update_every;
- for (w = apps_groups_root_target; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_VFS_IDX))))
- continue;
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_unlink",
- "Files deleted.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_unlink",
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_write",
- "Write to disk.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_write",
- order++,
- update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_write_error",
- "Fails to write.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_write_error",
- order++,
- update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_read",
- "Read from disk.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_read",
- order++,
- update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_read_error",
- "Fails to read.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_read_error",
- order++,
- update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_write_bytes",
- "Bytes written on disk.",
- EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_write_bytes",
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_read_bytes",
- "Bytes read from disk.",
- EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_read_bytes",
- order++,
- update_every);
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_fsync",
- "Calls to vfs_fsync.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_fsync",
- order++,
- update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_fsync_error",
- "Fails to sync.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_fsync_error",
- order++,
- update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_open",
- "Calls to vfs_open.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_open",
- order++,
- update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_open_error",
- "Fails to open.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_open_error",
- order++,
- update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_create",
- "Calls to vfs_create.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_create",
- order++,
- update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_create_error",
- "Fails to create.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_create_error",
- order++,
- update_every);
- }
- w->charts_created &= ~(1<<EBPF_MODULE_VFS_IDX);
- }
-}
-
-/**
- * Obsolete global
- *
- * Obsolete global charts created by thread.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-static void ebpf_obsolete_vfs_global(ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_FILE_CLEAN_COUNT,
- "",
- "Remove files",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_CLEAN,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_FILE_IO_COUNT,
- "",
- "Calls to IO",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_COUNT,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_IO_FILE_BYTES,
- "",
- "Bytes written and read",
- EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_BYTES,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_FILE_ERR_COUNT,
- "",
- "Fails to write or read",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EBYTES,
- em->update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_FSYNC,
- "",
- "Calls to vfs_fsync.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_FSYNC,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_FSYNC_ERR,
- "",
- "Fails to synchronize",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EFSYNC,
- em->update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_OPEN,
- "",
- "Calls to vfs_open.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_OPEN,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_OPEN_ERR,
- "",
- "Fails to open a file",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EOPEN,
- em->update_every);
- }
-
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_CREATE,
- "",
- "Calls to vfs_create.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_CREATE,
- em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_CREATE_ERR,
- "",
- "Fails to create a file.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_ECREATE,
- em->update_every);
- }
-}
-
-/**
- * Exit
- *
- * Cancel thread and exit.
- *
- * @param ptr thread data.
- */
-static void ebpf_vfs_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- pthread_mutex_lock(&lock);
- if (em->cgroup_charts) {
- ebpf_obsolete_vfs_cgroup_charts(em);
- fflush(stdout);
- }
-
- if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
- ebpf_obsolete_vfs_apps_charts(em);
- }
-
- ebpf_obsolete_vfs_global(em);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_vfs_pid)
- ebpf_statistic_obsolete_aral_chart(em, vfs_disable_priority);
-#endif
-
- fflush(stdout);
- pthread_mutex_unlock(&lock);
- }
-
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_REMOVE);
-
-#ifdef LIBBPF_MAJOR_VERSION
- if (vfs_bpf_obj) {
- vfs_bpf__destroy(vfs_bpf_obj);
- vfs_bpf_obj = NULL;
- }
-#endif
- if (em->objects) {
- ebpf_unload_legacy_code(em->objects, em->probe_links);
- em->objects = NULL;
- em->probe_links = NULL;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- em->enabled = NETDATA_THREAD_EBPF_STOPPED;
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-}
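-
-/*
- * Teardown ordering note: charts are obsoleted first (while stdout is still
- * being flushed), kernel-side statistics are removed next, the skeleton or
- * legacy objects are destroyed after that, and only then is the module
- * marked NETDATA_THREAD_EBPF_STOPPED under ebpf_exit_cleanup, so the main
- * thread never observes a stopped module that still owns kernel objects.
- */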
-
-/*****************************************************************
- *
- * FUNCTIONS WITH THE MAIN LOOP
- *
- *****************************************************************/
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param em the structure with thread information
-*/
-static void ebpf_vfs_send_data(ebpf_module_t *em)
-{
- netdata_publish_vfs_common_t pvc;
-
- pvc.write = (long)vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_WRITE].bytes;
- pvc.read = (long)vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_READ].bytes;
-
- write_count_chart(NETDATA_VFS_FILE_CLEAN_COUNT, NETDATA_FILESYSTEM_FAMILY,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK], 1);
-
- write_count_chart(NETDATA_VFS_FILE_IO_COUNT, NETDATA_FILESYSTEM_FAMILY,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], 2);
-
- if (em->mode < MODE_ENTRY) {
- write_err_chart(NETDATA_VFS_FILE_ERR_COUNT, NETDATA_FILESYSTEM_FAMILY,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], 2);
- }
-
- write_io_chart(NETDATA_VFS_IO_FILE_BYTES, NETDATA_FILESYSTEM_FAMILY, vfs_id_names[NETDATA_KEY_PUBLISH_VFS_WRITE],
- (long long)pvc.write, vfs_id_names[NETDATA_KEY_PUBLISH_VFS_READ], (long long)pvc.read);
-
- write_count_chart(NETDATA_VFS_FSYNC, NETDATA_FILESYSTEM_FAMILY,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], 1);
-
- if (em->mode < MODE_ENTRY) {
- write_err_chart(NETDATA_VFS_FSYNC_ERR, NETDATA_FILESYSTEM_FAMILY,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], 1);
- }
-
- write_count_chart(NETDATA_VFS_OPEN, NETDATA_FILESYSTEM_FAMILY,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], 1);
-
- if (em->mode < MODE_ENTRY) {
- write_err_chart(NETDATA_VFS_OPEN_ERR, NETDATA_FILESYSTEM_FAMILY,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], 1);
- }
-
- write_count_chart(NETDATA_VFS_CREATE, NETDATA_FILESYSTEM_FAMILY,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE], 1);
-
- if (em->mode < MODE_ENTRY) {
- write_err_chart(
- NETDATA_VFS_CREATE_ERR,
- NETDATA_FILESYSTEM_FAMILY,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
- 1);
- }
-}
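-
-/*
- * Hedged sketch: the write_*_chart() helpers above ultimately speak Netdata's
- * external-plugin text protocol on stdout. One collection cycle for a single
- * chart reduces to the pattern below (only <stdio.h> is required; the chart
- * and dimension ids are illustrative).
- */
-static void sketch_send_counter(const char *type, const char *id,
- const char *dimension, long long value)
-{
- printf("BEGIN %s.%s\n", type, id); // open a data point for the chart
- printf("SET %s = %lld\n", dimension, value);
- printf("END\n"); // commit the data point
-}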
-
-/**
- * Read the hash table and store data to allocated vectors.
- *
- * @param stats vector used to store data read from the control table.
- * @param maps_per_core whether the kernel stores one value per core.
- */
-static void ebpf_vfs_read_global_table(netdata_idx_t *stats, int maps_per_core)
-{
- netdata_idx_t res[NETDATA_VFS_COUNTER];
- ebpf_read_global_table_stats(res,
- vfs_hash_values,
- vfs_maps[NETDATA_VFS_ALL].map_fd,
- maps_per_core,
- NETDATA_KEY_CALLS_VFS_WRITE,
- NETDATA_VFS_COUNTER);
-
- ebpf_read_global_table_stats(stats,
- vfs_hash_values,
- vfs_maps[NETDATA_VFS_CTRL].map_fd,
- maps_per_core,
- NETDATA_CONTROLLER_PID_TABLE_ADD,
- NETDATA_CONTROLLER_END);
-
- vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].ncall = res[NETDATA_KEY_CALLS_VFS_UNLINK];
- vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].ncall = res[NETDATA_KEY_CALLS_VFS_READ] +
- res[NETDATA_KEY_CALLS_VFS_READV];
- vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].ncall = res[NETDATA_KEY_CALLS_VFS_WRITE] +
- res[NETDATA_KEY_CALLS_VFS_WRITEV];
- vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].ncall = res[NETDATA_KEY_CALLS_VFS_FSYNC];
- vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].ncall = res[NETDATA_KEY_CALLS_VFS_OPEN];
- vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].ncall = res[NETDATA_KEY_CALLS_VFS_CREATE];
-
- vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].nerr = res[NETDATA_KEY_ERROR_VFS_UNLINK];
- vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].nerr = res[NETDATA_KEY_ERROR_VFS_READ] +
- res[NETDATA_KEY_ERROR_VFS_READV];
- vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].nerr = res[NETDATA_KEY_ERROR_VFS_WRITE] +
- res[NETDATA_KEY_ERROR_VFS_WRITEV];
- vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].nerr = res[NETDATA_KEY_ERROR_VFS_FSYNC];
- vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].nerr = res[NETDATA_KEY_ERROR_VFS_OPEN];
- vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].nerr = res[NETDATA_KEY_ERROR_VFS_CREATE];
-
- vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_WRITE].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITE] +
- (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITEV];
- vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_READ].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_READ] +
- (uint64_t)res[NETDATA_KEY_BYTES_VFS_READV];
-}
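-
-/*
- * Hedged sketch of the per-core aggregation behind
- * ebpf_read_global_table_stats(): with maps_per_core set, a single
- * bpf_map_lookup_elem() call fills a vector with one slot per CPU that must
- * be summed before publishing. Requires <stdint.h> and libbpf's <bpf/bpf.h>;
- * names are illustrative.
- */
-static uint64_t sketch_sum_per_core(int map_fd, uint32_t key, uint64_t *values, int nprocs)
-{
- if (bpf_map_lookup_elem(map_fd, &key, values))
- return 0; // key absent: nothing collected for it yet
-
- uint64_t total = 0;
- for (int i = 0; i < nprocs; i++) // one slot per CPU, folded into one metric
- total += values[i];
-
- return total;
-}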
-
-/**
- * Sum PIDs
- *
- * Sum values for all targets.
- *
- * @param vfs output structure
- * @param root linked list with the structures to be used
- */
-static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct ebpf_pid_on_target *root)
-{
- netdata_publish_vfs_t accumulator;
- memset(&accumulator, 0, sizeof(accumulator));
-
- while (root) {
- int32_t pid = root->pid;
- netdata_publish_vfs_t *w = vfs_pid[pid];
- if (w) {
- accumulator.write_call += w->write_call;
- accumulator.writev_call += w->writev_call;
- accumulator.read_call += w->read_call;
- accumulator.readv_call += w->readv_call;
- accumulator.unlink_call += w->unlink_call;
- accumulator.fsync_call += w->fsync_call;
- accumulator.open_call += w->open_call;
- accumulator.create_call += w->create_call;
-
- accumulator.write_bytes += w->write_bytes;
- accumulator.writev_bytes += w->writev_bytes;
- accumulator.read_bytes += w->read_bytes;
- accumulator.readv_bytes += w->readv_bytes;
-
- accumulator.write_err += w->write_err;
- accumulator.writev_err += w->writev_err;
- accumulator.read_err += w->read_err;
- accumulator.readv_err += w->readv_err;
- accumulator.unlink_err += w->unlink_err;
- accumulator.fsync_err += w->fsync_err;
- accumulator.open_err += w->open_err;
- accumulator.create_err += w->create_err;
- }
- root = root->next;
- }
-
- // These conditions were added because we use an incremental algorithm: published values must never decrease.
- vfs->write_call = (accumulator.write_call >= vfs->write_call) ? accumulator.write_call : vfs->write_call;
- vfs->writev_call = (accumulator.writev_call >= vfs->writev_call) ? accumulator.writev_call : vfs->writev_call;
- vfs->read_call = (accumulator.read_call >= vfs->read_call) ? accumulator.read_call : vfs->read_call;
- vfs->readv_call = (accumulator.readv_call >= vfs->readv_call) ? accumulator.readv_call : vfs->readv_call;
- vfs->unlink_call = (accumulator.unlink_call >= vfs->unlink_call) ? accumulator.unlink_call : vfs->unlink_call;
- vfs->fsync_call = (accumulator.fsync_call >= vfs->fsync_call) ? accumulator.fsync_call : vfs->fsync_call;
- vfs->open_call = (accumulator.open_call >= vfs->open_call) ? accumulator.open_call : vfs->open_call;
- vfs->create_call = (accumulator.create_call >= vfs->create_call) ? accumulator.create_call : vfs->create_call;
-
- vfs->write_bytes = (accumulator.write_bytes >= vfs->write_bytes) ? accumulator.write_bytes : vfs->write_bytes;
- vfs->writev_bytes = (accumulator.writev_bytes >= vfs->writev_bytes) ? accumulator.writev_bytes : vfs->writev_bytes;
- vfs->read_bytes = (accumulator.read_bytes >= vfs->read_bytes) ? accumulator.read_bytes : vfs->read_bytes;
- vfs->readv_bytes = (accumulator.readv_bytes >= vfs->readv_bytes) ? accumulator.readv_bytes : vfs->readv_bytes;
-
- vfs->write_err = (accumulator.write_err >= vfs->write_err) ? accumulator.write_err : vfs->write_err;
- vfs->writev_err = (accumulator.writev_err >= vfs->writev_err) ? accumulator.writev_err : vfs->writev_err;
- vfs->read_err = (accumulator.read_err >= vfs->read_err) ? accumulator.read_err : vfs->read_err;
- vfs->readv_err = (accumulator.readv_err >= vfs->readv_err) ? accumulator.readv_err : vfs->readv_err;
- vfs->unlink_err = (accumulator.unlink_err >= vfs->unlink_err) ? accumulator.unlink_err : vfs->unlink_err;
- vfs->fsync_err = (accumulator.fsync_err >= vfs->fsync_err) ? accumulator.fsync_err : vfs->fsync_err;
- vfs->open_err = (accumulator.open_err >= vfs->open_err) ? accumulator.open_err : vfs->open_err;
- vfs->create_err = (accumulator.create_err >= vfs->create_err) ? accumulator.create_err : vfs->create_err;
-}
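-
-/*
- * The ternaries above implement a "max hold": because these charts use
- * Netdata's incremental algorithm, a published counter must never move
- * backwards, e.g. when a PID exits and its contribution vanishes from the
- * sum. A compact illustrative form (assuming <stdint.h>):
- */
-static inline uint64_t sketch_monotonic(uint64_t previous, uint64_t current)
-{
- // Keep the previous value whenever the new sum would regress.
- return (current >= previous) ? current : previous;
-}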
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param em the structure with thread information
- * @param root the target list.
- */
-void ebpf_vfs_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
-{
- struct ebpf_target *w;
- for (w = root; w; w = w->next) {
- if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_VFS_IDX))))
- continue;
-
- ebpf_vfs_sum_pids(&w->vfs, w->root_pid);
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_unlink");
- write_chart_dimension("calls", w->vfs.unlink_call);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_write");
- write_chart_dimension("calls", w->vfs.write_call + w->vfs.writev_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_write_error");
- write_chart_dimension("calls", w->vfs.write_err + w->vfs.writev_err);
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_read");
- write_chart_dimension("calls", w->vfs.read_call + w->vfs.readv_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_read_error");
- write_chart_dimension("calls", w->vfs.read_err + w->vfs.readv_err);
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_write_bytes");
- write_chart_dimension("writes", w->vfs.write_bytes + w->vfs.writev_bytes);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_read_bytes");
- write_chart_dimension("reads", w->vfs.read_bytes + w->vfs.readv_bytes);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_fsync");
- write_chart_dimension("calls", w->vfs.fsync_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_fsync_error");
- write_chart_dimension("calls", w->vfs.fsync_err);
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_open");
- write_chart_dimension("calls", w->vfs.open_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_open_error");
- write_chart_dimension("calls", w->vfs.open_err);
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_create");
- write_chart_dimension("calls", w->vfs.create_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_create_error");
- write_chart_dimension("calls", w->vfs.create_err);
- ebpf_write_end_chart();
- }
- }
-}
-
-/**
- * Apps Accumulator
- *
- * Sum all values read from the kernel and store them in the first position.
- *
- * @param out the vector with the values read.
- * @param maps_per_core whether the kernel stores one value per core.
- */
-static void vfs_apps_accumulator(netdata_publish_vfs_t *out, int maps_per_core)
-{
- int i, end = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_publish_vfs_t *total = &out[0];
- for (i = 1; i < end; i++) {
- netdata_publish_vfs_t *w = &out[i];
-
- total->write_call += w->write_call;
- total->writev_call += w->writev_call;
- total->read_call += w->read_call;
- total->readv_call += w->readv_call;
- total->unlink_call += w->unlink_call;
-
- total->write_bytes += w->write_bytes;
- total->writev_bytes += w->writev_bytes;
- total->read_bytes += w->read_bytes;
- total->readv_bytes += w->readv_bytes;
-
- total->write_err += w->write_err;
- total->writev_err += w->writev_err;
- total->read_err += w->read_err;
- total->readv_err += w->readv_err;
- total->unlink_err += w->unlink_err;
- }
-}
-
-/**
- * Fill PID
- *
- * Fill PID structures
- *
- * @param current_pid the PID for which we are collecting data.
- * @param publish values read from the hash tables.
- */
-static void vfs_fill_pid(uint32_t current_pid, netdata_publish_vfs_t *publish)
-{
- netdata_publish_vfs_t *curr = vfs_pid[current_pid];
- if (!curr) {
- curr = ebpf_vfs_get();
- vfs_pid[current_pid] = curr;
- }
-
- memcpy(curr, &publish[0], sizeof(netdata_publish_vfs_t));
-}
-
-/**
- * Read the hash table and store data to allocated vectors.
- *
- * @param maps_per_core whether the kernel stores one value per core.
- */
-static void ebpf_vfs_read_apps(int maps_per_core)
-{
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
- netdata_publish_vfs_t *vv = vfs_vector;
- int fd = vfs_maps[NETDATA_VFS_PID].map_fd;
- size_t length = sizeof(netdata_publish_vfs_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- while (pids) {
- uint32_t key = pids->pid;
-
- if (bpf_map_lookup_elem(fd, &key, vv)) {
- pids = pids->next;
- continue;
- }
-
- vfs_apps_accumulator(vv, maps_per_core);
-
- vfs_fill_pid(key, vv);
-
- // Clean the buffer to avoid leaking data read for one process into another.
- memset(vv, 0, length);
-
- pids = pids->next;
- }
-}
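-
-/*
- * Note: the loop above walks Netdata's own PID list and looks each PID up in
- * the BPF map. An alternative, shown only as a hedged sketch, is to iterate
- * the map itself with libbpf's bpf_map_get_next_key() (requires <stdint.h>
- * and <bpf/bpf.h>; the accumulation step is left abstract).
- */
-static void sketch_iterate_pid_map(int map_fd, void *value_buffer)
-{
- uint32_t key, next_key;
- uint32_t *prev = NULL; // a NULL previous key starts the walk
-
- while (!bpf_map_get_next_key(map_fd, prev, &next_key)) {
- if (!bpf_map_lookup_elem(map_fd, &next_key, value_buffer)) {
- /* accumulate value_buffer for PID next_key here */
- }
- key = next_key;
- prev = &key;
- }
-}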
-
-/**
- * Update cgroup
- *
- * Update cgroup data based on PID.
- *
- * @param maps_per_core whether the kernel stores one value per core.
- */
-static void read_update_vfs_cgroup(int maps_per_core)
-{
- ebpf_cgroup_target_t *ect;
- netdata_publish_vfs_t *vv = vfs_vector;
- int fd = vfs_maps[NETDATA_VFS_PID].map_fd;
- size_t length = sizeof(netdata_publish_vfs_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- struct pid_on_target2 *pids;
- for (pids = ect->pids; pids; pids = pids->next) {
- int pid = pids->pid;
- netdata_publish_vfs_t *out = &pids->vfs;
- if (likely(vfs_pid) && vfs_pid[pid]) {
- netdata_publish_vfs_t *in = vfs_pid[pid];
-
- memcpy(out, in, sizeof(netdata_publish_vfs_t));
- } else {
- memset(vv, 0, length);
- if (!bpf_map_lookup_elem(fd, &pid, vv)) {
- vfs_apps_accumulator(vv, maps_per_core);
-
- memcpy(out, vv, sizeof(netdata_publish_vfs_t));
- }
- }
- }
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Sum PIDs
- *
- * Sum values for all targets.
- *
- * @param vfs structure used to store data
- * @param pids input data
- */
-static void ebpf_vfs_sum_cgroup_pids(netdata_publish_vfs_t *vfs, struct pid_on_target2 *pids)
-{
- netdata_publish_vfs_t accumulator;
- memset(&accumulator, 0, sizeof(accumulator));
-
- while (pids) {
- netdata_publish_vfs_t *w = &pids->vfs;
-
- accumulator.write_call += w->write_call;
- accumulator.writev_call += w->writev_call;
- accumulator.read_call += w->read_call;
- accumulator.readv_call += w->readv_call;
- accumulator.unlink_call += w->unlink_call;
- accumulator.fsync_call += w->fsync_call;
- accumulator.open_call += w->open_call;
- accumulator.create_call += w->create_call;
-
- accumulator.write_bytes += w->write_bytes;
- accumulator.writev_bytes += w->writev_bytes;
- accumulator.read_bytes += w->read_bytes;
- accumulator.readv_bytes += w->readv_bytes;
-
- accumulator.write_err += w->write_err;
- accumulator.writev_err += w->writev_err;
- accumulator.read_err += w->read_err;
- accumulator.readv_err += w->readv_err;
- accumulator.unlink_err += w->unlink_err;
- accumulator.fsync_err += w->fsync_err;
- accumulator.open_err += w->open_err;
- accumulator.create_err += w->create_err;
-
- pids = pids->next;
- }
-
- // These conditions were added because we use an incremental algorithm: published values must never decrease.
- vfs->write_call = (accumulator.write_call >= vfs->write_call) ? accumulator.write_call : vfs->write_call;
- vfs->writev_call = (accumulator.writev_call >= vfs->writev_call) ? accumulator.writev_call : vfs->writev_call;
- vfs->read_call = (accumulator.read_call >= vfs->read_call) ? accumulator.read_call : vfs->read_call;
- vfs->readv_call = (accumulator.readv_call >= vfs->readv_call) ? accumulator.readv_call : vfs->readv_call;
- vfs->unlink_call = (accumulator.unlink_call >= vfs->unlink_call) ? accumulator.unlink_call : vfs->unlink_call;
- vfs->fsync_call = (accumulator.fsync_call >= vfs->fsync_call) ? accumulator.fsync_call : vfs->fsync_call;
- vfs->open_call = (accumulator.open_call >= vfs->open_call) ? accumulator.open_call : vfs->open_call;
- vfs->create_call = (accumulator.create_call >= vfs->create_call) ? accumulator.create_call : vfs->create_call;
-
- vfs->write_bytes = (accumulator.write_bytes >= vfs->write_bytes) ? accumulator.write_bytes : vfs->write_bytes;
- vfs->writev_bytes = (accumulator.writev_bytes >= vfs->writev_bytes) ? accumulator.writev_bytes : vfs->writev_bytes;
- vfs->read_bytes = (accumulator.read_bytes >= vfs->read_bytes) ? accumulator.read_bytes : vfs->read_bytes;
- vfs->readv_bytes = (accumulator.readv_bytes >= vfs->readv_bytes) ? accumulator.readv_bytes : vfs->readv_bytes;
-
- vfs->write_err = (accumulator.write_err >= vfs->write_err) ? accumulator.write_err : vfs->write_err;
- vfs->writev_err = (accumulator.writev_err >= vfs->writev_err) ? accumulator.writev_err : vfs->writev_err;
- vfs->read_err = (accumulator.read_err >= vfs->read_err) ? accumulator.read_err : vfs->read_err;
- vfs->readv_err = (accumulator.readv_err >= vfs->readv_err) ? accumulator.readv_err : vfs->readv_err;
- vfs->unlink_err = (accumulator.unlink_err >= vfs->unlink_err) ? accumulator.unlink_err : vfs->unlink_err;
- vfs->fsync_err = (accumulator.fsync_err >= vfs->fsync_err) ? accumulator.fsync_err : vfs->fsync_err;
- vfs->open_err = (accumulator.open_err >= vfs->open_err) ? accumulator.open_err : vfs->open_err;
- vfs->create_err = (accumulator.create_err >= vfs->create_err) ? accumulator.create_err : vfs->create_err;
-}
-
-/**
- * Create specific VFS charts
- *
- * Create charts for cgroup/application.
- *
- * @param type the chart type.
- * @param em the main thread structure.
- */
-static void ebpf_create_specific_vfs_charts(char *type, ebpf_module_t *em)
-{
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_DELETED, "Files deleted",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_UNLINK_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5500,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "Write to disk",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5501,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "Fails to write",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5502,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
- }
-
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "Read from disk",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5503,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "Fails to read",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5504,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
- }
-
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "Bytes written on disk",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5505,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "Bytes read from disk",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5506,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls to vfs_fsync.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_FSYNC_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5507,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "Sync error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_FSYNC_ERROR_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5508,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
- }
-
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls to vfs_open.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_OPEN_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5509,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "Open error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_OPEN_ERROR_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5510,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
- }
-
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls to vfs_create.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_CREATE_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5511,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "Create error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_CREATE_ERROR_CONTEXT,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5512,
- ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
- }
-}
-
-/**
- * Obsolete specific VFS charts
- *
- * Obsolete charts for cgroup/application.
- *
- * @param type the chart type.
- * @param em the main thread structure.
- */
-static void ebpf_obsolete_specific_vfs_charts(char *type, ebpf_module_t *em)
-{
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_DELETED, "", "Files deleted",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_UNLINK_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5500, em->update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "", "Write to disk",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_WRITE_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5501, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "", "Fails to write",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5502, em->update_every);
- }
-
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "", "Read from disk",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_READ_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5503, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "", "Fails to read",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5504, em->update_every);
- }
-
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "", "Bytes written on disk",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5505, em->update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "", "Bytes read from disk",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5506, em->update_every);
-
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "", "Calls to vfs_fsync.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_FSYNC_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5507, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "", "Sync error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_FSYNC_ERROR_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5508, em->update_every);
- }
-
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "", "Calls to vfs_open.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_OPEN_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5509, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "", "Open error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_OPEN_ERROR_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5510, em->update_every);
- }
-
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "", "Calls to vfs_create.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_CREATE_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5511, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "", "Create error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_CREATE_ERROR_CONTEXT,
- NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5512, em->update_every);
- }
-}
-
-/**
- * Send specific VFS data
- *
- * Send data for a specific cgroup/application.
- *
- * @param type chart type
- * @param values structure with values that will be sent to Netdata
- * @param em the main thread structure
- */
-static void ebpf_send_specific_vfs_data(char *type, netdata_publish_vfs_t *values, ebpf_module_t *em)
-{
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_DELETED, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].name, (long long)values->unlink_call);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].name,
- (long long)values->write_call + (long long)values->writev_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].name,
- (long long)values->write_err + (long long)values->writev_err);
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].name,
- (long long)values->read_call + (long long)values->readv_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].name,
- (long long)values->read_err + (long long)values->readv_err);
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].name,
- (long long)values->write_bytes + (long long)values->writev_bytes);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].name,
- (long long)values->read_bytes + (long long)values->readv_bytes);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].name,
- (long long)values->fsync_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].name,
- (long long)values->fsync_err);
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].name,
- (long long)values->open_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].name,
- (long long)values->open_err);
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].name,
- (long long)values->create_call);
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "");
- write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].name,
- (long long)values->create_err);
- ebpf_write_end_chart();
- }
-}
-
-/**
- * Create Systemd VFS Charts
- *
- * Create charts when systemd is enabled.
- *
- * @param em the main collector structure
- **/
-static void ebpf_create_systemd_vfs_charts(ebpf_module_t *em)
-{
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_DELETED, "Files deleted",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20065,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_UNLINK_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "Write to disk",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20066,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_WRITE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "Fails to write",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20067,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_VFS_WRITE_ERROR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
- }
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "Read from disk",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20068,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_READ_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "Fails to read",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20069,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_VFS_READ_ERROR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
- }
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "Bytes written on disk",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20070,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_WRITE_BYTES_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "Bytes read from disk",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20071,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_READ_BYTES_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls to vfs_fsync.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20072,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_FSYNC_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "Sync error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20073,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_FSYNC_ERROR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
- }
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls to vfs_open.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20074,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_OPEN_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "Open error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20075,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_OPEN_ERROR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
- }
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls to vfs_create.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20076,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_CREATE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "Create error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20077,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_CREATE_ERROR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
- }
-}
-
-/**
- * Send Systemd charts
- *
- * Send collected data to Netdata.
- *
- * @param em the main collector structure
- */
-static void ebpf_send_systemd_vfs_charts(ebpf_module_t *em)
-{
- ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.unlink_call);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_call +
- ect->publish_systemd_vfs.writev_call);
- }
- }
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_err +
- ect->publish_systemd_vfs.writev_err);
- }
- }
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_call +
- ect->publish_systemd_vfs.readv_call);
- }
- }
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_err +
- ect->publish_systemd_vfs.readv_err);
- }
- }
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_bytes +
- ect->publish_systemd_vfs.writev_bytes);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_bytes +
- ect->publish_systemd_vfs.readv_bytes);
- }
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.fsync_call);
- }
- }
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.fsync_err);
- }
- }
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.open_call);
- }
- }
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.open_err);
- }
- }
- ebpf_write_end_chart();
- }
-
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.create_call);
- }
- }
- ebpf_write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.create_err);
- }
- }
- ebpf_write_end_chart();
- }
-}
-
-/**
- * Send data to Netdata calling auxiliary functions.
- *
- * @param em the main collector structure
-*/
-static void ebpf_vfs_send_cgroup_data(ebpf_module_t *em)
-{
- if (!ebpf_cgroup_pids)
- return;
-
- pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_cgroup_target_t *ect;
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- ebpf_vfs_sum_cgroup_pids(&ect->publish_systemd_vfs, ect->pids);
- }
-
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
- if (send_cgroup_chart) {
- ebpf_create_systemd_vfs_charts(em);
- }
- ebpf_send_systemd_vfs_charts(em);
- }
-
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
- continue;
-
- if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_VFS_CHART) && ect->updated) {
- ebpf_create_specific_vfs_charts(ect->name, em);
- ect->flags |= NETDATA_EBPF_CGROUP_HAS_VFS_CHART;
- }
-
- if (ect->flags & NETDATA_EBPF_CGROUP_HAS_VFS_CHART) {
- if (ect->updated) {
- ebpf_send_specific_vfs_data(ect->name, &ect->publish_systemd_vfs, em);
- } else {
- ebpf_obsolete_specific_vfs_charts(ect->name, em);
- ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_VFS_CHART;
- }
- }
- }
-
- pthread_mutex_unlock(&mutex_cgroup_shm);
-}
-
-/**
- * Main loop for this collector.
- *
- * @param em the structure with thread information
- */
-static void vfs_collector(ebpf_module_t *em)
-{
- int cgroups = em->cgroup_charts;
- heartbeat_t hb;
- heartbeat_init(&hb);
- int update_every = em->update_every;
- int counter = update_every - 1;
- int maps_per_core = em->maps_per_core;
- uint32_t running_time = 0;
- uint32_t lifetime = em->lifetime;
- netdata_idx_t *stats = em->hash_table_stats;
- memset(stats, 0, sizeof(em->hash_table_stats));
- while (!ebpf_plugin_exit && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
- if (ebpf_plugin_exit || ++counter != update_every)
- continue;
-
- counter = 0;
- netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_vfs_read_global_table(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps)
- ebpf_vfs_read_apps(maps_per_core);
-
- if (cgroups)
- read_update_vfs_cgroup(maps_per_core);
-
- pthread_mutex_lock(&lock);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_vfs_pid)
- ebpf_send_data_aral_chart(ebpf_aral_vfs_pid, em);
-#endif
-
- ebpf_vfs_send_data(em);
- fflush(stdout);
-
- if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
- ebpf_vfs_send_apps_data(em, apps_groups_root_target);
-
- if (cgroups)
- ebpf_vfs_send_cgroup_data(em);
-
- pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (running_time && !em->running_time)
- running_time = update_every;
- else
- running_time += update_every;
-
- em->running_time = running_time;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-}
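-
-/*
- * Hedged sketch of the cadence logic in vfs_collector(): the heartbeat ticks
- * once per second, but collection and publishing happen only every
- * update_every ticks. A stand-alone version of the counter pattern:
- */
-static void sketch_pace(int update_every, int (*should_exit)(void), void (*collect_and_send)(void))
-{
- int counter = update_every - 1; // fire on the very first tick
- while (!should_exit()) {
- /* sleep ~1 second here (heartbeat_next() in the real code) */
- if (++counter != update_every)
- continue;
-
- counter = 0;
- collect_and_send();
- }
-}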
-
-/*****************************************************************
- *
- * FUNCTIONS TO CREATE CHARTS
- *
- *****************************************************************/
-
-/**
- * Create IO chart
- *
- * @param family the chart family
- * @param name the chart name
- * @param axis the axis label
- * @param web the group name used to attach the chart on the dashboard
- * @param order the order number of the specified chart
- * @param algorithm the algorithm used by the chart dimensions.
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web,
- int order, int algorithm, int update_every)
-{
- printf("CHART %s.%s '' 'Bytes written and read' '%s' '%s' '' line %d %d '' 'ebpf.plugin' 'filesystem'\n",
- family,
- name,
- axis,
- web,
- order,
- update_every);
-
- printf("DIMENSION %s %s %s 1 1\n",
- vfs_id_names[NETDATA_KEY_PUBLISH_VFS_READ],
- vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_READ],
- ebpf_algorithms[algorithm]);
- printf("DIMENSION %s %s %s -1 1\n",
- vfs_id_names[NETDATA_KEY_PUBLISH_VFS_WRITE],
- vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_WRITE],
- ebpf_algorithms[algorithm]);
-}
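-
-/*
- * The DIMENSION multipliers above (1 for reads, -1 for writes) mirror the
- * two flows around zero on a single chart. With illustrative ids and values,
- * the emitted definition would look like:
- *
- * CHART filesystem.vfs_io '' 'Bytes written and read' 'bytes/s' 'vfs' '' line 2066 5 '' 'ebpf.plugin' 'filesystem'
- * DIMENSION read read incremental 1 1
- * DIMENSION write write incremental -1 1
- */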
-
-/**
- * Create global charts
- *
- * Call ebpf_create_chart to create the charts for the collector.
- *
- * @param em a pointer to the structure with the default values.
- */
-static void ebpf_create_global_charts(ebpf_module_t *em)
-{
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_FILE_CLEAN_COUNT,
- "Remove files",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_CLEAN,
- ebpf_create_global_dimension,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_FILE_IO_COUNT,
- "Calls to IO",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_COUNT,
- ebpf_create_global_dimension,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
- 2, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- ebpf_create_io_chart(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_IO_FILE_BYTES, EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_VFS_GROUP,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_BYTES,
- NETDATA_EBPF_INCREMENTAL_IDX, em->update_every);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_FILE_ERR_COUNT,
- "Fails to write or read",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EBYTES,
- ebpf_create_global_dimension,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
- 2, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
- }
-
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_FSYNC,
- "Calls to vfs_fsync.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_FSYNC,
- ebpf_create_global_dimension,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_FSYNC_ERR,
- "Fails to synchronize",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EFSYNC,
- ebpf_create_global_dimension,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
- }
-
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_OPEN,
- "Calls to vfs_open.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_OPEN,
- ebpf_create_global_dimension,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_OPEN_ERR,
- "Fails to open a file",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EOPEN,
- ebpf_create_global_dimension,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
- }
-
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_CREATE,
- "Calls to vfs_create.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_CREATE,
- ebpf_create_global_dimension,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
- NETDATA_VFS_CREATE_ERR,
- "Fails to create a file.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_ECREATE,
- ebpf_create_global_dimension,
- &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
- 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
- }
-
- fflush(stdout);
-}
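
Note the recurring `if (em->mode < MODE_ENTRY)` guard above: error charts are created only when the plugin also instruments function returns, since entry-only tracing cannot observe failures. A hedged sketch of the gate follows; the enum values are placeholders, the real definitions live in the shared eBPF plugin headers:

```c
#include <stdio.h>

/* Hypothetical stand-in for the plugin's load-mode enum. MODE_RETURN
 * sorts below MODE_ENTRY, so "mode < MODE_ENTRY" is true only when
 * return probes are attached and error counters can be populated. */
enum ebpf_mode { MODE_RETURN = 0, MODE_DEVMODE = 1, MODE_ENTRY = 2 };

static int ebpf_error_charts_enabled(enum ebpf_mode mode) {
    return mode < MODE_ENTRY;   // error charts need return instrumentation
}

int main(void) {
    printf("entry mode creates error charts? %d\n",
           ebpf_error_charts_enabled(MODE_ENTRY));    // prints 0
    printf("return mode creates error charts? %d\n",
           ebpf_error_charts_enabled(MODE_RETURN));   // prints 1
    return 0;
}
```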
-
-/**
- * Create apps charts
- *
- * Call ebpf_write_chart_cmd to create the charts on the apps submenu.
- *
- * @param em a pointer to the structure with the default values.
- * @param ptr a pointer for the targets.
- **/
-void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr)
-{
- struct ebpf_target *root = ptr;
- struct ebpf_target *w;
- int order = 20275;
- int update_every = em->update_every;
- for (w = root; w; w = w->next) {
- if (unlikely(!w->exposed))
- continue;
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_unlink",
- "Files deleted.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_unlink",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_write",
- "Write to disk.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_write",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_write_error",
- "Fails to write.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_write_error",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
- }
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_read",
- "Read from disk.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_read",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_read_error",
- "Fails to read.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_read_error",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
- }
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_write_bytes",
- "Bytes written on disk.",
- EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_write_bytes",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION writes '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_read_bytes",
- "Bytes read from disk.",
- EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_read_bytes",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION reads '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_fsync",
- "Calls to vfs_fsync.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_fsync",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_fsync_error",
- "Fails to sync.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_fsync_error",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
- }
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_open",
- "Calls to vfs_open.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_open",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_open_error",
- "Fails to open.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_open_error",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
- }
-
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_create",
- "Calls to vfs_create.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_create",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
- w->clean_name,
- "_ebpf_call_vfs_create_error",
- "Fails to create a file.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "app.ebpf_call_vfs_create_error",
- order++,
- update_every,
- NETDATA_EBPF_MODULE_NAME_VFS);
- ebpf_create_chart_labels("app_group", w->name, 1);
- ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
- }
-
- w->charts_created |= 1<<EBPF_MODULE_VFS_IDX;
- }
-
- em->apps_charts |= NETDATA_EBPF_APPS_FLAG_CHART_CREATED;
-}
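
The loop above finishes each target by setting a per-module bit in `w->charts_created`, so later passes can tell whether VFS charts already exist for that target. A small sketch of the bitmask idiom; the index value is an assumption, the real `EBPF_MODULE_VFS_IDX` comes from the plugin headers:

```c
#include <stdio.h>

enum { EBPF_MODULE_VFS_IDX = 5 };   // hypothetical module index

int main(void) {
    unsigned int charts_created = 0;

    // Mark VFS charts as created for this target, as the loop above does.
    charts_created |= 1 << EBPF_MODULE_VFS_IDX;

    // A later pass can test the same bit before creating or removing charts.
    if (charts_created & (1 << EBPF_MODULE_VFS_IDX))
        printf("VFS charts already exist for this target\n");
    return 0;
}
```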
-
-/*****************************************************************
- *
- * FUNCTIONS TO START THREAD
- *
- *****************************************************************/
-
-/**
- * Allocate vectors used by this thread.
- * We do not test the return value, because callocz() already does that and
- * shuts the software down when the allocation is not possible.
- *
- * @param apps is apps enabled?
- */
-static void ebpf_vfs_allocate_global_vectors(int apps)
-{
- if (apps) {
- ebpf_vfs_aral_init();
- vfs_pid = callocz((size_t)pid_max, sizeof(netdata_publish_vfs_t *));
- vfs_vector = callocz(ebpf_nprocs, sizeof(netdata_publish_vfs_t));
- }
-
- memset(vfs_aggregated_data, 0, sizeof(vfs_aggregated_data));
- memset(vfs_publish_aggregated, 0, sizeof(vfs_publish_aggregated));
-
- vfs_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
-}
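
The comment above relies on the `callocz()` contract: it never returns NULL, aborting the process on failure instead. Below is a simplified stand-in with that assumed behavior; the real libnetdata helper also logs through Netdata's logging facilities:

```c
#include <stdio.h>
#include <stdlib.h>

/* Simplified callocz() sketch: allocate-or-die, so callers never need
 * to test the return value. */
static void *callocz(size_t nmemb, size_t size) {
    void *p = calloc(nmemb, size);
    if (!p) {
        fprintf(stderr, "callocz: cannot allocate %zu bytes\n", nmemb * size);
        exit(EXIT_FAILURE);
    }
    return p;
}

int main(void) {
    double *v = callocz(4, sizeof(double));  // zero-initialized, never NULL
    v[0] = 1.0;
    free(v);
    return 0;
}
```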
-
-/*****************************************************************
- *
- * EBPF VFS THREAD
- *
- *****************************************************************/
-
-/**
- * Load BPF
- *
- * Load BPF files.
- *
- * @param em the structure with configuration
- *
- * @return It returns 0 on success and -1 otherwise.
- */
-static int ebpf_vfs_load_bpf(ebpf_module_t *em)
-{
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
-#endif
-
- int ret = 0;
- ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_EBPF_VFS_WRITE].mode);
- if (em->load & EBPF_LOAD_LEGACY) {
- em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
- if (!em->probe_links) {
- ret = -1;
- }
- }
-#ifdef LIBBPF_MAJOR_VERSION
- else {
- vfs_bpf_obj = vfs_bpf__open();
- if (!vfs_bpf_obj)
- ret = -1;
- else
- ret = ebpf_vfs_load_and_attach(vfs_bpf_obj, em);
- }
-#endif
-
- return ret;
-}
-
-/**
- * VFS thread
- *
- * Thread used to generate VFS charts.
- *
- * @param ptr a pointer to `struct ebpf_module`
- *
- * @return It always returns NULL.
- */
-void *ebpf_vfs_thread(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_vfs_exit, ptr);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = vfs_maps;
-
- ebpf_update_pid_table(&vfs_maps[NETDATA_VFS_PID], em);
-
- ebpf_vfs_allocate_global_vectors(em->apps_charts);
-
-#ifdef LIBBPF_MAJOR_VERSION
- ebpf_adjust_thread_load(em, default_btf);
-#endif
- if (ebpf_vfs_load_bpf(em)) {
- goto endvfs;
- }
-
- int algorithms[NETDATA_KEY_PUBLISH_VFS_END] = {
- NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX,
- NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX
- };
-
- ebpf_global_labels(vfs_aggregated_data, vfs_publish_aggregated, vfs_dimension_names,
- vfs_id_names, algorithms, NETDATA_KEY_PUBLISH_VFS_END);
-
- pthread_mutex_lock(&lock);
- ebpf_create_global_charts(em);
- ebpf_update_stats(&plugin_statistics, em);
- ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_vfs_pid)
- vfs_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_VFS_ARAL_NAME, em);
-#endif
-
- pthread_mutex_unlock(&lock);
-
- vfs_collector(em);
-
-endvfs:
- ebpf_update_disabled_plugin_stats(em);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_vfs.h b/collectors/ebpf.plugin/ebpf_vfs.h
deleted file mode 100644
index 8fe12a7eb..000000000
--- a/collectors/ebpf.plugin/ebpf_vfs.h
+++ /dev/null
@@ -1,178 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_VFS_H
-#define NETDATA_EBPF_VFS_H 1
-
-// Module name & description
-#define NETDATA_EBPF_MODULE_NAME_VFS "vfs"
-#define NETDATA_EBPF_VFS_MODULE_DESC "Monitor VFS (Virtual File System) functions. This thread is integrated with apps and cgroup."
-
-#define NETDATA_DIRECTORY_VFS_CONFIG_FILE "vfs.conf"
-
-// Global chart name
-#define NETDATA_VFS_FILE_CLEAN_COUNT "vfs_deleted_objects"
-#define NETDATA_VFS_FILE_IO_COUNT "vfs_io"
-#define NETDATA_VFS_FILE_ERR_COUNT "vfs_io_error"
-#define NETDATA_VFS_IO_FILE_BYTES "vfs_io_bytes"
-#define NETDATA_VFS_FSYNC "vfs_fsync"
-#define NETDATA_VFS_FSYNC_ERR "vfs_fsync_error"
-#define NETDATA_VFS_OPEN "vfs_open"
-#define NETDATA_VFS_OPEN_ERR "vfs_open_error"
-#define NETDATA_VFS_CREATE "vfs_create"
-#define NETDATA_VFS_CREATE_ERR "vfs_create_error"
-
-// Charts created on Apps submenu
-#define NETDATA_SYSCALL_APPS_FILE_DELETED "file_deleted"
-#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS "vfs_write_call"
-#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS "vfs_read_call"
-#define NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES "vfs_write_bytes"
-#define NETDATA_SYSCALL_APPS_VFS_READ_BYTES "vfs_read_bytes"
-#define NETDATA_SYSCALL_APPS_VFS_FSYNC "vfs_fsync"
-#define NETDATA_SYSCALL_APPS_VFS_OPEN "vfs_open"
-#define NETDATA_SYSCALL_APPS_VFS_CREATE "vfs_create"
-
-#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR "vfs_write_error"
-#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR "vfs_read_error"
-#define NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR "vfs_fsync_error"
-#define NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR "vfs_open_error"
-#define NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR "vfs_create_error"
-
-// Group used on Dashboard
-#define NETDATA_VFS_GROUP "vfs"
-#define NETDATA_VFS_CGROUP_GROUP "vfs (eBPF)"
-
-// Contexts
-#define NETDATA_CGROUP_VFS_UNLINK_CONTEXT "cgroup.vfs_unlink"
-#define NETDATA_CGROUP_VFS_WRITE_CONTEXT "cgroup.vfs_write"
-#define NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT "cgroup.vfs_write_error"
-#define NETDATA_CGROUP_VFS_READ_CONTEXT "cgroup.vfs_read"
-#define NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT "cgroup.vfs_read_error"
-#define NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT "cgroup.vfs_write_bytes"
-#define NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT "cgroup.vfs_read_bytes"
-#define NETDATA_CGROUP_VFS_CREATE_CONTEXT "cgroup.vfs_create"
-#define NETDATA_CGROUP_VFS_CREATE_ERROR_CONTEXT "cgroup.vfs_create_error"
-#define NETDATA_CGROUP_VFS_OPEN_CONTEXT "cgroup.vfs_open"
-#define NETDATA_CGROUP_VFS_OPEN_ERROR_CONTEXT "cgroup.vfs_open_error"
-#define NETDATA_CGROUP_VFS_FSYNC_CONTEXT "cgroup.vfs_fsync"
-#define NETDATA_CGROUP_VFS_FSYNC_ERROR_CONTEXT "cgroup.vfs_fsync_error"
-
-#define NETDATA_SYSTEMD_VFS_UNLINK_CONTEXT "services.vfs_unlink"
-#define NETDATA_SYSTEMD_VFS_WRITE_CONTEXT "services.vfs_write"
-#define NETDATA_SYSTEMD_VFS_WRITE_ERROR_CONTEXT "services.vfs_write_error"
-#define NETDATA_SYSTEMD_VFS_READ_CONTEXT "services.vfs_read"
-#define NETDATA_SYSTEMD_VFS_READ_ERROR_CONTEXT "services.vfs_read_error"
-#define NETDATA_SYSTEMD_VFS_WRITE_BYTES_CONTEXT "services.vfs_write_bytes"
-#define NETDATA_SYSTEMD_VFS_READ_BYTES_CONTEXT "services.vfs_read_bytes"
-#define NETDATA_SYSTEMD_VFS_CREATE_CONTEXT "services.vfs_create"
-#define NETDATA_SYSTEMD_VFS_CREATE_ERROR_CONTEXT "services.vfs_create_error"
-#define NETDATA_SYSTEMD_VFS_OPEN_CONTEXT "services.vfs_open"
-#define NETDATA_SYSTEMD_VFS_OPEN_ERROR_CONTEXT "services.vfs_open_error"
-#define NETDATA_SYSTEMD_VFS_FSYNC_CONTEXT "services.vfs_fsync"
-#define NETDATA_SYSTEMD_VFS_FSYNC_ERROR_CONTEXT "services.vfs_fsync_error"
-
-// ARAL name
-#define NETDATA_EBPF_VFS_ARAL_NAME "ebpf_vfs"
-
-typedef struct netdata_publish_vfs {
- uint64_t pid_tgid;
- uint32_t pid;
- uint32_t pad;
-
- // Counter
- uint32_t write_call;
- uint32_t writev_call;
- uint32_t read_call;
- uint32_t readv_call;
- uint32_t unlink_call;
- uint32_t fsync_call;
- uint32_t open_call;
- uint32_t create_call;
-
- // Accumulator
- uint64_t write_bytes;
- uint64_t writev_bytes;
- uint64_t readv_bytes;
- uint64_t read_bytes;
-
- // Counter
- uint32_t write_err;
- uint32_t writev_err;
- uint32_t read_err;
- uint32_t readv_err;
- uint32_t unlink_err;
- uint32_t fsync_err;
- uint32_t open_err;
- uint32_t create_err;
-} netdata_publish_vfs_t;
-
-enum netdata_publish_vfs_list {
- NETDATA_KEY_PUBLISH_VFS_UNLINK,
- NETDATA_KEY_PUBLISH_VFS_READ,
- NETDATA_KEY_PUBLISH_VFS_WRITE,
- NETDATA_KEY_PUBLISH_VFS_FSYNC,
- NETDATA_KEY_PUBLISH_VFS_OPEN,
- NETDATA_KEY_PUBLISH_VFS_CREATE,
-
- NETDATA_KEY_PUBLISH_VFS_END
-};
-
-enum vfs_counters {
- NETDATA_KEY_CALLS_VFS_WRITE,
- NETDATA_KEY_ERROR_VFS_WRITE,
- NETDATA_KEY_BYTES_VFS_WRITE,
-
- NETDATA_KEY_CALLS_VFS_WRITEV,
- NETDATA_KEY_ERROR_VFS_WRITEV,
- NETDATA_KEY_BYTES_VFS_WRITEV,
-
- NETDATA_KEY_CALLS_VFS_READ,
- NETDATA_KEY_ERROR_VFS_READ,
- NETDATA_KEY_BYTES_VFS_READ,
-
- NETDATA_KEY_CALLS_VFS_READV,
- NETDATA_KEY_ERROR_VFS_READV,
- NETDATA_KEY_BYTES_VFS_READV,
-
- NETDATA_KEY_CALLS_VFS_UNLINK,
- NETDATA_KEY_ERROR_VFS_UNLINK,
-
- NETDATA_KEY_CALLS_VFS_FSYNC,
- NETDATA_KEY_ERROR_VFS_FSYNC,
-
- NETDATA_KEY_CALLS_VFS_OPEN,
- NETDATA_KEY_ERROR_VFS_OPEN,
-
- NETDATA_KEY_CALLS_VFS_CREATE,
- NETDATA_KEY_ERROR_VFS_CREATE,
-
- // Keep this entry last and do not skip numbers: it is used as the element counter
- NETDATA_VFS_COUNTER
-};
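
The sentinel entry makes sizing buffers mechanical: any array indexed by these counters can be dimensioned with `NETDATA_VFS_COUNTER` and stays correct when new keys are appended before it. A short sketch of the idiom; the code below is illustrative, not from the plugin:

```c
#include <stdint.h>
#include <stdlib.h>

typedef uint64_t netdata_idx_t;

enum { NETDATA_VFS_COUNTER = 20 };  // stands in for the enum above

int main(void) {
    // One slot per counter; the size adapts automatically if new keys
    // are added before the sentinel.
    netdata_idx_t *values = calloc(NETDATA_VFS_COUNTER, sizeof(netdata_idx_t));
    if (!values)
        return 1;
    free(values);
    return 0;
}
```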
-
-enum netdata_vfs_tables {
- NETDATA_VFS_PID,
- NETDATA_VFS_ALL,
- NETDATA_VFS_CTRL
-};
-
-enum netdata_vfs_calls_name {
- NETDATA_EBPF_VFS_WRITE,
- NETDATA_EBPF_VFS_WRITEV,
- NETDATA_EBPF_VFS_READ,
- NETDATA_EBPF_VFS_READV,
- NETDATA_EBPF_VFS_UNLINK,
- NETDATA_EBPF_VFS_FSYNC,
- NETDATA_EBPF_VFS_OPEN,
- NETDATA_EBPF_VFS_CREATE,
-
- NETDATA_VFS_END_LIST
-};
-
-void *ebpf_vfs_thread(void *ptr);
-void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr);
-void ebpf_vfs_release(netdata_publish_vfs_t *stat);
-extern netdata_ebpf_targets_t vfs_targets[];
-
-extern struct config vfs_config;
-
-#endif /* NETDATA_EBPF_VFS_H */
diff --git a/collectors/ebpf.plugin/integrations/ebpf_cachestat.md b/collectors/ebpf.plugin/integrations/ebpf_cachestat.md
deleted file mode 100644
index 5bf0a3774..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_cachestat.md
+++ /dev/null
@@ -1,179 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_cachestat.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF Cachestat"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF Cachestat
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: cachestat
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Linux page cache events, giving users a general view of how the kernel manipulates files.
-
-Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional time is between 90 and 200 ms per call on kernels that do not have BTF technology.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per eBPF Cachestat instance
-
-These metrics show the total number of calls to functions inside the kernel.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.cachestat_ratio | ratio | % |
-| mem.cachestat_dirties | dirty | page/s |
-| mem.cachestat_hits | hit | hits/s |
-| mem.cachestat_misses | miss | misses/s |
-
-### Per apps
-
-These metrics show grouped information per apps group.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| app_group | The name of the group defined in the configuration. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| app.ebpf_cachestat_hit_ratio | ratio | % |
-| app.ebpf_cachestat_dirty_pages | pages | page/s |
-| app.ebpf_cachestat_access | hits | hits/s |
-| app.ebpf_cachestat_misses | misses | misses/s |
-
-### Per cgroup
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.cachestat_ratio | ratio | % |
-| cgroup.cachestat_dirties | dirty | page/s |
-| cgroup.cachestat_hits | hit | hits/s |
-| cgroup.cachestat_misses | miss | misses/s |
-| services.cachestat_ratio | a dimension per systemd service | % |
-| services.cachestat_dirties | a dimension per systemd service | page/s |
-| services.cachestat_hits | a dimension per systemd service | hits/s |
-| services.cachestat_misses | a dimension per systemd service | misses/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to /usr/src/linux/.config.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/cachestat.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/cachestat.conf
-```
-#### Options
-
-All options are defined inside the `[global]` section.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor the function returns (`return`). | entry | no |
-| apps | Enable or disable integration with apps.plugin | no | no |
-| cgroups | Enable or disable integration with cgroup.plugin | no | no |
-| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |
-| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |
-| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |
-| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core, instead of keeping centralized information. | yes | no |
-| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |
-
-</details>
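
For orientation, here is a sketch of what `ebpf.d/cachestat.conf` could look like with the documented defaults written out explicitly. It is assembled from the table above; the stock file shipped with Netdata may differ:

```text
[global]
    update every = 5
    ebpf load mode = entry
    apps = no
    cgroups = no
    pid table size = 32768
    ebpf type format = auto
    ebpf co-re tracing = trampoline
    maps per core = yes
    lifetime = 300
```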
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_dcstat.md b/collectors/ebpf.plugin/integrations/ebpf_dcstat.md
deleted file mode 100644
index 4c5719026..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_dcstat.md
+++ /dev/null
@@ -1,177 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_dcstat.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF DCstat"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF DCstat
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: dcstat
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor directory cache events per application, giving an overall view of files in memory or on a storage device.
-
-Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional time is between 90 and 200 ms per call on kernels that do not have BTF technology.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per apps
-
-These metrics show grouped information per apps group.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| app_group | The name of the group defined in the configuration. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| app.ebpf_dc_ratio | ratio | % |
-| app.ebpf_dc_reference | files | files |
-| app.ebpf_dc_not_cache | files | files |
-| app.ebpf_dc_not_found | files | files |
-
-### Per filesystem
-
-These metrics show the total number of calls to functions inside the kernel.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| filesystem.dc_reference | reference, slow, miss | files |
-| filesystem.dc_hit_ratio | ratio | % |
-
-### Per cgroup
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.dc_ratio | ratio | % |
-| cgroup.dc_reference | reference | files |
-| cgroup.dc_not_cache | slow | files |
-| cgroup.dc_not_found | miss | files |
-| services.dc_ratio | a dimension per systemd service | % |
-| services.dc_reference | a dimension per systemd service | files |
-| services.dc_not_cache | a dimension per systemd service | files |
-| services.dc_not_found | a dimension per systemd service | files |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to /usr/src/linux/.config.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/dcstat.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/dcstat.conf
-```
-#### Options
-
-All options are defined inside the `[global]` section.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor the function returns (`return`). | entry | no |
-| apps | Enable or disable integration with apps.plugin | no | no |
-| cgroups | Enable or disable integration with cgroup.plugin | no | no |
-| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |
-| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |
-| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |
-| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core, instead of keeping centralized information. | yes | no |
-| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_disk.md b/collectors/ebpf.plugin/integrations/ebpf_disk.md
deleted file mode 100644
index 557da125d..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_disk.md
+++ /dev/null
@@ -1,137 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_disk.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF Disk"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF Disk
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: disk
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Measure latency for I/O events on disk.
-
-Attach tracepoints to internal kernel functions.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread will add overhead every time that an internal kernel function monitored by this thread is called.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per disk
-
-These metrics measure latency for I/O events on every hard disk present on the host.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| disk.latency_io | latency | calls/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to /usr/src/linux/.config.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-#### Debug Filesystem
-
-This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/disk.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/disk.conf
-```
-#### Options
-
-All options are defined inside the `[global]` section.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor the function returns (`return`). | entry | no |
-| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md b/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md
deleted file mode 100644
index 23f5bd26e..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md
+++ /dev/null
@@ -1,177 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF Filedescriptor"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF Filedescriptor
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: filedescriptor
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor calls to the functions responsible for opening or closing a file descriptor, and possible errors.
-
-Attach tracing (kprobe and trampoline) to internal kernel functions, according to the options used to compile the kernel.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-Depending on the kernel version and the frequency with which files are opened and closed, this thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional time is between 90 and 200 ms per call on kernels that do not have BTF technology.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per cgroup
-
-These metrics show grouped information per cgroup/service.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.fd_open | open | calls/s |
-| cgroup.fd_open_error | open | calls/s |
-| cgroup.fd_closed | close | calls/s |
-| cgroup.fd_close_error | close | calls/s |
-| services.file_open | a dimension per systemd service | calls/s |
-| services.file_open_error | a dimension per systemd service | calls/s |
-| services.file_closed | a dimension per systemd service | calls/s |
-| services.file_close_error | a dimension per systemd service | calls/s |
-
-### Per eBPF Filedescriptor instance
-
-These metrics show the total number of calls to functions inside the kernel.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| filesystem.file_descriptor | open, close | calls/s |
-| filesystem.file_error | open, close | calls/s |
-
-### Per apps
-
-These metrics show grouped information per apps group.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| app_group | The name of the group defined in the configuration. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| app.ebpf_file_open | calls | calls/s |
-| app.ebpf_file_open_error | calls | calls/s |
-| app.ebpf_file_closed | calls | calls/s |
-| app.ebpf_file_close_error | calls | calls/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to /usr/src/linux/.config.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/fd.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/fd.conf
-```
-#### Options
-
-All options are defined inside the `[global]` section.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor the function returns (`return`). | entry | no |
-| apps | Enable or disable integration with apps.plugin | no | no |
-| cgroups | Enable or disable integration with cgroup.plugin | no | no |
-| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |
-| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |
-| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |
-| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core, instead of keeping centralized information. | yes | no |
-| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_filesystem.md b/collectors/ebpf.plugin/integrations/ebpf_filesystem.md
deleted file mode 100644
index 7a1bb832b..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_filesystem.md
+++ /dev/null
@@ -1,163 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_filesystem.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF Filesystem"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF Filesystem
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: filesystem
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor latency of the main filesystem actions, like I/O events.
-
-Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per filesystem
-
-Latency charts associated with filesystem actions.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| filesystem.read_latency | latency period | calls/s |
-| filesystem.open_latency | latency period | calls/s |
-| filesystem.sync_latency | latency period | calls/s |
-
-### Per filesystem
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| filesystem.write_latency | latency period | calls/s |
-
-### Per eBPF Filesystem instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| filesystem.attributte_latency | latency period | calls/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to /usr/src/linux/.config.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/filesystem.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/filesystem.conf
-```
-#### Options
-
-This configuration file has two different sections. The `[global]` section overwrites the default options, while `[filesystem]` allows the user to select the filesystems to monitor.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor the function returns (`return`). | entry | no |
-| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |
-| btrfsdist | Enable or disable latency monitoring for functions associated with btrfs filesystem. | yes | no |
-| ext4dist | Enable or disable latency monitoring for functions associated with ext4 filesystem. | yes | no |
-| nfsdist | Enable or disable latency monitoring for functions associated with nfs filesystem. | yes | no |
-| xfsdist | Enable or disable latency monitoring for functions associated with xfs filesystem. | yes | no |
-| zfsdist | Enable or disable latency monitoring for functions associated with zfs filesystem. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_hardirq.md b/collectors/ebpf.plugin/integrations/ebpf_hardirq.md
deleted file mode 100644
index f9b529624..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_hardirq.md
+++ /dev/null
@@ -1,137 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_hardirq.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF Hardirq"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF Hardirq
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: hardirq
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor latency for each HardIRQ available.
-
-Attach tracepoints to internal kernel functions.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread will add overhead every time that an internal kernel function monitored by this thread is called.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per eBPF Hardirq instance
-
-These metrics show the latest timestamp for each hardIRQ available on the host.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.hardirq_latency | hardirq names | milliseconds |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to /usr/src/linux/.config.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-#### Debug Filesystem
-
-This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/hardirq.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/hardirq.conf
-```
-#### Options
-
-All options are defined inside the `[global]` section.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor the function returns (`return`). | entry | no |
-| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_mdflush.md b/collectors/ebpf.plugin/integrations/ebpf_mdflush.md
deleted file mode 100644
index 0081b7d83..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_mdflush.md
+++ /dev/null
@@ -1,132 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_mdflush.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF MDflush"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF MDflush
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: mdflush
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor when flush events happen between disks.
-
-Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread will add overhead every time `md_flush_request` is called. The estimated additional time is between 90 and 200 ms per call on kernels that do not have BTF technology.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per eBPF MDflush instance
-
-Number of times `md_flush_request` was called since the last collection.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mdstat.mdstat_flush | disk | flushes |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to /usr/src/linux/.config.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
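-A quick sanity check before rebuilding anything, as a hedged sketch (paths vary per distribution):
-
-```bash
-# check the running kernel for the flags this plugin needs;
-# /proc/config.gz exists only when CONFIG_IKCONFIG_PROC is enabled
-zcat /proc/config.gz 2>/dev/null | grep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)='
-grep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' "/boot/config-$(uname -r)"
-```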
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/mdflush.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/mdflush.conf
-```
-#### Options
-
-All options are defined inside the `[global]` section.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |
-| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |
-
-</details>
-
-#### Examples
-No configuration examples ship with this collector.
-
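-As an illustration only, a minimal `[global]` section built from the documented defaults might look like this:
-
-```bash
-# illustrative ebpf.d/mdflush.conf using the documented defaults;
-# the docs above recommend edit-config, and /etc/netdata may be /opt/netdata/etc/netdata
-cat << 'EOF' | sudo tee /etc/netdata/ebpf.d/mdflush.conf
-[global]
-    update every = 5
-    ebpf load mode = entry
-    lifetime = 300
-EOF
-```
-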
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_mount.md b/collectors/ebpf.plugin/integrations/ebpf_mount.md
deleted file mode 100644
index d19e57809..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_mount.md
+++ /dev/null
@@ -1,140 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_mount.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF Mount"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF Mount
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: mount
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor calls to the mount and umount syscalls.
-
-Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread adds overhead every time one of the kernel functions it monitors is called. The estimated additional latency is 90-200ms per call on kernels that do not have BTF support.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per eBPF Mount instance
-
-Calls to the mount and umount syscalls.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mount_points.call | mount, umount | calls/s |
-| mount_points.error | mount, umount | calls/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to `/usr/src/linux/.config`.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-#### Debug Filesystem
-
-This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
-
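-A hedged one-liner to mount `debugfs` only when it is not already available:
-
-```bash
-# mountpoint returns 0 when the path is already a mount point
-mountpoint -q /sys/kernel/debug || sudo mount -t debugfs none /sys/kernel/debug
-```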
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/mount.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/mount.conf
-```
-#### Options
-
-All options are defined inside the `[global]` section.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |
-| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |
-| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |
-| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_oomkill.md b/collectors/ebpf.plugin/integrations/ebpf_oomkill.md
deleted file mode 100644
index 897cddfac..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_oomkill.md
+++ /dev/null
@@ -1,160 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_oomkill.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF OOMkill"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF OOMkill
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: oomkill
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor applications terminated by the kernel's out-of-memory (OOM) killer.
-
-Attach a tracepoint to internal kernel functions.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread will add overhead every time that an internal kernel function monitored by this thread is called.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per cgroup
-
-These metrics show the cgroups/services that triggered the OOM killer.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.oomkills | cgroup name | kills |
-| services.oomkills | a dimension per systemd service | kills |
-
-### Per apps
-
-These metrics show the application groups that triggered the OOM killer.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| app_group | The name of the group defined in the configuration. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| app.oomkill | kills | kills |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to `/usr/src/linux/.config`.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-#### Debug Filesystem
-
-This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/oomkill.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/oomkill.conf
-```
-#### Options
-
-Overwrite the default configuration to reduce the number of I/O events.
-
-
-#### Examples
-No configuration examples ship with this collector.
-
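-As an illustration only, a minimal configuration might look like this (the values shown are assumptions that mirror the defaults documented for sibling eBPF threads):
-
-```bash
-# illustrative ebpf.d/oomkill.conf; values mirror sibling-thread defaults
-cat << 'EOF' | sudo tee /etc/netdata/ebpf.d/oomkill.conf
-[global]
-    update every = 5
-    ebpf load mode = entry
-    lifetime = 300
-EOF
-```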
-
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_process.md b/collectors/ebpf.plugin/integrations/ebpf_process.md
deleted file mode 100644
index 109890139..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_process.md
+++ /dev/null
@@ -1,111 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_process.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF Process"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF Process
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: process
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor the eBPF plugin's internal memory usage.
-
-Uses Netdata's internal statistics to monitor the plugin's memory management.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per eBPF Process instance
-
-How the plugin allocates memory.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| netdata.ebpf_aral_stat_size | memory | bytes |
-| netdata.ebpf_aral_stat_alloc | aral | calls |
-| netdata.ebpf_threads | total, running | threads |
-| netdata.ebpf_load_methods | legacy, co-re | methods |
-| netdata.ebpf_kernel_memory | memory_locked | bytes |
-| netdata.ebpf_hash_tables_count | hash_table | hash tables |
-| netdata.ebpf_aral_stat_size | memory | bytes |
-| netdata.ebpf_aral_stat_alloc | aral | calls |
-| netdata.ebpf_aral_stat_size | memory | bytes |
-| netdata.ebpf_aral_stat_alloc | aral | calls |
-| netdata.ebpf_hash_tables_insert_pid_elements | thread | rows |
-| netdata.ebpf_hash_tables_remove_pid_elements | thread | rows |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Netdata flags
-
-To have these charts, you need to compile Netdata with the `NETDATA_DEV_MODE` flag.
-
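-A hedged sketch of one way to do that from a source checkout; the exact invocation depends on how you install Netdata, and passing the define through `CFLAGS` is an assumption:
-
-```bash
-# rebuild Netdata with the developer-mode define enabled;
-# CFLAGS passing is an assumption and may need adapting to your build setup
-CFLAGS="-DNETDATA_DEV_MODE" ./netdata-installer.sh --dont-wait
-```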
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_processes.md b/collectors/ebpf.plugin/integrations/ebpf_processes.md
deleted file mode 100644
index 62542359a..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_processes.md
+++ /dev/null
@@ -1,187 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_processes.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF Processes"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF Processes
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: processes
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor calls to the functions that create tasks (threads and processes) inside the Linux kernel.
-
-Attach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread will add overhead every time that an internal kernel function monitored by this thread is called.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per eBPF Processes instance
-
-These metrics show the total number of calls to functions inside the kernel.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.process_thread | process | calls/s |
-| system.process_status | process, zombie | difference |
-| system.exit | process | calls/s |
-| system.task_error | task | calls/s |
-
-### Per apps
-
-These metrics show grouped information per application group.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| app_group | The name of the group defined in the configuration. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| app.process_create | calls | calls/s |
-| app.thread_create | call | calls/s |
-| app.task_exit | call | calls/s |
-| app.task_close | call | calls/s |
-| app.task_error | app | calls/s |
-
-### Per cgroup
-
-These metrics show grouped information per cgroup/service.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.process_create | process | calls/s |
-| cgroup.thread_create | thread | calls/s |
-| cgroup.task_exit | exit | calls/s |
-| cgroup.task_close | process | calls/s |
-| cgroup.task_error | process | calls/s |
-| services.process_create | a dimension per systemd service | calls/s |
-| services.thread_create | a dimension per systemd service | calls/s |
-| services.task_close | a dimension per systemd service | calls/s |
-| services.task_exit | a dimension per systemd service | calls/s |
-| services.task_error | a dimension per systemd service | calls/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to `/usr/src/linux/.config`.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-#### Debug Filesystem
-
-This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/process.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/process.conf
-```
-#### Options
-
-All options are defined inside the `[global]` section.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |
-| apps | Enable or disable integration with apps.plugin. | no | no |
-| cgroups | Enable or disable integration with cgroup.plugin. | no | no |
-| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |
-| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |
-| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). This plugin always tries to attach a tracepoint, so this option only impacts the functions used to monitor task (thread and process) creation. | trampoline | no |
-| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping centralized information. | yes | no |
-| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |
-
-</details>
-
-#### Examples
-No configuration examples ship with this collector.
-
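-As an illustration only, a `[global]` section built from the documented defaults might look like this:
-
-```bash
-# illustrative ebpf.d/process.conf using the documented defaults;
-# /etc/netdata may be /opt/netdata/etc/netdata on static installs
-cat << 'EOF' | sudo tee /etc/netdata/ebpf.d/process.conf
-[global]
-    update every = 5
-    ebpf load mode = entry
-    apps = no
-    cgroups = no
-    pid table size = 32768
-    ebpf type format = auto
-    ebpf co-re tracing = trampoline
-    maps per core = yes
-    lifetime = 300
-EOF
-```
-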
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_shm.md b/collectors/ebpf.plugin/integrations/ebpf_shm.md
deleted file mode 100644
index ffa05c770..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_shm.md
+++ /dev/null
@@ -1,185 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_shm.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF SHM"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF SHM
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: shm
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor the syscalls responsible for manipulating shared memory.
-
-Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread adds overhead every time one of the kernel functions it monitors is called. The estimated additional latency is 90-200ms per call on kernels that do not have BTF support.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per cgroup
-
-These metrics show grouped information per cgroup/service.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.shmget | get | calls/s |
-| cgroup.shmat | at | calls/s |
-| cgroup.shmdt | dt | calls/s |
-| cgroup.shmctl | ctl | calls/s |
-| services.shmget | a dimension per systemd service | calls/s |
-| services.shmat | a dimension per systemd service | calls/s |
-| services.shmdt | a dimension per systemd service | calls/s |
-| services.shmctl | a dimension per systemd service | calls/s |
-
-### Per apps
-
-These metrics show grouped information per application group.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| app_group | The name of the group defined in the configuration. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| app.ebpf_shmget_call | calls | calls/s |
-| app.ebpf_shmat_call | calls | calls/s |
-| app.ebpf_shmdt_call | calls | calls/s |
-| app.ebpf_shmctl_call | calls | calls/s |
-
-### Per eBPF SHM instance
-
-These metrics show the number of calls for the monitored syscalls.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.shared_memory_calls | get, at, dt, ctl | calls/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to `/usr/src/linux/.config`.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-#### Debug Filesystem
-
-This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/shm.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/shm.conf
-```
-#### Options
-
-This configuration file has two sections. The `[global]` section overwrites the default options, while `[syscalls]` allows the user to select which syscalls to monitor.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |
-| apps | Enable or disable integration with apps.plugin. | no | no |
-| cgroups | Enable or disable integration with cgroup.plugin. | no | no |
-| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |
-| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |
-| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |
-| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping centralized information. | yes | no |
-| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |
-| shmget | Enable or disable monitoring for the `shmget` syscall. | yes | no |
-| shmat | Enable or disable monitoring for the `shmat` syscall. | yes | no |
-| shmdt | Enable or disable monitoring for the `shmdt` syscall. | yes | no |
-| shmctl | Enable or disable monitoring for the `shmctl` syscall. | yes | no |
-
-</details>
-
-#### Examples
-No configuration examples ship with this collector.
-
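-As an illustration only, a sketch combining the documented `[global]` defaults with the `[syscalls]` toggles might look like this:
-
-```bash
-# illustrative ebpf.d/shm.conf: documented defaults plus [syscalls] toggles
-cat << 'EOF' | sudo tee /etc/netdata/ebpf.d/shm.conf
-[global]
-    update every = 5
-    ebpf load mode = entry
-    apps = no
-    cgroups = no
-[syscalls]
-    shmget = yes
-    shmat = yes
-    shmdt = yes
-    shmctl = yes
-EOF
-```
-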
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_socket.md b/collectors/ebpf.plugin/integrations/ebpf_socket.md
deleted file mode 100644
index dc7a7d07b..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_socket.md
+++ /dev/null
@@ -1,201 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_socket.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF Socket"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF Socket
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: socket
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor bandwidth consumption per application for the TCP and UDP protocols.
-
-Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread adds overhead every time one of the kernel functions it monitors is called. The estimated additional latency is 90-200ms per call on kernels that do not have BTF support.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per eBPF Socket instance
-
-These metrics show the total number of calls to functions inside the kernel.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ip.inbound_conn | connection_tcp | connections/s |
-| ip.tcp_outbound_conn | received | connections/s |
-| ip.tcp_functions | received, send, closed | calls/s |
-| ip.total_tcp_bandwidth | received, send | kilobits/s |
-| ip.tcp_error | received, send | calls/s |
-| ip.tcp_retransmit | retransmited | calls/s |
-| ip.udp_functions | received, send | calls/s |
-| ip.total_udp_bandwidth | received, send | kilobits/s |
-| ip.udp_error | received, send | calls/s |
-
-### Per apps
-
-These metrics show grouped information per application group.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| app_group | The name of the group defined in the configuration. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| app.ebpf_call_tcp_v4_connection | connections | connections/s |
-| app.app.ebpf_call_tcp_v6_connection | connections | connections/s |
-| app.ebpf_sock_bytes_sent | bandwidth | kilobits/s |
-| app.ebpf_sock_bytes_received | bandwidth | kilobits/s |
-| app.ebpf_call_tcp_sendmsg | calls | calls/s |
-| app.ebpf_call_tcp_cleanup_rbuf | calls | calls/s |
-| app.ebpf_call_tcp_retransmit | calls | calls/s |
-| app.ebpf_call_udp_sendmsg | calls | calls/s |
-| app.ebpf_call_udp_recvmsg | calls | calls/s |
-
-### Per cgroup
-
-These metrics show grouped information per cgroup/service.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.net_conn_ipv4 | connected_v4 | connections/s |
-| cgroup.net_conn_ipv6 | connected_v6 | connections/s |
-| cgroup.net_bytes_recv | received | calls/s |
-| cgroup.net_bytes_sent | sent | calls/s |
-| cgroup.net_tcp_recv | received | calls/s |
-| cgroup.net_tcp_send | sent | calls/s |
-| cgroup.net_retransmit | retransmitted | calls/s |
-| cgroup.net_udp_send | sent | calls/s |
-| cgroup.net_udp_recv | received | calls/s |
-| services.net_conn_ipv6 | a dimension per systemd service | connections/s |
-| services.net_bytes_recv | a dimension per systemd service | kilobits/s |
-| services.net_bytes_sent | a dimension per systemd service | kilobits/s |
-| services.net_tcp_recv | a dimension per systemd service | calls/s |
-| services.net_tcp_send | a dimension per systemd service | calls/s |
-| services.net_tcp_retransmit | a dimension per systemd service | calls/s |
-| services.net_udp_send | a dimension per systemd service | calls/s |
-| services.net_udp_recv | a dimension per systemd service | calls/s |
-
-
-
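-Once the thread is running, a hedged way to confirm these charts are being populated is to query one of them through Netdata's REST API:
-
-```bash
-# ip.tcp_functions is one of the instance-level charts listed above
-curl 'http://localhost:19999/api/v1/data?chart=ip.tcp_functions&points=5'
-```
-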
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to `/usr/src/linux/.config`.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/network.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/network.conf
-```
-#### Options
-
-All options are defined inside the `[global]` section. Options inside `network connections` are currently ignored.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |
-| apps | Enable or disable integration with apps.plugin. | no | no |
-| cgroups | Enable or disable integration with cgroup.plugin. | no | no |
-| bandwidth table size | Number of elements stored inside hash tables used to monitor calls per PID. | 16384 | no |
-| ipv4 connection table size | Number of elements stored inside hash tables used to monitor calls per IPV4 connection. | 16384 | no |
-| ipv6 connection table size | Number of elements stored inside hash tables used to monitor calls per IPV6 connection. | 16384 | no |
-| udp connection table size | Number of temporary elements stored inside hash tables used to monitor UDP connections. | 4096 | no |
-| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |
-| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |
-| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping centralized information. | yes | no |
-| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |
-
-</details>
-
-#### Examples
-No configuration examples ship with this collector.
-
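-As an illustration only, a sketch focused on the hash table sizing options (values are the documented defaults) might look like this:
-
-```bash
-# illustrative ebpf.d/network.conf focused on the table sizing options
-cat << 'EOF' | sudo tee /etc/netdata/ebpf.d/network.conf
-[global]
-    update every = 5
-    bandwidth table size = 16384
-    ipv4 connection table size = 16384
-    ipv6 connection table size = 16384
-    udp connection table size = 4096
-EOF
-```
-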
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_softirq.md b/collectors/ebpf.plugin/integrations/ebpf_softirq.md
deleted file mode 100644
index 6a4312c6e..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_softirq.md
+++ /dev/null
@@ -1,137 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_softirq.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF SoftIRQ"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF SoftIRQ
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: softirq
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor the latency of each available SoftIRQ.
-
-Attach kprobes to internal kernel functions.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread will add overhead every time that an internal kernel function monitored by this thread is called.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per eBPF SoftIRQ instance
-
-These metrics show the latency of each softIRQ available on the host.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.softirq_latency | soft IRQs | milliseconds |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to `/usr/src/linux/.config`.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-#### Debug Filesystem
-
-This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/softirq.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/softirq.conf
-```
-#### Options
-
-All options are defined inside the `[global]` section.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |
-| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_swap.md b/collectors/ebpf.plugin/integrations/ebpf_swap.md
deleted file mode 100644
index ce2423f8d..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_swap.md
+++ /dev/null
@@ -1,170 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_swap.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF SWAP"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF SWAP
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: swap
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor swap I/O events and the applications that trigger them.
-
-Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread adds overhead every time one of the kernel functions it monitors is called. The estimated additional latency is 90-200ms per call on kernels that do not have BTF support.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per cgroup
-
-These metrics show grouped information per cgroup/service.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.swap_read | read | calls/s |
-| cgroup.swap_write | write | calls/s |
-| services.swap_read | a dimension per systemd service | calls/s |
-| services.swap_write | a dimension per systemd service | calls/s |
-
-### Per apps
-
-These metrics show grouped information per application group.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| app_group | The name of the group defined in the configuration. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| app.ebpf_call_swap_readpage | a dimension per app group | calls/s |
-| app.ebpf_call_swap_writepage | a dimension per app group | calls/s |
-
-### Per eBPF SWAP instance
-
-These metrics show the total number of calls to functions inside the kernel.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.swapcalls | write, read | calls/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to `/usr/src/linux/.config`.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/swap.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/swap.conf
-```
-#### Options
-
-All options are defined inside the `[global]` section.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |
-| apps | Enable or disable integration with apps.plugin. | no | no |
-| cgroups | Enable or disable integration with cgroup.plugin. | no | no |
-| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |
-| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |
-| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |
-| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping centralized information. | yes | no |
-| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_sync.md b/collectors/ebpf.plugin/integrations/ebpf_sync.md
deleted file mode 100644
index 6f6c246a7..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_sync.md
+++ /dev/null
@@ -1,157 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_sync.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF Sync"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF Sync
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: sync
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor the syscalls responsible for moving data from memory to the storage device.
-
-Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread adds overhead every time one of the kernel functions it monitors is called. The estimated additional latency is 90-200ms per call on kernels that do not have BTF support.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per eBPF Sync instance
-
-These metrics show the total number of calls to functions inside the kernel.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.file_sync | fsync, fdatasync | calls/s |
-| mem.meory_map | msync | calls/s |
-| mem.sync | sync, syncfs | calls/s |
-| mem.file_segment | sync_file_range | calls/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ sync_freq ](https://github.com/netdata/netdata/blob/master/health/health.d/synchronization.conf) | mem.sync | number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems. |
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
-When the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
-with different names.
-
-Now follow these steps:
-1. Copy the configuration file to `/usr/src/linux/.config`.
-2. Select the necessary options: `make oldconfig`
-3. Compile your kernel image: `make bzImage`
-4. Compile your modules: `make modules`
-5. Copy your new kernel image to the boot loader directory.
-6. Install the new modules: `make modules_install`
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
-
-
-#### Debug Filesystem
-
-This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/sync.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/sync.conf
-```
-#### Options
-
-This configuration file has two sections. The `[global]` section overwrites the default options, while `[syscalls]` allows the user to select which syscalls to monitor.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |
-| apps | Enable or disable integration with apps.plugin. | no | no |
-| cgroups | Enable or disable integration with cgroup.plugin. | no | no |
-| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |
-| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |
-| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |
-| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping centralized information. | yes | no |
-| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |
-| sync | Enable or disable monitoring for the `sync` syscall. | yes | no |
-| msync | Enable or disable monitoring for the `msync` syscall. | yes | no |
-| fsync | Enable or disable monitoring for the `fsync` syscall. | yes | no |
-| fdatasync | Enable or disable monitoring for the `fdatasync` syscall. | yes | no |
-| syncfs | Enable or disable monitoring for the `syncfs` syscall. | yes | no |
-| sync_file_range | Enable or disable monitoring for the `sync_file_range` syscall. | yes | no |
-
-</details>
-
-#### Examples
-No configuration examples ship with this collector.
-
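-As an illustration only, a sketch that keeps the documented defaults but disables monitoring of one syscall via the `[syscalls]` section might look like this:
-
-```bash
-# illustrative ebpf.d/sync.conf: defaults, with sync_file_range disabled
-cat << 'EOF' | sudo tee /etc/netdata/ebpf.d/sync.conf
-[global]
-    update every = 5
-    ebpf load mode = entry
-[syscalls]
-    sync_file_range = no
-EOF
-```
-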
-
diff --git a/collectors/ebpf.plugin/integrations/ebpf_vfs.md b/collectors/ebpf.plugin/integrations/ebpf_vfs.md
deleted file mode 100644
index 4b824e975..000000000
--- a/collectors/ebpf.plugin/integrations/ebpf_vfs.md
+++ /dev/null
@@ -1,212 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_vfs.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
-sidebar_label: "eBPF VFS"
-learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# eBPF VFS
-
-
-<img src="https://netdata.cloud/img/ebpf.jpg" width="150"/>
-
-
-Plugin: ebpf.plugin
-Module: vfs
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor I/O events on the Linux Virtual Filesystem.
-
-Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.
-
-### Default Behavior
-
-#### Auto-Detection
-
-The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-This thread adds overhead every time one of the kernel functions it monitors is called. The estimated additional latency is 90-200ms per call on kernels that do not have BTF support.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per cgroup
-
-These metrics show grouped information per cgroup/service.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cgroup.vfs_unlink | delete | calls/s |
-| cgroup.vfs_write | write | calls/s |
-| cgroup.vfs_write_error | write | calls/s |
-| cgroup.vfs_read | read | calls/s |
-| cgroup.vfs_read_error | read | calls/s |
-| cgroup.vfs_write_bytes | write | bytes/s |
-| cgroup.vfs_read_bytes | read | bytes/s |
-| cgroup.vfs_fsync | fsync | calls/s |
-| cgroup.vfs_fsync_error | fsync | calls/s |
-| cgroup.vfs_open | open | calls/s |
-| cgroup.vfs_open_error | open | calls/s |
-| cgroup.vfs_create | create | calls/s |
-| cgroup.vfs_create_error | create | calls/s |
-| services.vfs_unlink | a dimension per systemd service | calls/s |
-| services.vfs_write | a dimension per systemd service | calls/s |
-| services.vfs_write_error | a dimension per systemd service | calls/s |
-| services.vfs_read | a dimension per systemd service | calls/s |
-| services.vfs_read_error | a dimension per systemd service | calls/s |
-| services.vfs_write_bytes | a dimension per systemd service | bytes/s |
-| services.vfs_read_bytes | a dimension per systemd service | bytes/s |
-| services.vfs_fsync | a dimension per systemd service | calls/s |
-| services.vfs_fsync_error | a dimension per systemd service | calls/s |
-| services.vfs_open | a dimension per systemd service | calls/s |
-| services.vfs_open_error | a dimension per systemd service | calls/s |
-| services.vfs_create | a dimension per systemd service | calls/s |
-| services.vfs_create_error | a dimension per systemd service | calls/s |
-
-### Per eBPF VFS instance
-
-These metrics show the total number of calls to functions inside the kernel.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| filesystem.vfs_deleted_objects | delete | calls/s |
-| filesystem.vfs_io | read, write | calls/s |
-| filesystem.vfs_io_bytes | read, write | bytes/s |
-| filesystem.vfs_io_error | read, write | calls/s |
-| filesystem.vfs_fsync | fsync | calls/s |
-| filesystem.vfs_fsync_error | fsync | calls/s |
-| filesystem.vfs_open | open | calls/s |
-| filesystem.vfs_open_error | open | calls/s |
-| filesystem.vfs_create | create | calls/s |
-| filesystem.vfs_create_error | create | calls/s |
-
-### Per apps
-
-These metrics show grouped information per apps group.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| app_group | The name of the group defined in the configuration. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| app.ebpf_call_vfs_unlink | calls | calls/s |
-| app.ebpf_call_vfs_write | calls | calls/s |
-| app.ebpf_call_vfs_write_error | calls | calls/s |
-| app.ebpf_call_vfs_read | calls | calls/s |
-| app.ebpf_call_vfs_read_error | calls | calls/s |
-| app.ebpf_call_vfs_write_bytes | writes | bytes/s |
-| app.ebpf_call_vfs_read_bytes | reads | bytes/s |
-| app.ebpf_call_vfs_fsync | calls | calls/s |
-| app.ebpf_call_vfs_fsync_error | calls | calls/s |
-| app.ebpf_call_vfs_open | calls | calls/s |
-| app.ebpf_call_vfs_open_error | calls | calls/s |
-| app.ebpf_call_vfs_create | calls | calls/s |
-| app.ebpf_call_vfs_create_error | calls | calls/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Compile kernel
-
-Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your `/boot/config` file. Some of the cited names can differ according to the preferences of your Linux distribution.
-When these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files with different names.
-
-Now follow these steps (a shell sketch follows the list):
-1. Copy the configuration file to `/usr/src/linux/.config`.
-2. Select the necessary options: `make oldconfig`.
-3. Compile your kernel image: `make bzImage`.
-4. Compile your modules: `make modules`.
-5. Copy your new kernel image to your boot loader directory.
-6. Install the new modules: `make modules_install`.
-7. Generate an initial ramdisk image (`initrd`) if necessary.
-8. Update your boot loader.
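-
-As a hedged illustration only, the steps above might look like the shell session below. The paths, kernel image location, initrd tool, and GRUB command are assumptions that vary by distribution and architecture:
-
-```bash
-# Sketch only -- adapt paths, kernel version, and boot-loader tooling to your distribution.
-cd /usr/src/linux
-cp /boot/config-"$(uname -r)" .config     # assumption: the distro ships its config in /boot
-make oldconfig                            # select the necessary options
-make bzImage                              # compile the kernel image
-make modules                              # compile the modules
-sudo make modules_install                 # install the new modules
-sudo cp arch/x86/boot/bzImage /boot/vmlinuz-custom   # x86 image path assumed
-# Generate an initrd if needed; the tool varies (dracut, mkinitcpio, update-initramfs).
-# Then update the boot loader, e.g. with GRUB:
-sudo grub-mkconfig -o /boot/grub/grub.cfg
-```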
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ebpf.d/vfs.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ebpf.d/vfs.conf
-```
-#### Options
-
-All options are defined inside section `[global]`.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 5 | no |
-| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |
-| apps | Enable or disable integration with apps.plugin | no | no |
-| cgroups | Enable or disable integration with cgroups.plugin | no | no |
-| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |
-| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |
-| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |
-| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |
-| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
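-
-Though none ship with the collector, a minimal `ebpf.d/vfs.conf` matching the options above might look like the sketch below. It is an assumption assembled from the option names and documented defaults in this page, not a file from the Netdata repository:
-
-```text
-[global]
-    update every = 5
-    ebpf load mode = entry
-    apps = no
-    cgroups = no
-    pid table size = 32768
-    ebpf type format = auto
-    ebpf co-re tracing = trampoline
-    maps per core = yes
-    lifetime = 300
-```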
-
-
diff --git a/collectors/ebpf.plugin/metadata.yaml b/collectors/ebpf.plugin/metadata.yaml
deleted file mode 100644
index 97b5df389..000000000
--- a/collectors/ebpf.plugin/metadata.yaml
+++ /dev/null
@@ -1,3320 +0,0 @@
-plugin_name: ebpf.plugin
-modules:
- - meta:
- plugin_name: ebpf.plugin
- module_name: filedescriptor
- monitored_instance:
- name: eBPF Filedescriptor
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - file
- - eBPF
- - fd
- - open
- - close
- most_popular: false
- overview:
- data_collection:
-        metrics_description: "Monitor calls to the functions responsible for opening or closing a file descriptor, and possible errors."
-        method_description: "Attach tracing (kprobe and trampoline) to internal kernel functions, according to the options used to compile the kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-        description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
-        description: "Depending on the kernel version and on how often files are opened and closed, this thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-            Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your `/boot/config` file. Some of the cited names can differ according to the preferences of your Linux distribution.
-            When these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files with different names.
-
-            Now follow these steps:
-            1. Copy the configuration file to `/usr/src/linux/.config`.
-            2. Select the necessary options: `make oldconfig`.
-            3. Compile your kernel image: `make bzImage`.
-            4. Compile your modules: `make modules`.
-            5. Copy your new kernel image to your boot loader directory.
-            6. Install the new modules: `make modules_install`.
-            7. Generate an initial ramdisk image (`initrd`) if necessary.
-            8. Update your boot loader.
- configuration:
- file:
- name: "ebpf.d/fd.conf"
-          description: "Overwrite the default configuration to help reduce memory usage. You can also select which charts are visible on the dashboard."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-              description: Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
-              description: Enable or disable integration with cgroups.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
-              description: "Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-              description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
-              description: Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized.
- default_value: yes
- required: false
- - name: lifetime
-              description: Set the default lifetime for the thread when it is enabled by Netdata Cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
-        description: "These metrics show grouped information per cgroup/service."
- labels: []
- metrics:
- - name: cgroup.fd_open
- description: Number of open files
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: cgroup.fd_open_error
- description: Fails to open files
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: cgroup.fd_closed
- description: Files closed
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: close
- - name: cgroup.fd_close_error
- description: Fails to close files
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: close
- - name: services.file_open
- description: Number of open files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.file_open_error
- description: Fails to open files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.file_closed
- description: Files closed
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.file_close_error
- description: Fails to close files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: global
- description: "These metrics show total number of calls to functions inside kernel."
- labels: []
- metrics:
- - name: filesystem.file_descriptor
- description: Open and close calls
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: close
- - name: filesystem.file_error
- description: Open fails
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: close
- - name: apps
-        description: "These metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_file_open
- description: Number of open files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_file_open_error
- description: Fails to open files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_file_closed
- description: Files closed
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_file_close_error
- description: Fails to close files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - meta:
- plugin_name: ebpf.plugin
- module_name: processes
- monitored_instance:
- name: eBPF Processes
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - thread
- - fork
- - process
- - eBPF
- most_popular: false
- overview:
- data_collection:
-        metrics_description: "Monitor calls to the functions that create tasks (threads and processes) inside the Linux kernel."
- method_description: "Attach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-        description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-            Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your `/boot/config` file. Some of the cited names can differ according to the preferences of your Linux distribution.
-            When these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files with different names.
-
-            Now follow these steps:
-            1. Copy the configuration file to `/usr/src/linux/.config`.
-            2. Select the necessary options: `make oldconfig`.
-            3. Compile your kernel image: `make bzImage`.
-            4. Compile your modules: `make modules`.
-            5. Copy your new kernel image to your boot loader directory.
-            6. Install the new modules: `make modules_install`.
-            7. Generate an initial ramdisk image (`initrd`) if necessary.
-            8. Update your boot loader.
- - title: Debug Filesystem
- description: |
-            This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
- configuration:
- file:
- name: "ebpf.d/process.conf"
-          description: "Overwrite the default configuration to help reduce memory usage. You can also select which charts are visible on the dashboard."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-              description: Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
-              description: Enable or disable integration with cgroups.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
-              description: "Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-              description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). This plugin will always try to attach a tracepoint, so the option here only affects the function used to monitor task (thread and process) creation."
- default_value: trampoline
- required: false
- - name: maps per core
-              description: Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized.
- default_value: yes
- required: false
- - name: lifetime
-              description: Set the default lifetime for the thread when it is enabled by Netdata Cloud.
- default_value: 300
- required: false
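-            # Illustrative sketch only (a comment, not part of the metadata schema):
-            # given the options above, enabling the per-application charts listed below
-            # could be done in ebpf.d/process.conf with, for example:
-            #   [global]
-            #       update every = 5
-            #       apps = yes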
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show total number of calls to functions inside kernel."
- labels: []
- metrics:
- - name: system.process_thread
- description: Start process
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: system.process_status
- description: Process not closed
- unit: "difference"
- chart_type: line
- dimensions:
- - name: process
- - name: zombie
- - name: system.exit
- description: Exit process
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: system.task_error
- description: Fails to create process
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: task
- - name: apps
-        description: "These metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.process_create
- description: Process started
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.thread_create
- description: Threads started
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: call
- - name: app.task_exit
- description: Tasks starts exit process
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: call
- - name: app.task_close
- description: Tasks closed
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: call
- - name: app.task_error
- description: Errors to create process or threads
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: app
- - name: cgroup
-        description: "These metrics show grouped information per cgroup/service."
- labels: []
- metrics:
- - name: cgroup.process_create
- description: Process started
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: cgroup.thread_create
- description: Threads started
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: thread
- - name: cgroup.task_exit
- description: Tasks starts exit process
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: exit
- - name: cgroup.task_close
- description: Tasks closed
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: cgroup.task_error
- description: Errors to create process or threads
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: services.process_create
- description: Process started
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.thread_create
- description: Threads started
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.task_close
-              description: Tasks closed
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.task_exit
-              description: Tasks starts exit process
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.task_error
- description: Errors to create process or threads
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - meta:
- plugin_name: ebpf.plugin
- module_name: disk
- monitored_instance:
- name: eBPF Disk
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - hard Disk
- - eBPF
- - latency
- - partition
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Measure latency for I/O events on disk."
- method_description: "Attach tracepoints to internal kernel functions."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-        description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-            Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your `/boot/config` file. Some of the cited names can differ according to the preferences of your Linux distribution.
-            When these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files with different names.
-
-            Now follow these steps:
-            1. Copy the configuration file to `/usr/src/linux/.config`.
-            2. Select the necessary options: `make oldconfig`.
-            3. Compile your kernel image: `make bzImage`.
-            4. Compile your modules: `make modules`.
-            5. Copy your new kernel image to your boot loader directory.
-            6. Install the new modules: `make modules_install`.
-            7. Generate an initial ramdisk image (`initrd`) if necessary.
-            8. Update your boot loader.
- - title: Debug Filesystem
- description: |
-            This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
- configuration:
- file:
- name: "ebpf.d/disk.conf"
-          description: "Overwrite the default configuration to reduce the number of I/O events."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-              description: Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`).
- default_value: entry
- required: false
- - name: lifetime
-              description: Set the default lifetime for the thread when it is enabled by Netdata Cloud.
- default_value: 300
- required: false
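-            # Illustrative sketch only (a comment, not part of the metadata schema):
-            # a matching ebpf.d/disk.conf using the documented defaults above would be:
-            #   [global]
-            #       update every = 5
-            #       ebpf load mode = entry
-            #       lifetime = 300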
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: disk
- description: "These metrics measure latency for I/O events on every hard disk present on host."
- labels: []
- metrics:
- - name: disk.latency_io
- description: Disk latency
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency
- - meta:
- plugin_name: ebpf.plugin
- module_name: hardirq
- monitored_instance:
- name: eBPF Hardirq
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - HardIRQ
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor latency for each HardIRQ available."
- method_description: "Attach tracepoints to internal kernel functions."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-        description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-            Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your `/boot/config` file. Some of the cited names can differ according to the preferences of your Linux distribution.
-            When these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files with different names.
-
-            Now follow these steps:
-            1. Copy the configuration file to `/usr/src/linux/.config`.
-            2. Select the necessary options: `make oldconfig`.
-            3. Compile your kernel image: `make bzImage`.
-            4. Compile your modules: `make modules`.
-            5. Copy your new kernel image to your boot loader directory.
-            6. Install the new modules: `make modules_install`.
-            7. Generate an initial ramdisk image (`initrd`) if necessary.
-            8. Update your boot loader.
- - title: Debug Filesystem
- description: |
-            This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
- configuration:
- file:
- name: "ebpf.d/hardirq.conf"
-          description: "Overwrite the default configuration to reduce the number of I/O events."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-              description: Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`).
- default_value: entry
- required: false
- - name: lifetime
-              description: Set the default lifetime for the thread when it is enabled by Netdata Cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show latest timestamp for each hardIRQ available on host."
- labels: []
- metrics:
- - name: system.hardirq_latency
- description: Hard IRQ latency
- unit: "milliseconds"
- chart_type: stacked
- dimensions:
- - name: hardirq names
- - meta:
- plugin_name: ebpf.plugin
- module_name: cachestat
- monitored_instance:
- name: eBPF Cachestat
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - Page cache
- - Hit ratio
- - eBPF
- most_popular: false
- overview:
- data_collection:
-        metrics_description: "Monitor Linux page cache events, giving users a general view of how the kernel manipulates files."
-        method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-        description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
-        description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-            Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your `/boot/config` file. Some of the cited names can differ according to the preferences of your Linux distribution.
-            When these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files with different names.
-
-            Now follow these steps:
-            1. Copy the configuration file to `/usr/src/linux/.config`.
-            2. Select the necessary options: `make oldconfig`.
-            3. Compile your kernel image: `make bzImage`.
-            4. Compile your modules: `make modules`.
-            5. Copy your new kernel image to your boot loader directory.
-            6. Install the new modules: `make modules_install`.
-            7. Generate an initial ramdisk image (`initrd`) if necessary.
-            8. Update your boot loader.
- configuration:
- file:
- name: "ebpf.d/cachestat.conf"
-          description: "Overwrite the default configuration to help reduce memory usage. You can also select which charts are visible on the dashboard."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-              description: Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
-              description: Enable or disable integration with cgroups.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
-              description: "Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-              description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
-              description: Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized.
- default_value: yes
- required: false
- - name: lifetime
-              description: Set the default lifetime for the thread when it is enabled by Netdata Cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show total number of calls to functions inside kernel."
- labels: []
- metrics:
- - name: mem.cachestat_ratio
- description: Hit ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: mem.cachestat_dirties
- description: Number of dirty pages
- unit: "page/s"
- chart_type: line
- dimensions:
- - name: dirty
- - name: mem.cachestat_hits
- description: Number of accessed files
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: hit
- - name: mem.cachestat_misses
- description: Files out of page cache
- unit: "misses/s"
- chart_type: line
- dimensions:
- - name: miss
- - name: apps
-        description: "These metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_cachestat_hit_ratio
- description: Hit ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: app.ebpf_cachestat_dirty_pages
- description: Number of dirty pages
- unit: "page/s"
- chart_type: stacked
- dimensions:
- - name: pages
- - name: app.ebpf_cachestat_access
- description: Number of accessed files
- unit: "hits/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: app.ebpf_cachestat_misses
- description: Files out of page cache
- unit: "misses/s"
- chart_type: stacked
- dimensions:
- - name: misses
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.cachestat_ratio
- description: Hit ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: cgroup.cachestat_dirties
- description: Number of dirty pages
- unit: "page/s"
- chart_type: line
- dimensions:
- - name: dirty
- - name: cgroup.cachestat_hits
- description: Number of accessed files
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: hit
- - name: cgroup.cachestat_misses
- description: Files out of page cache
- unit: "misses/s"
- chart_type: line
- dimensions:
- - name: miss
- - name: services.cachestat_ratio
- description: Hit ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.cachestat_dirties
- description: Number of dirty pages
- unit: "page/s"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.cachestat_hits
- description: Number of accessed files
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.cachestat_misses
- description: Files out of page cache
- unit: "misses/s"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - meta:
- plugin_name: ebpf.plugin
- module_name: sync
- monitored_instance:
- name: eBPF Sync
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - syscall
- - eBPF
- - hard disk
- - memory
- most_popular: false
- overview:
- data_collection:
-        metrics_description: "Monitor the syscalls responsible for moving data from memory to storage devices."
-        method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-        description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
-        description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-            Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your `/boot/config` file. Some of the cited names can differ according to the preferences of your Linux distribution.
-            When these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files with different names.
-
-            Now follow these steps:
-            1. Copy the configuration file to `/usr/src/linux/.config`.
-            2. Select the necessary options: `make oldconfig`.
-            3. Compile your kernel image: `make bzImage`.
-            4. Compile your modules: `make modules`.
-            5. Copy your new kernel image to your boot loader directory.
-            6. Install the new modules: `make modules_install`.
-            7. Generate an initial ramdisk image (`initrd`) if necessary.
-            8. Update your boot loader.
- - title: Debug Filesystem
- description: |
-            This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).
- configuration:
- file:
- name: "ebpf.d/sync.conf"
-          description: "Overwrite the default configuration and select which charts are visible on the dashboard."
- options:
- description: |
-            This configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select which syscalls to monitor.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-              description: Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
-              description: Enable or disable integration with cgroups.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
-              description: "Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-              description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
-              description: Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized.
- default_value: yes
- required: false
- - name: lifetime
-              description: Set the default lifetime for the thread when it is enabled by Netdata Cloud.
- default_value: 300
- required: false
- - name: sync
- description: Enable or disable monitoring for syscall `sync`
- default_value: yes
- required: false
- - name: msync
- description: Enable or disable monitoring for syscall `msync`
- default_value: yes
- required: false
- - name: fsync
- description: Enable or disable monitoring for syscall `fsync`
- default_value: yes
- required: false
- - name: fdatasync
- description: Enable or disable monitoring for syscall `fdatasync`
- default_value: yes
- required: false
- - name: syncfs
- description: Enable or disable monitoring for syscall `syncfs`
- default_value: yes
- required: false
- - name: sync_file_range
- description: Enable or disable monitoring for syscall `sync_file_range`
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: sync_freq
- link: https://github.com/netdata/netdata/blob/master/health/health.d/synchronization.conf
- metric: mem.sync
- info:
- number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the
- underlying filesystems.
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show total number of calls to functions inside kernel."
- labels: []
- metrics:
- - name: mem.file_sync
- description: Monitor calls to fsync(2) and fdatasync(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: fsync
- - name: fdatasync
- - name: mem.meory_map
- description: Monitor calls to msync(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: msync
- - name: mem.sync
- description: Monitor calls to sync(2) and syncfs(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sync
- - name: syncfs
- - name: mem.file_segment
- description: Monitor calls to sync_file_range(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sync_file_range
- - meta:
- plugin_name: ebpf.plugin
- module_name: mdflush
- monitored_instance:
- name: eBPF MDflush
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - MD
- - RAID
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor when flush events happen between disks."
- method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-        description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
-        description: "This thread will add overhead every time that `md_flush_request` is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-            Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your `/boot/config` file. Some of the cited names can differ according to the preferences of your Linux distribution.
-            When these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files with different names.
-
-            Now follow these steps:
-            1. Copy the configuration file to `/usr/src/linux/.config`.
-            2. Select the necessary options: `make oldconfig`.
-            3. Compile your kernel image: `make bzImage`.
-            4. Compile your modules: `make modules`.
-            5. Copy your new kernel image to your boot loader directory.
-            6. Install the new modules: `make modules_install`.
-            7. Generate an initial ramdisk image (`initrd`) if necessary.
-            8. Update your boot loader.
- configuration:
- file:
- name: "ebpf.d/mdflush.conf"
-          description: "Overwrite the default configuration to reduce I/O events."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-              description: Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`).
- default_value: entry
- required: false
- - name: lifetime
-              description: Set the default lifetime for the thread when it is enabled by Netdata Cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
-        description: "Number of times `md_flush_request` was called since the previous data collection."
- labels: []
- metrics:
- - name: mdstat.mdstat_flush
- description: MD flushes
- unit: "flushes"
- chart_type: stacked
- dimensions:
- - name: disk
- - meta:
- plugin_name: ebpf.plugin
- module_name: swap
- monitored_instance:
- name: eBPF SWAP
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - SWAP
- - memory
- - eBPF
- - Hard Disk
- most_popular: false
- overview:
- data_collection:
-        metrics_description: "Monitor when swap I/O events happen and which applications trigger them."
-        method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-        description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
-        description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200 ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-            Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your `/boot/config` file. Some of the cited names can differ according to the preferences of your Linux distribution.
-            When these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files with different names.
-
-            Now follow these steps:
-            1. Copy the configuration file to `/usr/src/linux/.config`.
-            2. Select the necessary options: `make oldconfig`.
-            3. Compile your kernel image: `make bzImage`.
-            4. Compile your modules: `make modules`.
-            5. Copy your new kernel image to your boot loader directory.
-            6. Install the new modules: `make modules_install`.
-            7. Generate an initial ramdisk image (`initrd`) if necessary.
-            8. Update your boot loader.
- configuration:
- file:
- name: "ebpf.d/swap.conf"
-          description: "Overwrite the default configuration to help reduce memory usage. You can also select which charts are visible on the dashboard."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-              description: Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
-              description: Enable or disable integration with cgroups.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
-              description: "Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-              description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
-              description: Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized.
- default_value: yes
- required: false
- - name: lifetime
-              description: Set the default lifetime for the thread when it is enabled by Netdata Cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
-        description: "These metrics show grouped information per cgroup/service."
- labels: []
- metrics:
- - name: cgroup.swap_read
- description: Calls to function swap_readpage.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: cgroup.swap_write
- description: Calls to function swap_writepage.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: write
- - name: services.swap_read
- description: Calls to swap_readpage.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.swap_write
- description: Calls to function swap_writepage.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: apps
-        description: "These metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_call_swap_readpage
- description: Calls to function swap_readpage.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: app.ebpf_call_swap_writepage
- description: Calls to function swap_writepage.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: global
- description: "These metrics show total number of calls to functions inside kernel."
- labels: []
- metrics:
- - name: mem.swapcalls
- description: Calls to access swap memory
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: write
- - name: read
- - meta:
- plugin_name: ebpf.plugin
- module_name: oomkill
- monitored_instance:
- name: eBPF OOMkill
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - application
- - memory
- most_popular: false
- overview:
- data_collection:
-        metrics_description: "Monitor applications that reach out-of-memory conditions."
- method_description: "Attach tracepoint to internal kernel functions."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-        description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-            Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your `/boot/config` file. Some of the cited names can differ according to the preferences of your Linux distribution.
-            When these options are not set, you need to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files with different names.
-
-            Now follow these steps:
-            1. Copy the configuration file to `/usr/src/linux/.config`.
-            2. Select the necessary options: `make oldconfig`.
-            3. Compile your kernel image: `make bzImage`.
-            4. Compile your modules: `make modules`.
-            5. Copy your new kernel image to your boot loader directory.
-            6. Install the new modules: `make modules_install`.
-            7. Generate an initial ramdisk image (`initrd`) if necessary.
-            8. Update your boot loader.
- - title: Debug Filesystem
- description: |
-            This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
- configuration:
- file:
- name: "ebpf.d/oomkill.conf"
-          description: "Overwrite the default configuration to reduce the number of I/O events."
- options:
- description: |
- Overwrite default configuration reducing number of I/O events
- folding:
- title: "Config options"
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
-       list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
-         description: "These metrics show the cgroups/services that reached OOM."
- labels: []
- metrics:
- - name: cgroup.oomkills
-             description: OOM kills. This chart is provided by the eBPF plugin.
- unit: "kills"
- chart_type: line
- dimensions:
- - name: cgroup name
- - name: services.oomkills
-             description: OOM kills. This chart is provided by the eBPF plugin.
- unit: "kills"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: apps
-         description: "These metrics show the applications that reached OOM."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.oomkill
- description: OOM kills
- unit: "kills"
- chart_type: stacked
- dimensions:
- - name: kills
- - meta:
- plugin_name: ebpf.plugin
- module_name: socket
- monitored_instance:
- name: eBPF Socket
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - TCP
- - UDP
- - bandwidth
- - server
- - connection
- - socket
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor bandwidth consumption per application for protocols TCP and UDP."
-       method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-       description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-             Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of each Linux distribution.
-             When these options are not set, it is necessary to get the kernel source code from https://kernel.org, or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
-             with different names.
-
-             Now follow these steps:
-             1. Copy the configuration file to /usr/src/linux/.config.
-             2. Select the necessary options: make oldconfig
-             3. Compile your kernel image: make bzImage
-             4. Compile your modules: make modules
-             5. Copy your new kernel image to the boot loader directory
-             6. Install the new modules: make modules_install
-             7. Generate an initial ramdisk image (`initrd`) if necessary.
-             8. Update your boot loader
- configuration:
- file:
- name: "ebpf.d/network.conf"
-         description: "Overwrite the default configuration to help reduce memory usage. You can also select which charts are visible on the dashboard."
- options:
- description: |
-           All options are defined inside section `[global]`. Options inside `network connections` are ignored for now.
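-
-           For illustration only, a minimal `[global]` section using the defaults listed below could look like this (a sketch, not a complete file):
-
-           ```text
-           [global]
-               update every = 5
-               ebpf load mode = entry
-               apps = no
-               cgroups = no
-               bandwidth table size = 16384
-               ipv4 connection table size = 16384
-               ipv6 connection table size = 16384
-               udp connection table size = 4096
-           ```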
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-             description: Define whether the plugin will monitor only the call (`entry`) for the functions, or also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: bandwidth table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 16384
- required: false
- - name: ipv4 connection table size
-             description: Number of elements stored inside hash tables used to monitor calls per IPv4 connection.
- default_value: 16384
- required: false
- - name: ipv6 connection table size
-             description: Number of elements stored inside hash tables used to monitor calls per IPv6 connection.
- default_value: 16384
- required: false
- - name: udp connection table size
- description: Number of temporary elements stored inside hash tables used to monitor UDP connections.
- default_value: 4096
- required: false
- - name: ebpf type format
-             description: "Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-             description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
-             description: Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of keeping centralized information.
- default_value: yes
- required: false
- - name: lifetime
-             description: Set the default lifetime for the thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
-         description: "These metrics show the total number of calls to functions inside the kernel."
- labels: []
- metrics:
- - name: ip.inbound_conn
- description: Inbound connections.
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: connection_tcp
- - name: ip.tcp_outbound_conn
- description: TCP outbound connections.
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: received
- - name: ip.tcp_functions
- description: Calls to internal functions
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: closed
- - name: ip.total_tcp_bandwidth
- description: TCP bandwidth
- unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: ip.tcp_error
- description: TCP errors
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: ip.tcp_retransmit
-             description: Packets retransmitted
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: retransmited
- - name: ip.udp_functions
- description: UDP calls
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: ip.total_udp_bandwidth
- description: UDP bandwidth
- unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: ip.udp_error
- description: UDP errors
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: apps
- description: "These metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_call_tcp_v4_connection
- description: Calls to tcp_v4_connection
- unit: "connections/s"
- chart_type: stacked
- dimensions:
- - name: connections
-           - name: app.ebpf_call_tcp_v6_connection
- description: Calls to tcp_v6_connection
- unit: "connections/s"
- chart_type: stacked
- dimensions:
- - name: connections
- - name: app.ebpf_sock_bytes_sent
- description: Bytes sent
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: bandwidth
- - name: app.ebpf_sock_bytes_received
-             description: Bytes received
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: bandwidth
- - name: app.ebpf_call_tcp_sendmsg
- description: Calls for tcp_sendmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_tcp_cleanup_rbuf
- description: Calls for tcp_cleanup_rbuf
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_tcp_retransmit
- description: Calls for tcp_retransmit
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_udp_sendmsg
- description: Calls for udp_sendmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_udp_recvmsg
- description: Calls for udp_recvmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.net_conn_ipv4
- description: Calls to tcp_v4_connection
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: connected_v4
- - name: cgroup.net_conn_ipv6
- description: Calls to tcp_v6_connection
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: connected_v6
- - name: cgroup.net_bytes_recv
- description: Bytes received
-             unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: received
- - name: cgroup.net_bytes_sent
- description: Bytes sent
-             unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: cgroup.net_tcp_recv
- description: Calls to tcp_cleanup_rbuf.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: cgroup.net_tcp_send
- description: Calls to tcp_sendmsg.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: cgroup.net_retransmit
- description: Calls to tcp_retransmit.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: retransmitted
- - name: cgroup.net_udp_send
- description: Calls to udp_sendmsg
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: cgroup.net_udp_recv
- description: Calls to udp_recvmsg
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: services.net_conn_ipv6
- description: Calls to tcp_v6_connection
- unit: "connections/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_bytes_recv
- description: Bytes received
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_bytes_sent
- description: Bytes sent
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_tcp_recv
- description: Calls to tcp_cleanup_rbuf.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_tcp_send
- description: Calls to tcp_sendmsg.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_tcp_retransmit
- description: Calls to tcp_retransmit
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_udp_send
- description: Calls to udp_sendmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_udp_recv
- description: Calls to udp_recvmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - meta:
- plugin_name: ebpf.plugin
- module_name: dcstat
- monitored_instance:
- name: eBPF DCstat
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - Directory Cache
- - File system
- - eBPF
- most_popular: false
- overview:
- data_collection:
-       metrics_description: "Monitor directory cache events per application, giving an overall view of files in memory or on storage devices."
-       method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-       description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-             Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of each Linux distribution.
-             When these options are not set, it is necessary to get the kernel source code from https://kernel.org, or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
-             with different names.
-
-             Now follow these steps:
-             1. Copy the configuration file to /usr/src/linux/.config.
-             2. Select the necessary options: make oldconfig
-             3. Compile your kernel image: make bzImage
-             4. Compile your modules: make modules
-             5. Copy your new kernel image to the boot loader directory
-             6. Install the new modules: make modules_install
-             7. Generate an initial ramdisk image (`initrd`) if necessary.
-             8. Update your boot loader
- configuration:
- file:
- name: "ebpf.d/dcstat.conf"
-         description: "Overwrite the default configuration to help reduce memory usage. You can also select which charts are visible on the dashboard."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
-             title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-             description: Define whether the plugin will monitor only the call (`entry`) for the functions, or also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
-             description: "Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-             description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
-             description: Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of keeping centralized information.
- default_value: yes
- required: false
- - name: lifetime
-             description: Set the default lifetime for the thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: apps
-         description: "These metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_dc_ratio
- description: Percentage of files inside directory cache
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: app.ebpf_dc_reference
- description: Count file access
- unit: "files"
- chart_type: stacked
- dimensions:
- - name: files
- - name: app.ebpf_dc_not_cache
- description: Files not present inside directory cache
- unit: "files"
- chart_type: stacked
- dimensions:
- - name: files
- - name: app.ebpf_dc_not_found
- description: Files not found
- unit: "files"
- chart_type: stacked
- dimensions:
- - name: files
- - name: filesystem
-         description: "These metrics show the total number of calls to functions inside the kernel."
- labels: []
- metrics:
- - name: filesystem.dc_reference
- description: Variables used to calculate hit ratio.
- unit: "files"
- chart_type: line
- dimensions:
- - name: reference
- - name: slow
- - name: miss
- - name: filesystem.dc_hit_ratio
- description: Percentage of files inside directory cache
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.dc_ratio
- description: Percentage of files inside directory cache
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: cgroup.dc_reference
- description: Count file access
- unit: "files"
- chart_type: line
- dimensions:
- - name: reference
- - name: cgroup.dc_not_cache
- description: Files not present inside directory cache
- unit: "files"
- chart_type: line
- dimensions:
- - name: slow
- - name: cgroup.dc_not_found
- description: Files not found
- unit: "files"
- chart_type: line
- dimensions:
- - name: miss
- - name: services.dc_ratio
- description: Percentage of files inside directory cache
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.dc_reference
- description: Count file access
- unit: "files"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.dc_not_cache
- description: Files not present inside directory cache
- unit: "files"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.dc_not_found
- description: Files not found
- unit: "files"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - meta:
- plugin_name: ebpf.plugin
- module_name: filesystem
- monitored_instance:
- name: eBPF Filesystem
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - Filesystem
- - ext4
- - btrfs
- - nfs
- - xfs
- - zfs
- - eBPF
- - latency
- - I/O
- most_popular: false
- overview:
- data_collection:
-       metrics_description: "Monitor latency for the main filesystem actions, such as I/O events."
-       method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-       description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-             Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of each Linux distribution.
-             When these options are not set, it is necessary to get the kernel source code from https://kernel.org, or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
-             with different names.
-
-             Now follow these steps:
-             1. Copy the configuration file to /usr/src/linux/.config.
-             2. Select the necessary options: make oldconfig
-             3. Compile your kernel image: make bzImage
-             4. Compile your modules: make modules
-             5. Copy your new kernel image to the boot loader directory
-             6. Install the new modules: make modules_install
-             7. Generate an initial ramdisk image (`initrd`) if necessary.
-             8. Update your boot loader
- configuration:
- file:
- name: "ebpf.d/filesystem.conf"
-         description: "Overwrite the default configuration and allow the user to select which charts are visible on the dashboard."
- options:
- description: |
-           This configuration file has two different sections. The `[global]` section overwrites the default options, while `[filesystem]` allows the user to select which filesystems to monitor.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-             description: Define whether the plugin will monitor only the call (`entry`) for the functions, or also monitor the return (`return`).
- default_value: entry
- required: false
- - name: lifetime
-             description: Set the default lifetime for the thread when enabled by cloud.
- default_value: 300
- required: false
- - name: btrfsdist
-             description: Enable or disable latency monitoring for functions associated with the btrfs filesystem.
- default_value: yes
- required: false
- - name: ext4dist
-             description: Enable or disable latency monitoring for functions associated with the ext4 filesystem.
- default_value: yes
- required: false
- - name: nfsdist
-             description: Enable or disable latency monitoring for functions associated with the nfs filesystem.
- default_value: yes
- required: false
- - name: xfsdist
-             description: Enable or disable latency monitoring for functions associated with the xfs filesystem.
- default_value: yes
- required: false
- - name: zfsdist
-             description: Enable or disable latency monitoring for functions associated with the zfs filesystem.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: filesystem
-         description: "Latency charts associated with filesystem actions."
- labels: []
- metrics:
- - name: filesystem.read_latency
- description: ext4 latency for each read request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - name: filesystem.open_latency
- description: ext4 latency for each open request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - name: filesystem.sync_latency
- description: ext4 latency for each sync request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
-       - name: filesystem
- description: ""
- labels: []
- metrics:
- - name: filesystem.write_latency
- description: ext4 latency for each write request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - name: global
- description: ""
- labels: []
- metrics:
- - name: filesystem.attributte_latency
- description: nfs latency for each attribute request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - meta:
- plugin_name: ebpf.plugin
- module_name: shm
- monitored_instance:
- name: eBPF SHM
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - syscall
- - shared memory
- - eBPF
- most_popular: false
- overview:
- data_collection:
-       metrics_description: "Monitor the syscalls responsible for manipulating shared memory."
-       method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-       description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-             Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of each Linux distribution.
-             When these options are not set, it is necessary to get the kernel source code from https://kernel.org, or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
-             with different names.
-
-             Now follow these steps:
-             1. Copy the configuration file to /usr/src/linux/.config.
-             2. Select the necessary options: make oldconfig
-             3. Compile your kernel image: make bzImage
-             4. Compile your modules: make modules
-             5. Copy your new kernel image to the boot loader directory
-             6. Install the new modules: make modules_install
-             7. Generate an initial ramdisk image (`initrd`) if necessary.
-             8. Update your boot loader
- - title: Debug Filesystem
- description: |
-             This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
- configuration:
- file:
- name: "ebpf.d/shm.conf"
-         description: "Overwrite the default configuration and allow the user to select which charts are visible on the dashboard."
- options:
- description: |
-           This configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select which syscalls to monitor.
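-
-           As an illustration of the four syscalls this module can monitor, here is a minimal C program (a test sketch, not part of Netdata) whose execution adds one call to each of the charts below:
-
-           ```c
-           #include <stdio.h>
-           #include <sys/ipc.h>
-           #include <sys/shm.h>
-
-           int main(void) {
-               // shmget: create a private 4 KiB shared memory segment
-               int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
-               if (id < 0) { perror("shmget"); return 1; }
-
-               // shmat: attach the segment to this process' address space
-               void *addr = shmat(id, NULL, 0);
-               if (addr == (void *) -1) { perror("shmat"); return 1; }
-
-               // shmdt: detach the segment
-               shmdt(addr);
-
-               // shmctl: mark the segment for removal
-               shmctl(id, IPC_RMID, NULL);
-               return 0;
-           }
-           ```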
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-             description: Define whether the plugin will monitor only the call (`entry`) for the functions, or also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
-             description: "Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-             description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
-             description: Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of keeping centralized information.
- default_value: yes
- required: false
- - name: lifetime
-             description: Set the default lifetime for the thread when enabled by cloud.
- default_value: 300
- required: false
- - name: shmget
- description: Enable or disable monitoring for syscall `shmget`
- default_value: yes
- required: false
- - name: shmat
- description: Enable or disable monitoring for syscall `shmat`
- default_value: yes
- required: false
- - name: shmdt
- description: Enable or disable monitoring for syscall `shmdt`
- default_value: yes
- required: false
- - name: shmctl
- description: Enable or disable monitoring for syscall `shmctl`
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
-         description: "These metrics show grouped information per cgroup/service."
- labels: []
- metrics:
- - name: cgroup.shmget
- description: Calls to syscall shmget(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: get
- - name: cgroup.shmat
- description: Calls to syscall shmat(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: at
- - name: cgroup.shmdt
- description: Calls to syscall shmdt(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: dt
- - name: cgroup.shmctl
- description: Calls to syscall shmctl(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: ctl
- - name: services.shmget
- description: Calls to syscall shmget(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.shmat
- description: Calls to syscall shmat(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.shmdt
- description: Calls to syscall shmdt(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.shmctl
- description: Calls to syscall shmctl(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: apps
-         description: "These metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_shmget_call
- description: Calls to syscall shmget(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_shmat_call
- description: Calls to syscall shmat(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_shmdt_call
- description: Calls to syscall shmdt(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_shmctl_call
- description: Calls to syscall shmctl(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: global
-         description: "These metrics show the number of calls for the specified syscalls."
- labels: []
- metrics:
- - name: system.shared_memory_calls
- description: Calls to shared memory system calls
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: get
- - name: at
- - name: dt
- - name: ctl
- - meta:
- plugin_name: ebpf.plugin
- module_name: softirq
- monitored_instance:
- name: eBPF SoftIRQ
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - SoftIRQ
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor latency for each SoftIRQ available."
- method_description: "Attach kprobe to internal kernel functions."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-       description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-             Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of each Linux distribution.
-             When these options are not set, it is necessary to get the kernel source code from https://kernel.org, or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
-             with different names.
-
-             Now follow these steps:
-             1. Copy the configuration file to /usr/src/linux/.config.
-             2. Select the necessary options: make oldconfig
-             3. Compile your kernel image: make bzImage
-             4. Compile your modules: make modules
-             5. Copy your new kernel image to the boot loader directory
-             6. Install the new modules: make modules_install
-             7. Generate an initial ramdisk image (`initrd`) if necessary.
-             8. Update your boot loader
- - title: Debug Filesystem
- description: |
-             This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
- configuration:
- file:
- name: "ebpf.d/softirq.conf"
-         description: "Overwrite the default configuration, reducing the number of I/O events."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-             description: Define whether the plugin will monitor only the call (`entry`) for the functions, or also monitor the return (`return`).
- default_value: entry
- required: false
- - name: lifetime
-             description: Set the default lifetime for the thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
-         description: "These metrics show the latest timestamp for each softIRQ available on the host."
- labels: []
- metrics:
- - name: system.softirq_latency
- description: Soft IRQ latency
- unit: "milliseconds"
- chart_type: stacked
- dimensions:
- - name: soft IRQs
- - meta:
- plugin_name: ebpf.plugin
- module_name: mount
- monitored_instance:
- name: eBPF Mount
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - mount
- - umount
- - device
- - eBPF
- most_popular: false
- overview:
- data_collection:
-       metrics_description: "Monitor calls to the mount and umount syscalls."
-       method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-       description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-             Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of each Linux distribution.
-             When these options are not set, it is necessary to get the kernel source code from https://kernel.org, or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
-             with different names.
-
-             Now follow these steps:
-             1. Copy the configuration file to /usr/src/linux/.config.
-             2. Select the necessary options: make oldconfig
-             3. Compile your kernel image: make bzImage
-             4. Compile your modules: make modules
-             5. Copy your new kernel image to the boot loader directory
-             6. Install the new modules: make modules_install
-             7. Generate an initial ramdisk image (`initrd`) if necessary.
-             8. Update your boot loader
- - title: Debug Filesystem
- description: |
-             This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
- configuration:
- file:
- name: "ebpf.d/mount.conf"
- description: "Overwrite default configuration."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-             description: Define whether the plugin will monitor only the call (`entry`) for the functions, or also monitor the return (`return`).
- default_value: entry
- required: false
- - name: ebpf type format
-             description: "Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-             description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code)."
- default_value: trampoline
- required: false
- - name: lifetime
-             description: Set the default lifetime for the thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
-         description: "Calls to the mount and umount syscalls."
- labels: []
- metrics:
- - name: mount_points.call
- description: Calls to mount and umount syscalls
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: mount
- - name: umount
- - name: mount_points.error
- description: Errors to mount and umount file systems
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: mount
- - name: umount
- - meta:
- plugin_name: ebpf.plugin
- module_name: vfs
- monitored_instance:
- name: eBPF VFS
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - virtual
- - filesystem
- - eBPF
- - I/O
- - files
- most_popular: false
- overview:
- data_collection:
-       metrics_description: "Monitor I/O events on the Linux Virtual Filesystem (VFS)."
-       method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-       description: "The plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-             Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of each Linux distribution.
-             When these options are not set, it is necessary to get the kernel source code from https://kernel.org, or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
-             with different names.
-
-             Now follow these steps:
-             1. Copy the configuration file to /usr/src/linux/.config.
-             2. Select the necessary options: make oldconfig
-             3. Compile your kernel image: make bzImage
-             4. Compile your modules: make modules
-             5. Copy your new kernel image to the boot loader directory
-             6. Install the new modules: make modules_install
-             7. Generate an initial ramdisk image (`initrd`) if necessary.
-             8. Update your boot loader
- configuration:
- file:
- name: "ebpf.d/vfs.conf"
-         description: "Overwrite the default configuration to help reduce memory usage."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
-             description: Define whether the plugin will monitor only the call (`entry`) for the functions, or also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
-             description: "Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-             description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
-             description: Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of keeping centralized information.
- default_value: yes
- required: false
- - name: lifetime
-             description: Set the default lifetime for the thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
-         description: "These metrics show grouped information per cgroup/service."
- labels: []
- metrics:
- - name: cgroup.vfs_unlink
- description: Files deleted
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: delete
- - name: cgroup.vfs_write
- description: Write to disk
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: write
- - name: cgroup.vfs_write_error
- description: Fails to write
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: write
- - name: cgroup.vfs_read
- description: Read from disk
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: cgroup.vfs_read_error
- description: Fails to read
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: cgroup.vfs_write_bytes
- description: Bytes written on disk
- unit: "bytes/s"
- chart_type: line
- dimensions:
- - name: write
- - name: cgroup.vfs_read_bytes
- description: Bytes read from disk
- unit: "bytes/s"
- chart_type: line
- dimensions:
- - name: read
- - name: cgroup.vfs_fsync
- description: Calls to vfs_fsync.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: fsync
- - name: cgroup.vfs_fsync_error
- description: Sync error
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: fsync
- - name: cgroup.vfs_open
- description: Calls to vfs_open.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: cgroup.vfs_open_error
- description: Open error
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: cgroup.vfs_create
- description: Calls to vfs_create.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: create
- - name: cgroup.vfs_create_error
- description: Create error
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: create
- - name: services.vfs_unlink
- description: Files deleted
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_write
- description: Write to disk
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_write_error
- description: Fails to write
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_read
- description: Read from disk
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_read_error
- description: Fails to read
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_write_bytes
- description: Bytes written on disk
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_read_bytes
- description: Bytes read from disk
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_fsync
- description: Calls to vfs_fsync.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_fsync_error
- description: Sync error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_open
- description: Calls to vfs_open.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_open_error
- description: Open error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_create
- description: Calls to vfs_create.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_create_error
- description: Create error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: global
-         description: "These metrics show the total number of calls to VFS functions inside the kernel."
- labels: []
- metrics:
- - name: filesystem.vfs_deleted_objects
- description: Remove files
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: delete
- - name: filesystem.vfs_io
- description: Calls to IO
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: filesystem.vfs_io_bytes
- description: Bytes written and read
- unit: "bytes/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: filesystem.vfs_io_error
- description: Fails to write or read
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: filesystem.vfs_fsync
- description: Calls to vfs_fsync.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: fsync
- - name: filesystem.vfs_fsync_error
- description: Fails to synchronize
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: fsync
- - name: filesystem.vfs_open
- description: Calls to vfs_open.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: filesystem.vfs_open_error
- description: Fails to open a file
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: filesystem.vfs_create
- description: Calls to vfs_create.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: create
- - name: filesystem.vfs_create_error
- description: Fails to create a file.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: create
- - name: apps
-         description: "These metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_call_vfs_unlink
- description: Files deleted
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_write
- description: Write to disk
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_write_error
- description: Fails to write
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_read
- description: Read from disk
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_read_error
- description: Fails to read
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_write_bytes
- description: Bytes written on disk
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: writes
- - name: app.ebpf_call_vfs_read_bytes
-             description: Bytes read from disk
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: reads
- - name: app.ebpf_call_vfs_fsync
- description: Calls to vfs_fsync.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_fsync_error
- description: Sync error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_open
- description: Calls to vfs_open.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_open_error
- description: Open error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_create
- description: Calls to vfs_create.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_create_error
- description: Create error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - meta:
- plugin_name: ebpf.plugin
- module_name: process
- monitored_instance:
- name: eBPF Process
- link: "https://github.com/netdata/netdata/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - Memory
- - plugin
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor internal memory usage."
-       method_description: "Uses netdata internal statistics to monitor memory management by the plugin."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Netdata flags.
-         description: "To have these charts, you need to compile netdata with the flag `NETDATA_DEV_MODE`."
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
-         description: "How the plugin allocates memory."
- labels: []
- metrics:
- - name: netdata.ebpf_aral_stat_size
- description: Bytes allocated for ARAL.
- unit: "bytes"
- chart_type: stacked
- dimensions:
- - name: memory
- - name: netdata.ebpf_aral_stat_alloc
- description: Calls to allocate memory.
- unit: "calls"
- chart_type: stacked
- dimensions:
- - name: aral
- - name: netdata.ebpf_threads
- description: Threads info
- unit: "threads"
- chart_type: line
- dimensions:
- - name: total
- - name: running
- - name: netdata.ebpf_load_methods
- description: Load info
- unit: "methods"
- chart_type: line
- dimensions:
- - name: legacy
- - name: co-re
- - name: netdata.ebpf_kernel_memory
- description: Memory allocated for hash tables.
- unit: "bytes"
- chart_type: line
- dimensions:
- - name: memory_locked
- - name: netdata.ebpf_hash_tables_count
- description: Number of hash tables loaded
- unit: "hash tables"
- chart_type: line
- dimensions:
- - name: hash_table
- - name: netdata.ebpf_aral_stat_size
- description: Bytes allocated for ARAL
- unit: "bytes"
- chart_type: stacked
- dimensions:
- - name: memory
- - name: netdata.ebpf_aral_stat_alloc
- description: Calls to allocate memory
- unit: "calls"
- chart_type: stacked
- dimensions:
- - name: aral
- - name: netdata.ebpf_aral_stat_size
- description: Bytes allocated for ARAL.
- unit: "bytes"
- chart_type: stacked
- dimensions:
- - name: memory
- - name: netdata.ebpf_aral_stat_alloc
- description: Calls to allocate memory
- unit: "calls"
- chart_type: stacked
- dimensions:
- - name: aral
- - name: netdata.ebpf_hash_tables_insert_pid_elements
- description: Number of times an element was inserted in a hash table.
- unit: "rows"
- chart_type: line
- dimensions:
- - name: thread
- - name: netdata.ebpf_hash_tables_remove_pid_elements
-             description: Number of times an element was removed from a hash table.
- unit: "rows"
- chart_type: line
- dimensions:
- - name: thread
diff --git a/collectors/freebsd.plugin/Makefile.am b/collectors/freebsd.plugin/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/collectors/freebsd.plugin/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/freebsd.plugin/README.md b/collectors/freebsd.plugin/README.md
deleted file mode 100644
index 9c33fccb1..000000000
--- a/collectors/freebsd.plugin/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-<!--
-title: "FreeBSD system metrics (freebsd.plugin)"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/README.md"
-sidebar_label: "FreeBSD system metrics (freebsd.plugin)"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/System metrics"
--->
-
-# FreeBSD system metrics (freebsd.plugin)
-
-Collects resource usage and performance data on FreeBSD systems.
-
-By default, Netdata enables monitoring of disk, memory, and network metrics only when they are non-zero; metrics that stay constantly at zero are ignored. Metrics that start reporting values after Netdata has started are detected automatically, and their charts are added to the dashboard (a dashboard refresh is needed for them to appear). Use `yes` instead of `auto` in a plugin's configuration section to enable its charts permanently, or set the `enable zero metrics` option to `yes` in the `[global]` section to enable charts with zero metrics for all internal Netdata plugins.
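-
-For example, a minimal `netdata.conf` sketch (the option names are taken from this plugin's configuration sections; the values shown are illustrative):
-
-```text
-[global]
-    enable zero metrics = yes
-
-[plugin:freebsd:kern.devstat]
-    bandwidth for all disks = yes
-```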
-
-
diff --git a/collectors/freebsd.plugin/freebsd_devstat.c b/collectors/freebsd.plugin/freebsd_devstat.c
deleted file mode 100644
index ca6048a16..000000000
--- a/collectors/freebsd.plugin/freebsd_devstat.c
+++ /dev/null
@@ -1,759 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_freebsd.h"
-
-#include <sys/devicestat.h>
-
-struct disk {
- char *name;
- uint32_t hash;
- size_t len;
-
- // flags
- int configured;
- int enabled;
- int updated;
-
- int do_io;
- int do_ops;
- int do_qops;
- int do_util;
- int do_iotime;
- int do_await;
- int do_avagsz;
- int do_svctm;
-
-
- // data for differential charts
-
- struct prev_dstat {
- collected_number bytes_read;
- collected_number bytes_write;
- collected_number bytes_free;
- collected_number operations_read;
- collected_number operations_write;
- collected_number operations_other;
- collected_number operations_free;
- collected_number duration_read_ms;
- collected_number duration_write_ms;
- collected_number duration_other_ms;
- collected_number duration_free_ms;
- collected_number busy_time_ms;
- } prev_dstat;
-
- // charts and dimensions
-
- RRDSET *st_io;
- RRDDIM *rd_io_in;
- RRDDIM *rd_io_out;
- RRDDIM *rd_io_free;
-
- RRDSET *st_ops;
- RRDDIM *rd_ops_in;
- RRDDIM *rd_ops_out;
- RRDDIM *rd_ops_other;
- RRDDIM *rd_ops_free;
-
- RRDSET *st_qops;
- RRDDIM *rd_qops;
-
- RRDSET *st_util;
- RRDDIM *rd_util;
-
- RRDSET *st_iotime;
- RRDDIM *rd_iotime_in;
- RRDDIM *rd_iotime_out;
- RRDDIM *rd_iotime_other;
- RRDDIM *rd_iotime_free;
-
- RRDSET *st_await;
- RRDDIM *rd_await_in;
- RRDDIM *rd_await_out;
- RRDDIM *rd_await_other;
- RRDDIM *rd_await_free;
-
- RRDSET *st_avagsz;
- RRDDIM *rd_avagsz_in;
- RRDDIM *rd_avagsz_out;
- RRDDIM *rd_avagsz_free;
-
- RRDSET *st_svctm;
- RRDDIM *rd_svctm;
-
- struct disk *next;
-};
-
-static struct disk *disks_root = NULL, *disks_last_used = NULL;
-
-static size_t disks_added = 0, disks_found = 0;
-
-static void disk_free(struct disk *dm) {
- if (likely(dm->st_io))
- rrdset_is_obsolete___safe_from_collector_thread(dm->st_io);
- if (likely(dm->st_ops))
- rrdset_is_obsolete___safe_from_collector_thread(dm->st_ops);
- if (likely(dm->st_qops))
- rrdset_is_obsolete___safe_from_collector_thread(dm->st_qops);
- if (likely(dm->st_util))
- rrdset_is_obsolete___safe_from_collector_thread(dm->st_util);
- if (likely(dm->st_iotime))
- rrdset_is_obsolete___safe_from_collector_thread(dm->st_iotime);
- if (likely(dm->st_await))
- rrdset_is_obsolete___safe_from_collector_thread(dm->st_await);
- if (likely(dm->st_avagsz))
- rrdset_is_obsolete___safe_from_collector_thread(dm->st_avagsz);
- if (likely(dm->st_svctm))
- rrdset_is_obsolete___safe_from_collector_thread(dm->st_svctm);
-
- disks_added--;
- freez(dm->name);
- freez(dm);
-}
-
-static void disks_cleanup() {
- if (likely(disks_found == disks_added)) return;
-
- struct disk *dm = disks_root, *last = NULL;
- while(dm) {
- if (unlikely(!dm->updated)) {
- // collector_info("Removing disk '%s', linked after '%s'", dm->name, last?last->name:"ROOT");
-
- if (disks_last_used == dm)
- disks_last_used = last;
-
- struct disk *t = dm;
-
- if (dm == disks_root || !last)
- disks_root = dm = dm->next;
-
- else
- last->next = dm = dm->next;
-
- t->next = NULL;
- disk_free(t);
- }
- else {
- last = dm;
- dm->updated = 0;
- dm = dm->next;
- }
- }
-}
-
-static struct disk *get_disk(const char *name) {
- struct disk *dm;
-
- uint32_t hash = simple_hash(name);
-
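- // devices are reported by the kernel in a stable order, so remembering
- // the position right after the last match makes the next lookup hit on
- // the first comparison in the common case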
- // search it, from the last position to the end
- for(dm = disks_last_used ; dm ; dm = dm->next) {
- if (unlikely(hash == dm->hash && !strcmp(name, dm->name))) {
- disks_last_used = dm->next;
- return dm;
- }
- }
-
- // search it from the beginning to the last position we used
- for(dm = disks_root ; dm != disks_last_used ; dm = dm->next) {
- if (unlikely(hash == dm->hash && !strcmp(name, dm->name))) {
- disks_last_used = dm->next;
- return dm;
- }
- }
-
- // create a new one
- dm = callocz(1, sizeof(struct disk));
- dm->name = strdupz(name);
- dm->hash = simple_hash(dm->name);
- dm->len = strlen(dm->name);
- disks_added++;
-
- // link it to the end
- if (disks_root) {
- struct disk *e;
- for(e = disks_root; e->next ; e = e->next) ;
- e->next = dm;
- }
- else
- disks_root = dm;
-
- return dm;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// kern.devstat
-
-int do_kern_devstat(int update_every, usec_t dt) {
-
-#define DEFAULT_EXCLUDED_DISKS ""
-#define CONFIG_SECTION_KERN_DEVSTAT "plugin:freebsd:kern.devstat"
-#define BINTIME_SCALE 5.42101086242752217003726400434970855712890625e-17 // this is 1000/2^64
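-// struct bintime stores the fractional second as a 64-bit fixed-point value
-// (units of 2^-64 s), so multiplying frac by 1000/2^64 converts it to milliseconds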
-
- static int enable_new_disks = -1;
- static int enable_pass_devices = -1, do_system_io = -1, do_io = -1, do_ops = -1, do_qops = -1, do_util = -1,
- do_iotime = -1, do_await = -1, do_avagsz = -1, do_svctm = -1;
- static SIMPLE_PATTERN *excluded_disks = NULL;
-
- if (unlikely(enable_new_disks == -1)) {
- enable_new_disks = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT,
- "enable new disks detected at runtime", CONFIG_BOOLEAN_AUTO);
-
- enable_pass_devices = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT,
- "performance metrics for pass devices", CONFIG_BOOLEAN_AUTO);
-
- do_system_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "total bandwidth for all disks",
- CONFIG_BOOLEAN_YES);
-
- do_io = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "bandwidth for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_ops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "operations for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_qops = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "queued operations for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_util = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "utilization percentage for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "i/o time for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_await = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o time for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_avagsz = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average completed i/o bandwidth for all disks",
- CONFIG_BOOLEAN_AUTO);
- do_svctm = config_get_boolean_ondemand(CONFIG_SECTION_KERN_DEVSTAT, "average service time for all disks",
- CONFIG_BOOLEAN_AUTO);
-
- excluded_disks = simple_pattern_create(
- config_get(CONFIG_SECTION_KERN_DEVSTAT, "disable by default disks matching", DEFAULT_EXCLUDED_DISKS),
- NULL,
- SIMPLE_PATTERN_EXACT,
- true);
- }
-
- if (likely(do_system_io || do_io || do_ops || do_qops || do_util || do_iotime || do_await || do_avagsz || do_svctm)) {
- static int mib_numdevs[3] = {0, 0, 0};
- int numdevs;
- int common_error = 0;
-
- if (unlikely(GETSYSCTL_SIMPLE("kern.devstat.numdevs", mib_numdevs, numdevs))) {
- common_error = 1;
- } else {
- static int mib_devstat[3] = {0, 0, 0};
- static void *devstat_data = NULL;
- static int old_numdevs = 0;
-
- if (unlikely(numdevs != old_numdevs)) {
- devstat_data = reallocz(devstat_data, sizeof(long) + sizeof(struct devstat) *
- numdevs); // a generation number precedes the devstat structures
- old_numdevs = numdevs;
- }
- if (unlikely(GETSYSCTL_WSIZE("kern.devstat.all", mib_devstat, devstat_data,
- sizeof(long) + sizeof(struct devstat) * numdevs))) {
- common_error = 1;
- } else {
- struct devstat *dstat;
- int i;
- collected_number total_disk_kbytes_read = 0;
- collected_number total_disk_kbytes_write = 0;
-
- disks_found = 0;
-
- dstat = (struct devstat*)((char*)devstat_data + sizeof(long)); // skip generation number
-
- for (i = 0; i < numdevs; i++) {
- if (likely(do_system_io)) {
- if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) ||
- ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) {
- total_disk_kbytes_read += dstat[i].bytes[DEVSTAT_READ] / KILO_FACTOR;
- total_disk_kbytes_write += dstat[i].bytes[DEVSTAT_WRITE] / KILO_FACTOR;
- }
- }
-
- if (unlikely(!enable_pass_devices))
- if ((dstat[i].device_type & DEVSTAT_TYPE_PASS) == DEVSTAT_TYPE_PASS)
- continue;
-
- if (((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_DIRECT) ||
- ((dstat[i].device_type & DEVSTAT_TYPE_MASK) == DEVSTAT_TYPE_STORARRAY)) {
- char disk[DEVSTAT_NAME_LEN + MAX_INT_DIGITS + 1];
- struct cur_dstat {
- collected_number duration_read_ms;
- collected_number duration_write_ms;
- collected_number duration_other_ms;
- collected_number duration_free_ms;
- collected_number busy_time_ms;
- } cur_dstat;
-
- sprintf(disk, "%s%d", dstat[i].device_name, dstat[i].unit_number);
-
- struct disk *dm = get_disk(disk);
- dm->updated = 1;
- disks_found++;
-
- if(unlikely(!dm->configured)) {
- char var_name[4096 + 1];
-
- // this is the first time we see this disk
-
- // remember we configured it
- dm->configured = 1;
-
- dm->enabled = enable_new_disks;
-
- if (likely(dm->enabled))
- dm->enabled = !simple_pattern_matches(excluded_disks, disk);
-
- snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_KERN_DEVSTAT, disk);
- dm->enabled = config_get_boolean_ondemand(var_name, "enabled", dm->enabled);
-
- dm->do_io = config_get_boolean_ondemand(var_name, "bandwidth", do_io);
- dm->do_ops = config_get_boolean_ondemand(var_name, "operations", do_ops);
- dm->do_qops = config_get_boolean_ondemand(var_name, "queued operations", do_qops);
- dm->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", do_util);
- dm->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", do_iotime);
- dm->do_await = config_get_boolean_ondemand(var_name, "average completed i/o time",
- do_await);
- dm->do_avagsz = config_get_boolean_ondemand(var_name, "average completed i/o bandwidth",
- do_avagsz);
- dm->do_svctm = config_get_boolean_ondemand(var_name, "average service time", do_svctm);
-
- // initialise data for differential charts
-
- dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ];
- dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE];
- dm->prev_dstat.bytes_free = dstat[i].bytes[DEVSTAT_FREE];
- dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ];
- dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE];
- dm->prev_dstat.operations_other = dstat[i].operations[DEVSTAT_NO_DATA];
- dm->prev_dstat.operations_free = dstat[i].operations[DEVSTAT_FREE];
- dm->prev_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000
- + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE;
- dm->prev_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000
- + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE;
- dm->prev_dstat.duration_other_ms = dstat[i].duration[DEVSTAT_NO_DATA].sec * 1000
- + dstat[i].duration[DEVSTAT_NO_DATA].frac * BINTIME_SCALE;
- dm->prev_dstat.duration_free_ms = dstat[i].duration[DEVSTAT_FREE].sec * 1000
- + dstat[i].duration[DEVSTAT_FREE].frac * BINTIME_SCALE;
- dm->prev_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000
- + dstat[i].busy_time.frac * BINTIME_SCALE;
- }
-
- cur_dstat.duration_read_ms = dstat[i].duration[DEVSTAT_READ].sec * 1000
- + dstat[i].duration[DEVSTAT_READ].frac * BINTIME_SCALE;
- cur_dstat.duration_write_ms = dstat[i].duration[DEVSTAT_WRITE].sec * 1000
- + dstat[i].duration[DEVSTAT_WRITE].frac * BINTIME_SCALE;
- cur_dstat.duration_other_ms = dstat[i].duration[DEVSTAT_NO_DATA].sec * 1000
- + dstat[i].duration[DEVSTAT_NO_DATA].frac * BINTIME_SCALE;
- cur_dstat.duration_free_ms = dstat[i].duration[DEVSTAT_FREE].sec * 1000
- + dstat[i].duration[DEVSTAT_FREE].frac * BINTIME_SCALE;
-
- cur_dstat.busy_time_ms = dstat[i].busy_time.sec * 1000 + dstat[i].busy_time.frac * BINTIME_SCALE;
-
- if(dm->do_io == CONFIG_BOOLEAN_YES || (dm->do_io == CONFIG_BOOLEAN_AUTO &&
- (dstat[i].bytes[DEVSTAT_READ] ||
- dstat[i].bytes[DEVSTAT_WRITE] ||
- dstat[i].bytes[DEVSTAT_FREE] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!dm->st_io)) {
- dm->st_io = rrdset_create_localhost("disk",
- disk,
- NULL,
- disk,
- "disk.io",
- "Disk I/O Bandwidth",
- "KiB/s",
- "freebsd.plugin",
- "devstat",
- NETDATA_CHART_PRIO_DISK_IO,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- dm->rd_io_in = rrddim_add(dm->st_io, "reads", NULL, 1, KILO_FACTOR,
- RRD_ALGORITHM_INCREMENTAL);
- dm->rd_io_out = rrddim_add(dm->st_io, "writes", NULL, -1, KILO_FACTOR,
- RRD_ALGORITHM_INCREMENTAL);
- dm->rd_io_free = rrddim_add(dm->st_io, "frees", NULL, -1, KILO_FACTOR,
- RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(dm->st_io, dm->rd_io_in, dstat[i].bytes[DEVSTAT_READ]);
- rrddim_set_by_pointer(dm->st_io, dm->rd_io_out, dstat[i].bytes[DEVSTAT_WRITE]);
- rrddim_set_by_pointer(dm->st_io, dm->rd_io_free, dstat[i].bytes[DEVSTAT_FREE]);
- rrdset_done(dm->st_io);
- }
-
- if(dm->do_ops == CONFIG_BOOLEAN_YES || (dm->do_ops == CONFIG_BOOLEAN_AUTO &&
- (dstat[i].operations[DEVSTAT_READ] ||
- dstat[i].operations[DEVSTAT_WRITE] ||
- dstat[i].operations[DEVSTAT_NO_DATA] ||
- dstat[i].operations[DEVSTAT_FREE] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!dm->st_ops)) {
- dm->st_ops = rrdset_create_localhost("disk_ops",
- disk,
- NULL,
- disk,
- "disk.ops",
- "Disk Completed I/O Operations",
- "operations/s",
- "freebsd.plugin",
- "devstat",
- NETDATA_CHART_PRIO_DISK_OPS,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(dm->st_ops, RRDSET_FLAG_DETAIL);
-
- dm->rd_ops_in = rrddim_add(dm->st_ops, "reads", NULL, 1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- dm->rd_ops_out = rrddim_add(dm->st_ops, "writes", NULL, -1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- dm->rd_ops_other = rrddim_add(dm->st_ops, "other", NULL, 1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- dm->rd_ops_free = rrddim_add(dm->st_ops, "frees", NULL, -1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_in, dstat[i].operations[DEVSTAT_READ]);
- rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_out, dstat[i].operations[DEVSTAT_WRITE]);
- rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_other, dstat[i].operations[DEVSTAT_NO_DATA]);
- rrddim_set_by_pointer(dm->st_ops, dm->rd_ops_free, dstat[i].operations[DEVSTAT_FREE]);
- rrdset_done(dm->st_ops);
- }
-
- if(dm->do_qops == CONFIG_BOOLEAN_YES || (dm->do_qops == CONFIG_BOOLEAN_AUTO &&
- (dstat[i].start_count ||
- dstat[i].end_count ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!dm->st_qops)) {
- dm->st_qops = rrdset_create_localhost("disk_qops",
- disk,
- NULL,
- disk,
- "disk.qops",
- "Disk Current I/O Operations",
- "operations",
- "freebsd.plugin",
- "devstat",
- NETDATA_CHART_PRIO_DISK_QOPS,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(dm->st_qops, RRDSET_FLAG_DETAIL);
-
- dm->rd_qops = rrddim_add(dm->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
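- // operations started but not yet completed are the ones currently queued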
- rrddim_set_by_pointer(dm->st_qops, dm->rd_qops, dstat[i].start_count - dstat[i].end_count);
- rrdset_done(dm->st_qops);
- }
-
- if(dm->do_util == CONFIG_BOOLEAN_YES || (dm->do_util == CONFIG_BOOLEAN_AUTO &&
- (cur_dstat.busy_time_ms ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!dm->st_util)) {
- dm->st_util = rrdset_create_localhost("disk_util",
- disk,
- NULL,
- disk,
- "disk.util",
- "Disk Utilization Time",
- "% of time working",
- "freebsd.plugin",
- "devstat",
- NETDATA_CHART_PRIO_DISK_UTIL,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(dm->st_util, RRDSET_FLAG_DETAIL);
-
- dm->rd_util = rrddim_add(dm->st_util, "utilization", NULL, 1, 10,
- RRD_ALGORITHM_INCREMENTAL);
- }
-
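- // busy_time_ms is incremental, so at most 1000 ms accumulate per second;
- // the divisor of 10 turns that into a 0-100% utilization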
- rrddim_set_by_pointer(dm->st_util, dm->rd_util, cur_dstat.busy_time_ms);
- rrdset_done(dm->st_util);
- }
-
- if(dm->do_iotime == CONFIG_BOOLEAN_YES || (dm->do_iotime == CONFIG_BOOLEAN_AUTO &&
- (cur_dstat.duration_read_ms ||
- cur_dstat.duration_write_ms ||
- cur_dstat.duration_other_ms ||
- cur_dstat.duration_free_ms ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!dm->st_iotime)) {
- dm->st_iotime = rrdset_create_localhost("disk_iotime",
- disk,
- NULL,
- disk,
- "disk.iotime",
- "Disk Total I/O Time",
- "milliseconds/s",
- "freebsd.plugin",
- "devstat",
- NETDATA_CHART_PRIO_DISK_IOTIME,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(dm->st_iotime, RRDSET_FLAG_DETAIL);
-
- dm->rd_iotime_in = rrddim_add(dm->st_iotime, "reads", NULL, 1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- dm->rd_iotime_out = rrddim_add(dm->st_iotime, "writes", NULL, -1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- dm->rd_iotime_other = rrddim_add(dm->st_iotime, "other", NULL, 1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- dm->rd_iotime_free = rrddim_add(dm->st_iotime, "frees", NULL, -1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_in, cur_dstat.duration_read_ms);
- rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_out, cur_dstat.duration_write_ms);
- rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_other, cur_dstat.duration_other_ms);
- rrddim_set_by_pointer(dm->st_iotime, dm->rd_iotime_free, cur_dstat.duration_free_ms);
- rrdset_done(dm->st_iotime);
- }
-
- // calculate differential charts
- // only if this is not the first time we run
-
- if (likely(dt)) {
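- // each chart below divides the delta of a duration or byte counter by
- // the delta of the matching operations counter, e.g.
- //   await = (duration_ms - prev_duration_ms) / (ops - prev_ops)
- // the ternaries guard against division by zero when no operations
- // completed during the interval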
- if(dm->do_await == CONFIG_BOOLEAN_YES || (dm->do_await == CONFIG_BOOLEAN_AUTO &&
- (dstat[i].operations[DEVSTAT_READ] ||
- dstat[i].operations[DEVSTAT_WRITE] ||
- dstat[i].operations[DEVSTAT_NO_DATA] ||
- dstat[i].operations[DEVSTAT_FREE] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!dm->st_await)) {
- dm->st_await = rrdset_create_localhost("disk_await",
- disk,
- NULL,
- disk,
- "disk.await",
- "Average Completed I/O Operation Time",
- "milliseconds/operation",
- "freebsd.plugin",
- "devstat",
- NETDATA_CHART_PRIO_DISK_AWAIT,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(dm->st_await, RRDSET_FLAG_DETAIL);
-
- dm->rd_await_in = rrddim_add(dm->st_await, "reads", NULL, 1, 1,
- RRD_ALGORITHM_ABSOLUTE);
- dm->rd_await_out = rrddim_add(dm->st_await, "writes", NULL, -1, 1,
- RRD_ALGORITHM_ABSOLUTE);
- dm->rd_await_other = rrddim_add(dm->st_await, "other", NULL, 1, 1,
- RRD_ALGORITHM_ABSOLUTE);
- dm->rd_await_free = rrddim_add(dm->st_await, "frees", NULL, -1, 1,
- RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(dm->st_await, dm->rd_await_in,
- (dstat[i].operations[DEVSTAT_READ] -
- dm->prev_dstat.operations_read) ?
- (cur_dstat.duration_read_ms - dm->prev_dstat.duration_read_ms) /
- (dstat[i].operations[DEVSTAT_READ] -
- dm->prev_dstat.operations_read) :
- 0);
- rrddim_set_by_pointer(dm->st_await, dm->rd_await_out,
- (dstat[i].operations[DEVSTAT_WRITE] -
- dm->prev_dstat.operations_write) ?
- (cur_dstat.duration_write_ms - dm->prev_dstat.duration_write_ms) /
- (dstat[i].operations[DEVSTAT_WRITE] -
- dm->prev_dstat.operations_write) :
- 0);
- rrddim_set_by_pointer(dm->st_await, dm->rd_await_other,
- (dstat[i].operations[DEVSTAT_NO_DATA] -
- dm->prev_dstat.operations_other) ?
- (cur_dstat.duration_other_ms - dm->prev_dstat.duration_other_ms) /
- (dstat[i].operations[DEVSTAT_NO_DATA] -
- dm->prev_dstat.operations_other) :
- 0);
- rrddim_set_by_pointer(dm->st_await, dm->rd_await_free,
- (dstat[i].operations[DEVSTAT_FREE] -
- dm->prev_dstat.operations_free) ?
- (cur_dstat.duration_free_ms - dm->prev_dstat.duration_free_ms) /
- (dstat[i].operations[DEVSTAT_FREE] -
- dm->prev_dstat.operations_free) :
- 0);
- rrdset_done(dm->st_await);
- }
-
- if(dm->do_avagsz == CONFIG_BOOLEAN_YES || (dm->do_avagsz == CONFIG_BOOLEAN_AUTO &&
- (dstat[i].operations[DEVSTAT_READ] ||
- dstat[i].operations[DEVSTAT_WRITE] ||
- dstat[i].operations[DEVSTAT_FREE] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!dm->st_avagsz)) {
- dm->st_avagsz = rrdset_create_localhost("disk_avgsz",
- disk,
- NULL,
- disk,
- "disk.avgsz",
- "Average Completed I/O Operation Bandwidth",
- "KiB/operation",
- "freebsd.plugin",
- "devstat",
- NETDATA_CHART_PRIO_DISK_AVGSZ,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(dm->st_avagsz, RRDSET_FLAG_DETAIL);
-
- dm->rd_avagsz_in = rrddim_add(dm->st_avagsz, "reads", NULL, 1, KILO_FACTOR,
- RRD_ALGORITHM_ABSOLUTE);
- dm->rd_avagsz_out = rrddim_add(dm->st_avagsz, "writes", NULL, -1, KILO_FACTOR,
- RRD_ALGORITHM_ABSOLUTE);
- dm->rd_avagsz_free = rrddim_add(dm->st_avagsz, "frees", NULL, -1, KILO_FACTOR,
- RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_in,
- (dstat[i].operations[DEVSTAT_READ] -
- dm->prev_dstat.operations_read) ?
- (dstat[i].bytes[DEVSTAT_READ] - dm->prev_dstat.bytes_read) /
- (dstat[i].operations[DEVSTAT_READ] -
- dm->prev_dstat.operations_read) :
- 0);
- rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_out,
- (dstat[i].operations[DEVSTAT_WRITE] -
- dm->prev_dstat.operations_write) ?
- (dstat[i].bytes[DEVSTAT_WRITE] - dm->prev_dstat.bytes_write) /
- (dstat[i].operations[DEVSTAT_WRITE] -
- dm->prev_dstat.operations_write) :
- 0);
- rrddim_set_by_pointer(dm->st_avagsz, dm->rd_avagsz_free,
- (dstat[i].operations[DEVSTAT_FREE] -
- dm->prev_dstat.operations_free) ?
- (dstat[i].bytes[DEVSTAT_FREE] - dm->prev_dstat.bytes_free) /
- (dstat[i].operations[DEVSTAT_FREE] -
- dm->prev_dstat.operations_free) :
- 0);
- rrdset_done(dm->st_avagsz);
- }
-
- if(dm->do_svctm == CONFIG_BOOLEAN_YES || (dm->do_svctm == CONFIG_BOOLEAN_AUTO &&
- (dstat[i].operations[DEVSTAT_READ] ||
- dstat[i].operations[DEVSTAT_WRITE] ||
- dstat[i].operations[DEVSTAT_NO_DATA] ||
- dstat[i].operations[DEVSTAT_FREE] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!dm->st_svctm)) {
- dm->st_svctm = rrdset_create_localhost("disk_svctm",
- disk,
- NULL,
- disk,
- "disk.svctm",
- "Average Service Time",
- "milliseconds/operation",
- "freebsd.plugin",
- "devstat",
- NETDATA_CHART_PRIO_DISK_SVCTM,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(dm->st_svctm, RRDSET_FLAG_DETAIL);
-
- dm->rd_svctm = rrddim_add(dm->st_svctm, "svctm", NULL, 1, 1,
- RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(dm->st_svctm, dm->rd_svctm,
- ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) +
- (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write) +
- (dstat[i].operations[DEVSTAT_NO_DATA] - dm->prev_dstat.operations_other) +
- (dstat[i].operations[DEVSTAT_FREE] - dm->prev_dstat.operations_free)) ?
- (cur_dstat.busy_time_ms - dm->prev_dstat.busy_time_ms) /
- ((dstat[i].operations[DEVSTAT_READ] - dm->prev_dstat.operations_read) +
- (dstat[i].operations[DEVSTAT_WRITE] - dm->prev_dstat.operations_write) +
- (dstat[i].operations[DEVSTAT_NO_DATA] - dm->prev_dstat.operations_other) +
- (dstat[i].operations[DEVSTAT_FREE] - dm->prev_dstat.operations_free)) :
- 0);
- rrdset_done(dm->st_svctm);
- }
-
- dm->prev_dstat.bytes_read = dstat[i].bytes[DEVSTAT_READ];
- dm->prev_dstat.bytes_write = dstat[i].bytes[DEVSTAT_WRITE];
- dm->prev_dstat.bytes_free = dstat[i].bytes[DEVSTAT_FREE];
- dm->prev_dstat.operations_read = dstat[i].operations[DEVSTAT_READ];
- dm->prev_dstat.operations_write = dstat[i].operations[DEVSTAT_WRITE];
- dm->prev_dstat.operations_other = dstat[i].operations[DEVSTAT_NO_DATA];
- dm->prev_dstat.operations_free = dstat[i].operations[DEVSTAT_FREE];
- dm->prev_dstat.duration_read_ms = cur_dstat.duration_read_ms;
- dm->prev_dstat.duration_write_ms = cur_dstat.duration_write_ms;
- dm->prev_dstat.duration_other_ms = cur_dstat.duration_other_ms;
- dm->prev_dstat.duration_free_ms = cur_dstat.duration_free_ms;
- dm->prev_dstat.busy_time_ms = cur_dstat.busy_time_ms;
- }
- }
- }
-
- if (likely(do_system_io)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost("system",
- "io",
- NULL,
- "disk",
- NULL,
- "Disk I/O",
- "KiB/s",
- "freebsd.plugin",
- "devstat",
- NETDATA_CHART_PRIO_SYSTEM_IO,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_in = rrddim_add(st, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in, total_disk_kbytes_read);
- rrddim_set_by_pointer(st, rd_out, total_disk_kbytes_write);
- rrdset_done(st);
- }
- }
- }
-
- if (unlikely(common_error)) {
- do_system_io = 0;
- collector_error("DISABLED: system.io chart");
- do_io = 0;
- collector_error("DISABLED: disk.* charts");
- do_ops = 0;
- collector_error("DISABLED: disk_ops.* charts");
- do_qops = 0;
- collector_error("DISABLED: disk_qops.* charts");
- do_util = 0;
- collector_error("DISABLED: disk_util.* charts");
- do_iotime = 0;
- collector_error("DISABLED: disk_iotime.* charts");
- do_await = 0;
- collector_error("DISABLED: disk_await.* charts");
- do_avagsz = 0;
- collector_error("DISABLED: disk_avgsz.* charts");
- do_svctm = 0;
- collector_error("DISABLED: disk_svctm.* charts");
- collector_error("DISABLED: kern.devstat module");
- return 1;
- }
- } else {
- collector_error("DISABLED: kern.devstat module");
- return 1;
- }
-
- disks_cleanup();
-
- return 0;
-}
diff --git a/collectors/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c
deleted file mode 100644
index 36be68422..000000000
--- a/collectors/freebsd.plugin/freebsd_getifaddrs.c
+++ /dev/null
@@ -1,599 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_freebsd.h"
-
-#include <ifaddrs.h>
-
-struct cgroup_network_interface {
- char *name;
- uint32_t hash;
- size_t len;
-
- // flags
- int configured;
- int enabled;
- int updated;
-
- int do_bandwidth;
- int do_packets;
- int do_errors;
- int do_drops;
- int do_events;
-
- // charts and dimensions
-
- RRDSET *st_bandwidth;
- RRDDIM *rd_bandwidth_in;
- RRDDIM *rd_bandwidth_out;
-
- RRDSET *st_packets;
- RRDDIM *rd_packets_in;
- RRDDIM *rd_packets_out;
- RRDDIM *rd_packets_m_in;
- RRDDIM *rd_packets_m_out;
-
- RRDSET *st_errors;
- RRDDIM *rd_errors_in;
- RRDDIM *rd_errors_out;
-
- RRDSET *st_drops;
- RRDDIM *rd_drops_in;
- RRDDIM *rd_drops_out;
-
- RRDSET *st_events;
- RRDDIM *rd_events_coll;
-
- struct cgroup_network_interface *next;
-};
-
-static struct cgroup_network_interface *network_interfaces_root = NULL, *network_interfaces_last_used = NULL;
-
-static size_t network_interfaces_added = 0, network_interfaces_found = 0;
-
-static void network_interface_free(struct cgroup_network_interface *ifm) {
- if (likely(ifm->st_bandwidth))
- rrdset_is_obsolete___safe_from_collector_thread(ifm->st_bandwidth);
- if (likely(ifm->st_packets))
- rrdset_is_obsolete___safe_from_collector_thread(ifm->st_packets);
- if (likely(ifm->st_errors))
- rrdset_is_obsolete___safe_from_collector_thread(ifm->st_errors);
- if (likely(ifm->st_drops))
- rrdset_is_obsolete___safe_from_collector_thread(ifm->st_drops);
- if (likely(ifm->st_events))
- rrdset_is_obsolete___safe_from_collector_thread(ifm->st_events);
-
- network_interfaces_added--;
- freez(ifm->name);
- freez(ifm);
-}
-
-static void network_interfaces_cleanup() {
- if (likely(network_interfaces_found == network_interfaces_added)) return;
-
- struct cgroup_network_interface *ifm = network_interfaces_root, *last = NULL;
- while(ifm) {
- if (unlikely(!ifm->updated)) {
- // collector_info("Removing network interface '%s', linked after '%s'", ifm->name, last?last->name:"ROOT");
-
- if (network_interfaces_last_used == ifm)
- network_interfaces_last_used = last;
-
- struct cgroup_network_interface *t = ifm;
-
- if (ifm == network_interfaces_root || !last)
- network_interfaces_root = ifm = ifm->next;
-
- else
- last->next = ifm = ifm->next;
-
- t->next = NULL;
- network_interface_free(t);
- }
- else {
- last = ifm;
- ifm->updated = 0;
- ifm = ifm->next;
- }
- }
-}
-
-static struct cgroup_network_interface *get_network_interface(const char *name) {
- struct cgroup_network_interface *ifm;
-
- uint32_t hash = simple_hash(name);
-
- // search it, from the last position to the end
- for(ifm = network_interfaces_last_used ; ifm ; ifm = ifm->next) {
- if (unlikely(hash == ifm->hash && !strcmp(name, ifm->name))) {
- network_interfaces_last_used = ifm->next;
- return ifm;
- }
- }
-
- // search it from the beginning to the last position we used
- for(ifm = network_interfaces_root ; ifm != network_interfaces_last_used ; ifm = ifm->next) {
- if (unlikely(hash == ifm->hash && !strcmp(name, ifm->name))) {
- network_interfaces_last_used = ifm->next;
- return ifm;
- }
- }
-
- // create a new one
- ifm = callocz(1, sizeof(struct cgroup_network_interface));
- ifm->name = strdupz(name);
- ifm->hash = simple_hash(ifm->name);
- ifm->len = strlen(ifm->name);
- network_interfaces_added++;
-
- // link it to the end
- if (network_interfaces_root) {
- struct cgroup_network_interface *e;
- for(e = network_interfaces_root; e->next ; e = e->next) ;
- e->next = ifm;
- }
- else
- network_interfaces_root = ifm;
-
- return ifm;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// getifaddrs
-
-int do_getifaddrs(int update_every, usec_t dt) {
- (void)dt;
-
-#define DEFAULT_EXCLUDED_INTERFACES "lo*"
-#define DEFAULT_PHYSICAL_INTERFACES "igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc*"
-#define CONFIG_SECTION_GETIFADDRS "plugin:freebsd:getifaddrs"
-
- static int enable_new_interfaces = -1;
- static int do_bandwidth_ipv4 = -1, do_bandwidth_ipv6 = -1, do_bandwidth = -1, do_packets = -1, do_bandwidth_net = -1, do_packets_net = -1,
- do_errors = -1, do_drops = -1, do_events = -1;
- static SIMPLE_PATTERN *excluded_interfaces = NULL, *physical_interfaces = NULL;
-
- if (unlikely(enable_new_interfaces == -1)) {
- enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS,
- "enable new interfaces detected at runtime",
- CONFIG_BOOLEAN_AUTO);
-
- do_bandwidth_net = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for physical interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_packets_net = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total packets for physical interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_bandwidth_ipv4 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv4 interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_bandwidth_ipv6 = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "total bandwidth for ipv6 interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "bandwidth for all interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_packets = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "packets for all interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_errors = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "errors for all interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_drops = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "drops for all interfaces",
- CONFIG_BOOLEAN_AUTO);
- do_events = config_get_boolean_ondemand(CONFIG_SECTION_GETIFADDRS, "collisions for all interfaces",
- CONFIG_BOOLEAN_AUTO);
-
- excluded_interfaces = simple_pattern_create(
- config_get(CONFIG_SECTION_GETIFADDRS, "disable by default interfaces matching", DEFAULT_EXCLUDED_INTERFACES),
- NULL,
- SIMPLE_PATTERN_EXACT,
- true);
- physical_interfaces = simple_pattern_create(
- config_get(CONFIG_SECTION_GETIFADDRS, "set physical interfaces for system.net", DEFAULT_PHYSICAL_INTERFACES),
- NULL,
- SIMPLE_PATTERN_EXACT,
- true);
- }
-
- if (likely(do_bandwidth_ipv4 || do_bandwidth_ipv6 || do_bandwidth || do_packets || do_errors || do_bandwidth_net || do_packets_net ||
- do_drops || do_events)) {
- struct ifaddrs *ifap;
-
- if (unlikely(getifaddrs(&ifap))) {
- collector_error("FREEBSD: getifaddrs() failed");
- do_bandwidth_net = 0;
- collector_error("DISABLED: system.net chart");
- do_packets_net = 0;
- collector_error("DISABLED: system.packets chart");
- do_bandwidth_ipv4 = 0;
- collector_error("DISABLED: system.ipv4 chart");
- do_bandwidth_ipv6 = 0;
- collector_error("DISABLED: system.ipv6 chart");
- do_bandwidth = 0;
- collector_error("DISABLED: net.* charts");
- do_packets = 0;
- collector_error("DISABLED: net_packets.* charts");
- do_errors = 0;
- collector_error("DISABLED: net_errors.* charts");
- do_drops = 0;
- collector_error("DISABLED: net_drops.* charts");
- do_events = 0;
- collector_error("DISABLED: net_events.* charts");
- collector_error("DISABLED: getifaddrs module");
- return 1;
- } else {
-#define IFA_DATA(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s)
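-// token-pasting helper: IFA_DATA(ibytes) expands to ((struct if_data *)ifa->ifa_data)->ifi_ibytes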
- struct ifaddrs *ifa;
- struct iftot {
- u_long ift_ibytes;
- u_long ift_obytes;
- u_long ift_ipackets;
- u_long ift_opackets;
- u_long ift_imcasts;
- u_long ift_omcasts;
- } iftot = {0, 0, 0, 0, 0, 0};
-
- if (likely(do_bandwidth_net)) {
-
- iftot.ift_ibytes = iftot.ift_obytes = 0;
- for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr->sa_family != AF_LINK)
- continue;
- if (!simple_pattern_matches(physical_interfaces, ifa->ifa_name))
- continue;
- iftot.ift_ibytes += IFA_DATA(ibytes);
- iftot.ift_obytes += IFA_DATA(obytes);
- }
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost("system",
- "net",
- NULL,
- "network",
- NULL,
- "Network Traffic",
- "kilobits/s",
- "freebsd.plugin",
- "getifaddrs",
- NETDATA_CHART_PRIO_SYSTEM_NET,
- update_every,
- RRDSET_TYPE_AREA
- );
-
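- // the multiplier 8 and divisor BITS_IN_A_KILOBIT (1000) convert byte
- // counters into kilobits/s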
- rd_in = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
- rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
- rrdset_done(st);
- }
-
- if (likely(do_packets_net)) {
- iftot.ift_ipackets = iftot.ift_opackets = iftot.ift_imcasts = iftot.ift_omcasts = 0;
-
- for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr->sa_family != AF_LINK)
- continue;
- if (!simple_pattern_matches(physical_interfaces, ifa->ifa_name))
- continue;
- iftot.ift_ipackets += IFA_DATA(ipackets);
- iftot.ift_opackets += IFA_DATA(opackets);
- iftot.ift_imcasts += IFA_DATA(imcasts);
- iftot.ift_omcasts += IFA_DATA(omcasts);
- }
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_packets_in = NULL, *rd_packets_out = NULL, *rd_packets_m_in = NULL, *rd_packets_m_out = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost("system",
- "packets",
- NULL,
- "network",
- NULL,
- "Network Packets",
- "packets/s",
- "freebsd.plugin",
- "getifaddrs",
- NETDATA_CHART_PRIO_SYSTEM_PACKETS,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_packets_in = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_packets_out = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_packets_m_in = rrddim_add(st, "multicast_received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_packets_m_out = rrddim_add(st, "multicast_sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_packets_in, iftot.ift_ipackets);
- rrddim_set_by_pointer(st, rd_packets_out, iftot.ift_opackets);
- rrddim_set_by_pointer(st, rd_packets_m_in, iftot.ift_imcasts);
- rrddim_set_by_pointer(st, rd_packets_m_out, iftot.ift_omcasts);
- rrdset_done(st);
- }
-
- if (likely(do_bandwidth_ipv4)) {
- iftot.ift_ibytes = iftot.ift_obytes = 0;
- for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr->sa_family != AF_INET)
- continue;
- iftot.ift_ibytes += IFA_DATA(ibytes);
- iftot.ift_obytes += IFA_DATA(obytes);
- }
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost("system",
- "ipv4",
- NULL,
- "network",
- NULL,
- "IPv4 Bandwidth",
- "kilobits/s",
- "freebsd.plugin",
- "getifaddrs",
- NETDATA_CHART_PRIO_SYSTEM_IPV4,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_in = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
- rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
- rrdset_done(st);
- }
-
- if (likely(do_bandwidth_ipv6)) {
- iftot.ift_ibytes = iftot.ift_obytes = 0;
- for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr->sa_family != AF_INET6)
- continue;
- iftot.ift_ibytes += IFA_DATA(ibytes);
- iftot.ift_obytes += IFA_DATA(obytes);
- }
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost("system",
- "ipv6",
- NULL,
- "network",
- NULL,
- "IPv6 Bandwidth",
- "kilobits/s",
- "freebsd.plugin",
- "getifaddrs",
- NETDATA_CHART_PRIO_SYSTEM_IPV6,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_in = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in, iftot.ift_ibytes);
- rrddim_set_by_pointer(st, rd_out, iftot.ift_obytes);
- rrdset_done(st);
- }
-
- network_interfaces_found = 0;
-
- for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr->sa_family != AF_LINK)
- continue;
-
- struct cgroup_network_interface *ifm = get_network_interface(ifa->ifa_name);
- ifm->updated = 1;
- network_interfaces_found++;
-
- if (unlikely(!ifm->configured)) {
- char var_name[4096 + 1];
-
- // this is the first time we see this network interface
-
- // remember we configured it
- ifm->configured = 1;
-
- ifm->enabled = enable_new_interfaces;
-
- if (likely(ifm->enabled))
- ifm->enabled = !simple_pattern_matches(excluded_interfaces, ifa->ifa_name);
-
- snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETIFADDRS, ifa->ifa_name);
- ifm->enabled = config_get_boolean_ondemand(var_name, "enabled", ifm->enabled);
-
- if (unlikely(ifm->enabled == CONFIG_BOOLEAN_NO))
- continue;
-
- ifm->do_bandwidth = config_get_boolean_ondemand(var_name, "bandwidth", do_bandwidth);
- ifm->do_packets = config_get_boolean_ondemand(var_name, "packets", do_packets);
- ifm->do_errors = config_get_boolean_ondemand(var_name, "errors", do_errors);
- ifm->do_drops = config_get_boolean_ondemand(var_name, "drops", do_drops);
- ifm->do_events = config_get_boolean_ondemand(var_name, "events", do_events);
- }
-
- if (unlikely(!ifm->enabled))
- continue;
-
- if (ifm->do_bandwidth == CONFIG_BOOLEAN_YES || (ifm->do_bandwidth == CONFIG_BOOLEAN_AUTO &&
- (IFA_DATA(ibytes) ||
- IFA_DATA(obytes) ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!ifm->st_bandwidth)) {
- ifm->st_bandwidth = rrdset_create_localhost("net",
- ifa->ifa_name,
- NULL,
- ifa->ifa_name,
- "net.net",
- "Bandwidth",
- "kilobits/s",
- "freebsd.plugin",
- "getifaddrs",
- NETDATA_CHART_PRIO_FIRST_NET_IFACE,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- ifm->rd_bandwidth_in = rrddim_add(ifm->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- ifm->rd_bandwidth_out = rrddim_add(ifm->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_in, IFA_DATA(ibytes));
- rrddim_set_by_pointer(ifm->st_bandwidth, ifm->rd_bandwidth_out, IFA_DATA(obytes));
- rrdset_done(ifm->st_bandwidth);
- }
-
- if (ifm->do_packets == CONFIG_BOOLEAN_YES || (ifm->do_packets == CONFIG_BOOLEAN_AUTO &&
- (IFA_DATA(ipackets) ||
- IFA_DATA(opackets) ||
- IFA_DATA(imcasts) ||
- IFA_DATA(omcasts) ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!ifm->st_packets)) {
- ifm->st_packets = rrdset_create_localhost("net_packets",
- ifa->ifa_name,
- NULL,
- ifa->ifa_name,
- "net.packets",
- "Packets",
- "packets/s",
- "freebsd.plugin",
- "getifaddrs",
- NETDATA_CHART_PRIO_FIRST_NET_PACKETS,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(ifm->st_packets, RRDSET_FLAG_DETAIL);
-
- ifm->rd_packets_in = rrddim_add(ifm->st_packets, "received", NULL, 1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- ifm->rd_packets_out = rrddim_add(ifm->st_packets, "sent", NULL, -1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- ifm->rd_packets_m_in = rrddim_add(ifm->st_packets, "multicast_received", NULL, 1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- ifm->rd_packets_m_out = rrddim_add(ifm->st_packets, "multicast_sent", NULL, -1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_in, IFA_DATA(ipackets));
- rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_out, IFA_DATA(opackets));
- rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_in, IFA_DATA(imcasts));
- rrddim_set_by_pointer(ifm->st_packets, ifm->rd_packets_m_out, IFA_DATA(omcasts));
- rrdset_done(ifm->st_packets);
- }
-
- if (ifm->do_errors == CONFIG_BOOLEAN_YES || (ifm->do_errors == CONFIG_BOOLEAN_AUTO &&
- (IFA_DATA(ierrors) ||
- IFA_DATA(oerrors) ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!ifm->st_errors)) {
- ifm->st_errors = rrdset_create_localhost("net_errors",
- ifa->ifa_name,
- NULL,
- ifa->ifa_name,
- "net.errors",
- "Interface Errors",
- "errors/s",
- "freebsd.plugin",
- "getifaddrs",
- NETDATA_CHART_PRIO_FIRST_NET_ERRORS,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(ifm->st_errors, RRDSET_FLAG_DETAIL);
-
- ifm->rd_errors_in = rrddim_add(ifm->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- ifm->rd_errors_out = rrddim_add(ifm->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_in, IFA_DATA(ierrors));
- rrddim_set_by_pointer(ifm->st_errors, ifm->rd_errors_out, IFA_DATA(oerrors));
- rrdset_done(ifm->st_errors);
- }
-
- if (ifm->do_drops == CONFIG_BOOLEAN_YES || (ifm->do_drops == CONFIG_BOOLEAN_AUTO &&
- (IFA_DATA(iqdrops) ||
- #if __FreeBSD__ >= 11
- IFA_DATA(oqdrops) ||
- #endif
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!ifm->st_drops)) {
- ifm->st_drops = rrdset_create_localhost("net_drops",
- ifa->ifa_name,
- NULL,
- ifa->ifa_name,
- "net.drops",
- "Interface Drops",
- "drops/s",
- "freebsd.plugin",
- "getifaddrs",
- NETDATA_CHART_PRIO_FIRST_NET_DROPS,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(ifm->st_drops, RRDSET_FLAG_DETAIL);
-
- ifm->rd_drops_in = rrddim_add(ifm->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#if __FreeBSD__ >= 11
- ifm->rd_drops_out = rrddim_add(ifm->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-#endif
- }
-
- rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_in, IFA_DATA(iqdrops));
-#if __FreeBSD__ >= 11
- rrddim_set_by_pointer(ifm->st_drops, ifm->rd_drops_out, IFA_DATA(oqdrops));
-#endif
- rrdset_done(ifm->st_drops);
- }
-
- if (ifm->do_events == CONFIG_BOOLEAN_YES || (ifm->do_events == CONFIG_BOOLEAN_AUTO &&
- (IFA_DATA(collisions) ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!ifm->st_events)) {
- ifm->st_events = rrdset_create_localhost("net_events",
- ifa->ifa_name,
- NULL,
- ifa->ifa_name,
- "net.events",
- "Network Interface Events",
- "events/s",
- "freebsd.plugin",
- "getifaddrs",
- NETDATA_CHART_PRIO_FIRST_NET_EVENTS,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(ifm->st_events, RRDSET_FLAG_DETAIL);
-
- ifm->rd_events_coll = rrddim_add(ifm->st_events, "collisions", NULL, -1, 1,
- RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(ifm->st_events, ifm->rd_events_coll, IFA_DATA(collisions));
- rrdset_done(ifm->st_events);
- }
- }
-
- freeifaddrs(ifap);
- }
- } else {
- collector_error("DISABLED: getifaddrs module");
- return 1;
- }
-
- network_interfaces_cleanup();
-
- return 0;
-}
diff --git a/collectors/freebsd.plugin/freebsd_getmntinfo.c b/collectors/freebsd.plugin/freebsd_getmntinfo.c
deleted file mode 100644
index d55eb3d4a..000000000
--- a/collectors/freebsd.plugin/freebsd_getmntinfo.c
+++ /dev/null
@@ -1,297 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_freebsd.h"
-
-#include <sys/mount.h>
-
-struct mount_point {
- char *name;
- uint32_t hash;
- size_t len;
-
- // flags
- int configured;
- int enabled;
- int updated;
-
- int do_space;
- int do_inodes;
-
- size_t collected; // the number of times this has been collected
-
- // charts and dimensions
-
- RRDSET *st_space;
- RRDDIM *rd_space_used;
- RRDDIM *rd_space_avail;
- RRDDIM *rd_space_reserved;
-
- RRDSET *st_inodes;
- RRDDIM *rd_inodes_used;
- RRDDIM *rd_inodes_avail;
-
- struct mount_point *next;
-};
-
-static struct mount_point *mount_points_root = NULL, *mount_points_last_used = NULL;
-
-static size_t mount_points_added = 0, mount_points_found = 0;
-
-static void mount_point_free(struct mount_point *m) {
- if (likely(m->st_space))
- rrdset_is_obsolete___safe_from_collector_thread(m->st_space);
- if (likely(m->st_inodes))
- rrdset_is_obsolete___safe_from_collector_thread(m->st_inodes);
-
- mount_points_added--;
- freez(m->name);
- freez(m);
-}
-
-static void mount_points_cleanup() {
- if (likely(mount_points_found == mount_points_added)) return;
-
- struct mount_point *m = mount_points_root, *last = NULL;
- while(m) {
- if (unlikely(!m->updated)) {
- // collector_info("Removing mount point '%s', linked after '%s'", m->name, last?last->name:"ROOT");
-
- if (mount_points_last_used == m)
- mount_points_last_used = last;
-
- struct mount_point *t = m;
-
- if (m == mount_points_root || !last)
- mount_points_root = m = m->next;
-
- else
- last->next = m = m->next;
-
- t->next = NULL;
- mount_point_free(t);
- }
- else {
- last = m;
- m->updated = 0;
- m = m->next;
- }
- }
-}
-
-static struct mount_point *get_mount_point(const char *name) {
- struct mount_point *m;
-
- uint32_t hash = simple_hash(name);
-
- // search it, from the last position to the end
- for(m = mount_points_last_used ; m ; m = m->next) {
- if (unlikely(hash == m->hash && !strcmp(name, m->name))) {
- mount_points_last_used = m->next;
- return m;
- }
- }
-
- // search it from the beginning to the last position we used
- for(m = mount_points_root ; m != mount_points_last_used ; m = m->next) {
- if (unlikely(hash == m->hash && !strcmp(name, m->name))) {
- mount_points_last_used = m->next;
- return m;
- }
- }
-
- // create a new one
- m = callocz(1, sizeof(struct mount_point));
- m->name = strdupz(name);
- m->hash = simple_hash(m->name);
- m->len = strlen(m->name);
- mount_points_added++;
-
- // link it to the end
- if (mount_points_root) {
- struct mount_point *e;
- for(e = mount_points_root; e->next ; e = e->next) ;
- e->next = m;
- }
- else
- mount_points_root = m;
-
- return m;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// getmntinfo
-
-int do_getmntinfo(int update_every, usec_t dt) {
- (void)dt;
-
-#define DEFAULT_EXCLUDED_PATHS "/proc/*"
-// taken from gnulib/mountlist.c and shortened to FreeBSD-related fstypes
-#define DEFAULT_EXCLUDED_FILESYSTEMS "autofs procfs subfs devfs none"
-#define CONFIG_SECTION_GETMNTINFO "plugin:freebsd:getmntinfo"
-
- static int enable_new_mount_points = -1;
- static int do_space = -1, do_inodes = -1;
- static SIMPLE_PATTERN *excluded_mountpoints = NULL;
- static SIMPLE_PATTERN *excluded_filesystems = NULL;
-
- if (unlikely(enable_new_mount_points == -1)) {
- enable_new_mount_points = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO,
- "enable new mount points detected at runtime",
- CONFIG_BOOLEAN_AUTO);
-
- do_space = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "space usage for all disks", CONFIG_BOOLEAN_AUTO);
- do_inodes = config_get_boolean_ondemand(CONFIG_SECTION_GETMNTINFO, "inodes usage for all disks", CONFIG_BOOLEAN_AUTO);
-
- excluded_mountpoints = simple_pattern_create(
- config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on paths", DEFAULT_EXCLUDED_PATHS),
- NULL,
- SIMPLE_PATTERN_EXACT,
- true);
-
- excluded_filesystems = simple_pattern_create(
- config_get(CONFIG_SECTION_GETMNTINFO, "exclude space metrics on filesystems", DEFAULT_EXCLUDED_FILESYSTEMS),
- NULL,
- SIMPLE_PATTERN_EXACT,
- true);
- }
-
- if (likely(do_space || do_inodes)) {
- struct statfs *mntbuf;
- int mntsize;
-
- // there is no mount info in sysctl MIBs
- if (unlikely(!(mntsize = getmntinfo(&mntbuf, MNT_NOWAIT)))) {
- collector_error("FREEBSD: getmntinfo() failed");
- do_space = 0;
- collector_error("DISABLED: disk_space.* charts");
- do_inodes = 0;
- collector_error("DISABLED: disk_inodes.* charts");
- collector_error("DISABLED: getmntinfo module");
- return 1;
- } else {
- int i;
-
- mount_points_found = 0;
-
- for (i = 0; i < mntsize; i++) {
- char title[4096 + 1];
-
- struct mount_point *m = get_mount_point(mntbuf[i].f_mntonname);
- m->updated = 1;
- mount_points_found++;
-
- if (unlikely(!m->configured)) {
- char var_name[4096 + 1];
-
- // this is the first time we see this filesystem
-
- // remember we configured it
- m->configured = 1;
-
- m->enabled = enable_new_mount_points;
-
- if (likely(m->enabled))
- m->enabled = !(simple_pattern_matches(excluded_mountpoints, mntbuf[i].f_mntonname)
- || simple_pattern_matches(excluded_filesystems, mntbuf[i].f_fstypename));
-
- snprintfz(var_name, 4096, "%s:%s", CONFIG_SECTION_GETMNTINFO, mntbuf[i].f_mntonname);
- m->enabled = config_get_boolean_ondemand(var_name, "enabled", m->enabled);
-
- if (unlikely(m->enabled == CONFIG_BOOLEAN_NO))
- continue;
-
- m->do_space = config_get_boolean_ondemand(var_name, "space usage", do_space);
- m->do_inodes = config_get_boolean_ondemand(var_name, "inodes usage", do_inodes);
- }
-
- if (unlikely(!m->enabled))
- continue;
-
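- // skip read-only mount points that have never been collected;
- // their usage cannot change while they stay read-only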
- if (unlikely(mntbuf[i].f_flags & MNT_RDONLY && !m->collected))
- continue;
-
- int rendered = 0;
-
- if (m->do_space == CONFIG_BOOLEAN_YES || (m->do_space == CONFIG_BOOLEAN_AUTO &&
- (mntbuf[i].f_blocks > 2 ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!m->st_space)) {
- snprintfz(title, sizeof(title) - 1, "Disk Space Usage for %s [%s]",
- mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
- m->st_space = rrdset_create_localhost("disk_space",
- mntbuf[i].f_mntonname,
- NULL,
- mntbuf[i].f_mntonname,
- "disk.space",
- title,
- "GiB",
- "freebsd.plugin",
- "getmntinfo",
- NETDATA_CHART_PRIO_DISKSPACE_SPACE,
- update_every,
- RRDSET_TYPE_STACKED
- );
-
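- // block counts are multiplied by the filesystem block size and
- // divided by GIGA_FACTOR to chart GiB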
- m->rd_space_avail = rrddim_add(m->st_space, "avail", NULL,
- mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- m->rd_space_used = rrddim_add(m->st_space, "used", NULL,
- mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- m->rd_space_reserved = rrddim_add(m->st_space, "reserved_for_root", "reserved for root",
- mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- }
-
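- // f_bfree includes the root-reserved blocks while f_bavail excludes
- // them, so their difference is the space reserved for root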
- rrddim_set_by_pointer(m->st_space, m->rd_space_avail, (collected_number) mntbuf[i].f_bavail);
- rrddim_set_by_pointer(m->st_space, m->rd_space_used, (collected_number) (mntbuf[i].f_blocks -
- mntbuf[i].f_bfree));
- rrddim_set_by_pointer(m->st_space, m->rd_space_reserved, (collected_number) (mntbuf[i].f_bfree -
- mntbuf[i].f_bavail));
- rrdset_done(m->st_space);
-
- rendered++;
- }
-
- if (m->do_inodes == CONFIG_BOOLEAN_YES || (m->do_inodes == CONFIG_BOOLEAN_AUTO &&
- (mntbuf[i].f_files > 1 ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- if (unlikely(!m->st_inodes)) {
- snprintfz(title, sizeof(title) - 1, "Disk Files (inodes) Usage for %s [%s]",
- mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
- m->st_inodes = rrdset_create_localhost("disk_inodes",
- mntbuf[i].f_mntonname,
- NULL,
- mntbuf[i].f_mntonname,
- "disk.inodes",
- title,
- "inodes",
- "freebsd.plugin",
- "getmntinfo",
- NETDATA_CHART_PRIO_DISKSPACE_INODES,
- update_every,
- RRDSET_TYPE_STACKED
- );
-
- m->rd_inodes_avail = rrddim_add(m->st_inodes, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- m->rd_inodes_used = rrddim_add(m->st_inodes, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_avail, (collected_number) mntbuf[i].f_ffree);
- rrddim_set_by_pointer(m->st_inodes, m->rd_inodes_used, (collected_number) (mntbuf[i].f_files -
- mntbuf[i].f_ffree));
- rrdset_done(m->st_inodes);
-
- rendered++;
- }
-
- if (likely(rendered))
- m->collected++;
- }
- }
- } else {
- collector_error("DISABLED: getmntinfo module");
- return 1;
- }
-
- mount_points_cleanup();
-
- return 0;
-}
diff --git a/collectors/freebsd.plugin/freebsd_ipfw.c b/collectors/freebsd.plugin/freebsd_ipfw.c
deleted file mode 100644
index dcb771ce9..000000000
--- a/collectors/freebsd.plugin/freebsd_ipfw.c
+++ /dev/null
@@ -1,359 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_freebsd.h"
-
-#include <netinet/ip_fw.h>
-
-#define FREE_MEM_THRESHOLD 10000 // number of unused chunks that trigger memory freeing
-
-#define COMMON_IPFW_ERROR() collector_error("DISABLED: ipfw.packets chart"); \
- collector_error("DISABLED: ipfw.bytes chart"); \
- collector_error("DISABLED: ipfw.dyn_active chart"); \
- collector_error("DISABLED: ipfw.dyn_expired chart"); \
- collector_error("DISABLED: ipfw.mem chart");
-
-// --------------------------------------------------------------------------------------------------------------------
-// ipfw
-
-int do_ipfw(int update_every, usec_t dt) {
- (void)dt;
-#if __FreeBSD__ >= 11
-
- static int do_static = -1, do_dynamic = -1, do_mem = -1;
-
- if (unlikely(do_static == -1)) {
- do_static = config_get_boolean("plugin:freebsd:ipfw", "counters for static rules", 1);
- do_dynamic = config_get_boolean("plugin:freebsd:ipfw", "number of dynamic rules", 1);
- do_mem = config_get_boolean("plugin:freebsd:ipfw", "allocated memory", 1);
- }
-
- // variables for getting ipfw configuration
-
- int error;
- static int ipfw_socket = -1;
- static ipfw_cfg_lheader *cfg = NULL;
- ip_fw3_opheader *op3 = NULL;
- static socklen_t *optlen = NULL, cfg_size = 0;
-
- // variables for static rules handling
-
- ipfw_obj_ctlv *ctlv = NULL;
- ipfw_obj_tlv *rbase = NULL;
- int rcnt = 0;
-
- int n, seen;
- struct ip_fw_rule *rule;
- struct ip_fw_bcounter *cntr;
- int c = 0;
-
- char rule_num_str[12];
-
- // variables for dynamic rules handling
-
- caddr_t dynbase = NULL;
- size_t dynsz = 0;
- size_t readsz = sizeof(*cfg);
- int ttype = 0;
- ipfw_obj_tlv *tlv;
- ipfw_dyn_rule *dyn_rule;
- uint16_t rulenum, prev_rulenum = IPFW_DEFAULT_RULE;
- unsigned srn, static_rules_num = 0;
- static size_t dyn_rules_num_size = 0;
-
- static struct dyn_rule_num {
- uint16_t rule_num;
- uint32_t active_rules;
- uint32_t expired_rules;
- } *dyn_rules_num = NULL;
-
- uint32_t *dyn_rules_counter;
-
- if (likely(do_static | do_dynamic | do_mem)) {
-
- // initialize the smallest ipfw_cfg_lheader possible
-
- if (unlikely((optlen == NULL) || (cfg == NULL))) {
- optlen = reallocz(optlen, sizeof(socklen_t));
- *optlen = cfg_size = 32;
- cfg = reallocz(cfg, *optlen);
- }
-
- // get socket descriptor and initialize ipfw_cfg_lheader structure
-
- if (unlikely(ipfw_socket == -1))
- ipfw_socket = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
- if (unlikely(ipfw_socket == -1)) {
- collector_error("FREEBSD: can't get socket for ipfw configuration");
- collector_error("FREEBSD: run netdata as root to get access to ipfw data");
- COMMON_IPFW_ERROR();
- return 1;
- }
-
- bzero(cfg, 32);
- cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES;
- op3 = &cfg->opheader;
- op3->opcode = IP_FW_XGET;
-
- // get the ipfw configuration size, then get the configuration
-
- *optlen = cfg_size;
- error = getsockopt(ipfw_socket, IPPROTO_IP, IP_FW3, op3, optlen);
- if (error)
- if (errno != ENOMEM) {
- collector_error("FREEBSD: ipfw socket reading error");
- COMMON_IPFW_ERROR();
- return 1;
- }
- if ((cfg->size > cfg_size) || ((cfg_size - cfg->size) > sizeof(struct dyn_rule_num) * FREE_MEM_THRESHOLD)) {
- *optlen = cfg_size = cfg->size;
- cfg = reallocz(cfg, *optlen);
- bzero(cfg, 32);
- cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES;
- op3 = &cfg->opheader;
- op3->opcode = IP_FW_XGET;
- error = getsockopt(ipfw_socket, IPPROTO_IP, IP_FW3, op3, optlen);
- if (error) {
- collector_error("FREEBSD: ipfw socket reading error");
- COMMON_IPFW_ERROR();
- return 1;
- }
- }
-
- // go through static rules configuration structures
-
- ctlv = (ipfw_obj_ctlv *) (cfg + 1);
-
- if (cfg->flags & IPFW_CFG_GET_STATIC) {
- /* We've requested static rules */
- if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) {
- readsz += ctlv->head.length;
- ctlv = (ipfw_obj_ctlv *) ((caddr_t) ctlv +
- ctlv->head.length);
- }
-
- if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
- rbase = (ipfw_obj_tlv *) (ctlv + 1);
- rcnt = ctlv->count;
- readsz += ctlv->head.length;
- ctlv = (ipfw_obj_ctlv *) ((caddr_t) ctlv + ctlv->head.length);
- }
- }
-
- if ((cfg->flags & IPFW_CFG_GET_STATES) && (readsz != *optlen)) {
- /* We may have some dynamic states */
- dynsz = *optlen - readsz;
- /* Skip empty header */
- if (dynsz != sizeof(ipfw_obj_ctlv))
- dynbase = (caddr_t) ctlv;
- else
- dynsz = 0;
- }
-
- if (likely(do_mem)) {
- static RRDSET *st_mem = NULL;
- static RRDDIM *rd_dyn_mem = NULL;
- static RRDDIM *rd_stat_mem = NULL;
-
- if (unlikely(!st_mem)) {
- st_mem = rrdset_create_localhost("ipfw",
- "mem",
- NULL,
- "memory allocated",
- NULL,
- "Memory allocated by rules",
- "bytes",
- "freebsd.plugin",
- "ipfw",
- NETDATA_CHART_PRIO_IPFW_MEM,
- update_every,
- RRDSET_TYPE_STACKED
- );
- rrdset_flag_set(st_mem, RRDSET_FLAG_DETAIL);
-
- rd_dyn_mem = rrddim_add(st_mem, "dynamic", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_stat_mem = rrddim_add(st_mem, "static", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem, rd_dyn_mem, dynsz);
- rrddim_set_by_pointer(st_mem, rd_stat_mem, *optlen - dynsz);
- rrdset_done(st_mem);
- }
-
- static RRDSET *st_packets = NULL, *st_bytes = NULL;
- RRDDIM *rd_packets = NULL, *rd_bytes = NULL;
-
- if (likely(do_static || do_dynamic)) {
- if (likely(do_static)) {
- if (unlikely(!st_packets)) {
- st_packets = rrdset_create_localhost("ipfw",
- "packets",
- NULL,
- "static rules",
- NULL,
- "Packets",
- "packets/s",
- "freebsd.plugin",
- "ipfw",
- NETDATA_CHART_PRIO_IPFW_PACKETS,
- update_every,
- RRDSET_TYPE_STACKED
- );
- }
-
- if (unlikely(!st_bytes)) {
- st_bytes = rrdset_create_localhost("ipfw",
- "bytes",
- NULL,
- "static rules",
- NULL,
- "Bytes",
- "bytes/s",
- "freebsd.plugin",
- "ipfw",
- NETDATA_CHART_PRIO_IPFW_BYTES,
- update_every,
- RRDSET_TYPE_STACKED
- );
- }
- }
-
- for (n = seen = 0; n < rcnt; n++, rbase = (ipfw_obj_tlv *) ((caddr_t) rbase + rbase->length)) {
- cntr = (struct ip_fw_bcounter *) (rbase + 1);
- rule = (struct ip_fw_rule *) ((caddr_t) cntr + cntr->size);
- if (rule->rulenum != prev_rulenum)
- static_rules_num++;
- if (rule->rulenum > IPFW_DEFAULT_RULE)
- break;
-
- if (likely(do_static)) {
- sprintf(rule_num_str, "%"PRIu32"_%"PRIu32"", (uint32_t)rule->rulenum, (uint32_t)rule->id);
-
- rd_packets = rrddim_find_active(st_packets, rule_num_str);
- if (unlikely(!rd_packets))
- rd_packets = rrddim_add(st_packets, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_set_by_pointer(st_packets, rd_packets, cntr->pcnt);
-
- rd_bytes = rrddim_find_active(st_bytes, rule_num_str);
- if (unlikely(!rd_bytes))
- rd_bytes = rrddim_add(st_bytes, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_set_by_pointer(st_bytes, rd_bytes, cntr->bcnt);
- }
-
- c += rbase->length;
- seen++;
- }
-
- if (likely(do_static)) {
- rrdset_done(st_packets);
- rrdset_done(st_bytes);
- }
- }
-
- // go through dynamic rules configuration structures
-
- if (likely(do_dynamic && (dynsz > 0))) {
- if ((dyn_rules_num_size < sizeof(struct dyn_rule_num) * static_rules_num) ||
- ((dyn_rules_num_size - sizeof(struct dyn_rule_num) * static_rules_num) >
- sizeof(struct dyn_rule_num) * FREE_MEM_THRESHOLD)) {
- dyn_rules_num_size = sizeof(struct dyn_rule_num) * static_rules_num;
- dyn_rules_num = reallocz(dyn_rules_num, dyn_rules_num_size);
- }
- bzero(dyn_rules_num, sizeof(struct dyn_rule_num) * static_rules_num);
- dyn_rules_num->rule_num = IPFW_DEFAULT_RULE;
-
- if (dynsz > 0 && ctlv->head.type == IPFW_TLV_DYNSTATE_LIST) {
- dynbase += sizeof(*ctlv);
- dynsz -= sizeof(*ctlv);
- ttype = IPFW_TLV_DYN_ENT;
- }
-
- while (dynsz > 0) {
- tlv = (ipfw_obj_tlv *) dynbase;
- if (tlv->type != ttype)
- break;
-
- dyn_rule = (ipfw_dyn_rule *) (tlv + 1);
- bcopy(&dyn_rule->rule, &rulenum, sizeof(rulenum));
-
- for (srn = 0; srn < (static_rules_num - 1); srn++) {
- if (dyn_rule->expire > 0)
- dyn_rules_counter = &dyn_rules_num[srn].active_rules;
- else
- dyn_rules_counter = &dyn_rules_num[srn].expired_rules;
- if (dyn_rules_num[srn].rule_num == rulenum) {
- (*dyn_rules_counter)++;
- break;
- }
- if (dyn_rules_num[srn].rule_num == IPFW_DEFAULT_RULE) {
- dyn_rules_num[srn].rule_num = rulenum;
- dyn_rules_num[srn + 1].rule_num = IPFW_DEFAULT_RULE;
- (*dyn_rules_counter)++;
- break;
- }
- }
-
- dynsz -= tlv->length;
- dynbase += tlv->length;
- }
-
- static RRDSET *st_active = NULL, *st_expired = NULL;
- RRDDIM *rd_active = NULL, *rd_expired = NULL;
-
- if (unlikely(!st_active)) {
- st_active = rrdset_create_localhost("ipfw",
- "active",
- NULL,
- "dynamic_rules",
- NULL,
- "Active rules",
- "rules",
- "freebsd.plugin",
- "ipfw",
- NETDATA_CHART_PRIO_IPFW_ACTIVE,
- update_every,
- RRDSET_TYPE_STACKED
- );
- }
-
- if (unlikely(!st_expired)) {
- st_expired = rrdset_create_localhost("ipfw",
- "expired",
- NULL,
- "dynamic_rules",
- NULL,
- "Expired rules",
- "rules",
- "freebsd.plugin",
- "ipfw",
- NETDATA_CHART_PRIO_IPFW_EXPIRED,
- update_every,
- RRDSET_TYPE_STACKED
- );
- }
-
- for (srn = 0; (srn < (static_rules_num - 1)) && (dyn_rules_num[srn].rule_num != IPFW_DEFAULT_RULE); srn++) {
- sprintf(rule_num_str, "%d", dyn_rules_num[srn].rule_num);
-
- rd_active = rrddim_find_active(st_active, rule_num_str);
- if (unlikely(!rd_active))
- rd_active = rrddim_add(st_active, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_set_by_pointer(st_active, rd_active, dyn_rules_num[srn].active_rules);
-
- rd_expired = rrddim_find_active(st_expired, rule_num_str);
- if (unlikely(!rd_expired))
- rd_expired = rrddim_add(st_expired, rule_num_str, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_set_by_pointer(st_expired, rd_expired, dyn_rules_num[srn].expired_rules);
- }
-
- rrdset_done(st_active);
- rrdset_done(st_expired);
- }
- }
-
- return 0;
-#else
- collector_error("FREEBSD: ipfw charts supported for FreeBSD 11.0 and newer releases only");
- COMMON_IPFW_ERROR();
- return 1;
-#endif
-}
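The core of `do_ipfw()` above is a grow-and-retry read of the whole firewall state through `getsockopt(IP_FW3)`: ask with a small buffer, and when the kernel reports `ENOMEM`, resize to the length it wrote into `cfg->size` and ask again. A standalone sketch of just that pattern, assuming a raw socket opened with `socket(AF_INET, SOCK_RAW, IPPROTO_RAW)` and with error handling abbreviated (FreeBSD only; `fetch_ipfw_cfg` is a hypothetical helper name):

```c
// Standalone sketch of the grow-and-retry read used by do_ipfw() above.
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_fw.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

static ipfw_cfg_lheader *fetch_ipfw_cfg(int s, socklen_t *lenp) {
    socklen_t len = sizeof(ipfw_cfg_lheader);   // smallest header-only request
    ipfw_cfg_lheader *cfg = calloc(1, len);
    if (!cfg) return NULL;

    cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES;
    cfg->opheader.opcode = IP_FW_XGET;

    if (getsockopt(s, IPPROTO_IP, IP_FW3, &cfg->opheader, &len) != 0) {
        if (errno != ENOMEM) { free(cfg); return NULL; }

        // cfg->size now holds the space the full snapshot needs; retry with it
        socklen_t need = cfg->size;
        ipfw_cfg_lheader *bigger = realloc(cfg, need);
        if (!bigger) { free(cfg); return NULL; }
        cfg = bigger;
        memset(cfg, 0, sizeof(*cfg));
        cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS | IPFW_CFG_GET_STATES;
        cfg->opheader.opcode = IP_FW_XGET;
        len = need;
        if (getsockopt(s, IPPROTO_IP, IP_FW3, &cfg->opheader, &len) != 0) {
            free(cfg);
            return NULL;
        }
    }
    *lenp = len;
    return cfg;
}
```

The collector keeps the buffer and socket in statics so the common case (ruleset unchanged in size) is a single `getsockopt()` per iteration.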
diff --git a/collectors/freebsd.plugin/freebsd_kstat_zfs.c b/collectors/freebsd.plugin/freebsd_kstat_zfs.c
deleted file mode 100644
index 165efa17c..000000000
--- a/collectors/freebsd.plugin/freebsd_kstat_zfs.c
+++ /dev/null
@@ -1,304 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_freebsd.h"
-#include "collectors/proc.plugin/zfs_common.h"
-
-extern struct arcstats arcstats;
-
-unsigned long long zfs_arcstats_shrinkable_cache_size_bytes = 0;
-
-// kstat.zfs.misc.arcstats
-
-int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) {
- (void)dt;
-
- static int show_zero_charts = -1;
- if(unlikely(show_zero_charts == -1))
- show_zero_charts = config_get_boolean_ondemand("plugin:freebsd:zfs_arcstats", "show zero charts", CONFIG_BOOLEAN_NO);
-
- unsigned long long l2_size;
- size_t uint64_t_size = sizeof(uint64_t);
- static struct mibs {
- int hits[5];
- int misses[5];
- int demand_data_hits[5];
- int demand_data_misses[5];
- int demand_metadata_hits[5];
- int demand_metadata_misses[5];
- int prefetch_data_hits[5];
- int prefetch_data_misses[5];
- int prefetch_metadata_hits[5];
- int prefetch_metadata_misses[5];
- int mru_hits[5];
- int mru_ghost_hits[5];
- int mfu_hits[5];
- int mfu_ghost_hits[5];
- int deleted[5];
- int mutex_miss[5];
- int evict_skip[5];
- // int evict_not_enough[5];
- // int evict_l2_cached[5];
- // int evict_l2_eligible[5];
- // int evict_l2_ineligible[5];
- // int evict_l2_skip[5];
- int hash_elements[5];
- int hash_elements_max[5];
- int hash_collisions[5];
- int hash_chains[5];
- int hash_chain_max[5];
- int p[5];
- int c[5];
- int c_min[5];
- int c_max[5];
- int size[5];
- // int hdr_size[5];
- // int data_size[5];
- // int metadata_size[5];
- // int other_size[5];
- // int anon_size[5];
- // int anon_evictable_data[5];
- // int anon_evictable_metadata[5];
- int mru_size[5];
- // int mru_evictable_data[5];
- // int mru_evictable_metadata[5];
- // int mru_ghost_size[5];
- // int mru_ghost_evictable_data[5];
- // int mru_ghost_evictable_metadata[5];
- int mfu_size[5];
- // int mfu_evictable_data[5];
- // int mfu_evictable_metadata[5];
- // int mfu_ghost_size[5];
- // int mfu_ghost_evictable_data[5];
- // int mfu_ghost_evictable_metadata[5];
- int l2_hits[5];
- int l2_misses[5];
- // int l2_feeds[5];
- // int l2_rw_clash[5];
- int l2_read_bytes[5];
- int l2_write_bytes[5];
- // int l2_writes_sent[5];
- // int l2_writes_done[5];
- // int l2_writes_error[5];
- // int l2_writes_lock_retry[5];
- // int l2_evict_lock_retry[5];
- // int l2_evict_reading[5];
- // int l2_evict_l1cached[5];
- // int l2_free_on_write[5];
- // int l2_cdata_free_on_write[5];
- // int l2_abort_lowmem[5];
- // int l2_cksum_bad[5];
- // int l2_io_error[5];
- int l2_size[5];
- int l2_asize[5];
- // int l2_hdr_size[5];
- // int l2_compress_successes[5];
- // int l2_compress_zeros[5];
- // int l2_compress_failures[5];
- int memory_throttle_count[5];
- // int duplicate_buffers[5];
- // int duplicate_buffers_size[5];
- // int duplicate_reads[5];
- // int memory_direct_count[5];
- // int memory_indirect_count[5];
- // int arc_no_grow[5];
- // int arc_tempreserve[5];
- // int arc_loaned_bytes[5];
- // int arc_prune[5];
- // int arc_meta_used[5];
- // int arc_meta_limit[5];
- // int arc_meta_max[5];
- // int arc_meta_min[5];
- // int arc_need_free[5];
- // int arc_sys_free[5];
- } mibs;
-
- arcstats.l2exist = -1;
-
- if(unlikely(sysctlbyname("kstat.zfs.misc.arcstats.l2_size", &l2_size, &uint64_t_size, NULL, 0)))
- return 0;
-
- if(likely(l2_size))
- arcstats.l2exist = 1;
- else
- arcstats.l2exist = 0;
-
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hits", mibs.hits, arcstats.hits);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.misses", mibs.misses, arcstats.misses);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_data_hits", mibs.demand_data_hits, arcstats.demand_data_hits);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_data_misses", mibs.demand_data_misses, arcstats.demand_data_misses);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_metadata_hits", mibs.demand_metadata_hits, arcstats.demand_metadata_hits);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.demand_metadata_misses", mibs.demand_metadata_misses, arcstats.demand_metadata_misses);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_data_hits", mibs.prefetch_data_hits, arcstats.prefetch_data_hits);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_data_misses", mibs.prefetch_data_misses, arcstats.prefetch_data_misses);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_metadata_hits", mibs.prefetch_metadata_hits, arcstats.prefetch_metadata_hits);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.prefetch_metadata_misses", mibs.prefetch_metadata_misses, arcstats.prefetch_metadata_misses);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_hits", mibs.mru_hits, arcstats.mru_hits);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_hits", mibs.mru_ghost_hits, arcstats.mru_ghost_hits);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_hits", mibs.mfu_hits, arcstats.mfu_hits);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_hits", mibs.mfu_ghost_hits, arcstats.mfu_ghost_hits);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.deleted", mibs.deleted, arcstats.deleted);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mutex_miss", mibs.mutex_miss, arcstats.mutex_miss);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_skip", mibs.evict_skip, arcstats.evict_skip);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_not_enough", mibs.evict_not_enough, arcstats.evict_not_enough);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_cached", mibs.evict_l2_cached, arcstats.evict_l2_cached);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_eligible", mibs.evict_l2_eligible, arcstats.evict_l2_eligible);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_ineligible", mibs.evict_l2_ineligible, arcstats.evict_l2_ineligible);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.evict_l2_skip", mibs.evict_l2_skip, arcstats.evict_l2_skip);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_elements", mibs.hash_elements, arcstats.hash_elements);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_elements_max", mibs.hash_elements_max, arcstats.hash_elements_max);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_collisions", mibs.hash_collisions, arcstats.hash_collisions);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_chains", mibs.hash_chains, arcstats.hash_chains);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hash_chain_max", mibs.hash_chain_max, arcstats.hash_chain_max);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.p", mibs.p, arcstats.p);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c", mibs.c, arcstats.c);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c_min", mibs.c_min, arcstats.c_min);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.c_max", mibs.c_max, arcstats.c_max);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.size", mibs.size, arcstats.size);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.hdr_size", mibs.hdr_size, arcstats.hdr_size);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.data_size", mibs.data_size, arcstats.data_size);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.metadata_size", mibs.metadata_size, arcstats.metadata_size);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.other_size", mibs.other_size, arcstats.other_size);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_size", mibs.anon_size, arcstats.anon_size);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_evictable_data", mibs.anon_evictable_data, arcstats.anon_evictable_data);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.anon_evictable_metadata", mibs.anon_evictable_metadata, arcstats.anon_evictable_metadata);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_size", mibs.mru_size, arcstats.mru_size);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_evictable_data", mibs.mru_evictable_data, arcstats.mru_evictable_data);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_evictable_metadata", mibs.mru_evictable_metadata, arcstats.mru_evictable_metadata);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_size", mibs.mru_ghost_size, arcstats.mru_ghost_size);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_evictable_data", mibs.mru_ghost_evictable_data, arcstats.mru_ghost_evictable_data);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mru_ghost_evictable_metadata", mibs.mru_ghost_evictable_metadata, arcstats.mru_ghost_evictable_metadata);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_size", mibs.mfu_size, arcstats.mfu_size);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_evictable_data", mibs.mfu_evictable_data, arcstats.mfu_evictable_data);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_evictable_metadata", mibs.mfu_evictable_metadata, arcstats.mfu_evictable_metadata);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_size", mibs.mfu_ghost_size, arcstats.mfu_ghost_size);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_evictable_data", mibs.mfu_ghost_evictable_data, arcstats.mfu_ghost_evictable_data);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.mfu_ghost_evictable_metadata", mibs.mfu_ghost_evictable_metadata, arcstats.mfu_ghost_evictable_metadata);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_hits", mibs.l2_hits, arcstats.l2_hits);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_misses", mibs.l2_misses, arcstats.l2_misses);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_feeds", mibs.l2_feeds, arcstats.l2_feeds);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_rw_clash", mibs.l2_rw_clash, arcstats.l2_rw_clash);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_read_bytes", mibs.l2_read_bytes, arcstats.l2_read_bytes);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_write_bytes", mibs.l2_write_bytes, arcstats.l2_write_bytes);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_sent", mibs.l2_writes_sent, arcstats.l2_writes_sent);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_done", mibs.l2_writes_done, arcstats.l2_writes_done);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_error", mibs.l2_writes_error, arcstats.l2_writes_error);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_writes_lock_retry", mibs.l2_writes_lock_retry, arcstats.l2_writes_lock_retry);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_lock_retry", mibs.l2_evict_lock_retry, arcstats.l2_evict_lock_retry);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_reading", mibs.l2_evict_reading, arcstats.l2_evict_reading);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_evict_l1cached", mibs.l2_evict_l1cached, arcstats.l2_evict_l1cached);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_free_on_write", mibs.l2_free_on_write, arcstats.l2_free_on_write);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_cdata_free_on_write", mibs.l2_cdata_free_on_write, arcstats.l2_cdata_free_on_write);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_abort_lowmem", mibs.l2_abort_lowmem, arcstats.l2_abort_lowmem);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_cksum_bad", mibs.l2_cksum_bad, arcstats.l2_cksum_bad);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_io_error", mibs.l2_io_error, arcstats.l2_io_error);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_size", mibs.l2_size, arcstats.l2_size);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_asize", mibs.l2_asize, arcstats.l2_asize);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_hdr_size", mibs.l2_hdr_size, arcstats.l2_hdr_size);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_successes", mibs.l2_compress_successes, arcstats.l2_compress_successes);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_zeros", mibs.l2_compress_zeros, arcstats.l2_compress_zeros);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.l2_compress_failures", mibs.l2_compress_failures, arcstats.l2_compress_failures);
- GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_throttle_count", mibs.memory_throttle_count, arcstats.memory_throttle_count);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_buffers", mibs.duplicate_buffers, arcstats.duplicate_buffers);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_buffers_size", mibs.duplicate_buffers_size, arcstats.duplicate_buffers_size);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.duplicate_reads", mibs.duplicate_reads, arcstats.duplicate_reads);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_direct_count", mibs.memory_direct_count, arcstats.memory_direct_count);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.memory_indirect_count", mibs.memory_indirect_count, arcstats.memory_indirect_count);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_no_grow", mibs.arc_no_grow, arcstats.arc_no_grow);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_tempreserve", mibs.arc_tempreserve, arcstats.arc_tempreserve);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_loaned_bytes", mibs.arc_loaned_bytes, arcstats.arc_loaned_bytes);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_prune", mibs.arc_prune, arcstats.arc_prune);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_used", mibs.arc_meta_used, arcstats.arc_meta_used);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_limit", mibs.arc_meta_limit, arcstats.arc_meta_limit);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_max", mibs.arc_meta_max, arcstats.arc_meta_max);
- // not used: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_meta_min", mibs.arc_meta_min, arcstats.arc_meta_min);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_need_free", mibs.arc_need_free, arcstats.arc_need_free);
- // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_sys_free", mibs.arc_sys_free, arcstats.arc_sys_free);
-
- if (arcstats.size > arcstats.c_min) {
- zfs_arcstats_shrinkable_cache_size_bytes = arcstats.size - arcstats.c_min;
- } else {
- zfs_arcstats_shrinkable_cache_size_bytes = 0;
- }
-
- generate_charts_arcstats("freebsd.plugin", "zfs", show_zero_charts, update_every);
- generate_charts_arc_summary("freebsd.plugin", "zfs", show_zero_charts, update_every);
-
- return 0;
-}
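Every `GETSYSCTL_SIMPLE()` call above caches the numeric MIB in one of the `mibs` arrays, so the expensive name-to-MIB lookup happens only on the first iteration. A sketch of that caching idea using the real libc calls (`sysctlnametomib(3)`, `sysctl(3)`); `getsysctl_cached` is a hypothetical helper, not netdata's actual macro:

```c
// Sketch of the MIB-caching idea behind GETSYSCTL_SIMPLE: resolve the sysctl
// name to its numeric MIB once, then reuse the MIB for every later read,
// which is considerably cheaper than sysctlbyname() on each iteration.
#include <sys/types.h>
#include <sys/sysctl.h>
#include <string.h>

static int getsysctl_cached(const char *name, int *mib, size_t *miblen_io,
                            void *out, size_t outlen) {
    if (*miblen_io == 0) {                      // first call: resolve the name
        int tmp[CTL_MAXNAME];
        size_t miblen = CTL_MAXNAME;
        if (sysctlnametomib(name, tmp, &miblen) != 0)
            return -1;
        memcpy(mib, tmp, miblen * sizeof(int));
        *miblen_io = miblen;
    }
    size_t len = outlen;
    if (sysctl(mib, (u_int)*miblen_io, out, &len, NULL, 0) != 0 || len != outlen)
        return -1;
    return 0;
}
```

A caller would keep `static int mib[CTL_MAXNAME]; static size_t miblen = 0;` per metric, exactly as the per-metric `int ...[5]` arrays in the `mibs` struct above do.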
-
-// kstat.zfs.misc.zio_trim
-
-int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt) {
- (void)dt;
- static int mib_bytes[5] = {0, 0, 0, 0, 0}, mib_success[5] = {0, 0, 0, 0, 0},
- mib_failed[5] = {0, 0, 0, 0, 0}, mib_unsupported[5] = {0, 0, 0, 0, 0};
- uint64_t bytes, success, failed, unsupported;
-
- if (unlikely(GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.bytes", mib_bytes, bytes) ||
- GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.success", mib_success, success) ||
- GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.failed", mib_failed, failed) ||
- GETSYSCTL_SIMPLE("kstat.zfs.misc.zio_trim.unsupported", mib_unsupported, unsupported))) {
- collector_error("DISABLED: zfs.trim_bytes chart");
- collector_error("DISABLED: zfs.trim_success chart");
- collector_error("DISABLED: kstat.zfs.misc.zio_trim module");
- return 1;
- } else {
-
- static RRDSET *st_bytes = NULL;
- static RRDDIM *rd_bytes = NULL;
-
- if (unlikely(!st_bytes)) {
- st_bytes = rrdset_create_localhost(
- "zfs",
- "trim_bytes",
- NULL,
- "trim",
- NULL,
- "Successfully TRIMmed bytes",
- "bytes",
- "freebsd.plugin",
- "zfs",
- 2320,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_bytes = rrddim_add(st_bytes, "TRIMmed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_bytes, rd_bytes, bytes);
- rrdset_done(st_bytes);
-
- static RRDSET *st_requests = NULL;
- static RRDDIM *rd_successful = NULL, *rd_failed = NULL, *rd_unsupported = NULL;
-
- if (unlikely(!st_requests)) {
- st_requests = rrdset_create_localhost(
- "zfs",
- "trim_requests",
- NULL,
- "trim",
- NULL,
- "TRIM requests",
- "requests",
- "freebsd.plugin",
- "zfs",
- 2321,
- update_every,
- RRDSET_TYPE_STACKED
- );
-
- rd_successful = rrddim_add(st_requests, "successful", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_failed = rrddim_add(st_requests, "failed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_unsupported = rrddim_add(st_requests, "unsupported", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_requests, rd_successful, success);
- rrddim_set_by_pointer(st_requests, rd_failed, failed);
- rrddim_set_by_pointer(st_requests, rd_unsupported, unsupported);
- rrdset_done(st_requests);
-
- }
-
- return 0;
-}
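The TRIM dimensions above are added with `RRD_ALGORITHM_INCREMENTAL`: the collector submits raw, ever-growing counters and the charted value is the per-second delta between consecutive samples. A hedged sketch of what that derivation amounts to (hypothetical `incremental_dim` type, not netdata's internal representation):

```c
// What RRD_ALGORITHM_INCREMENTAL boils down to: rate = delta / elapsed time.
#include <stdint.h>

typedef unsigned long long usec_t;
#define USEC_PER_SEC 1000000ULL

struct incremental_dim {
    uint64_t last_value;
    int seen;                    // the first sample only primes last_value
};

// Returns the per-second rate, or 0.0 until two samples have been seen.
static double incremental_rate(struct incremental_dim *d, uint64_t value, usec_t dt) {
    if (!d->seen || dt == 0) {
        d->last_value = value;
        d->seen = 1;
        return 0.0;
    }
    double rate = (double)(value - d->last_value) * USEC_PER_SEC / (double)dt;
    d->last_value = value;
    return rate;
}
```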
diff --git a/collectors/freebsd.plugin/freebsd_sysctl.c b/collectors/freebsd.plugin/freebsd_sysctl.c
deleted file mode 100644
index 8a6df509d..000000000
--- a/collectors/freebsd.plugin/freebsd_sysctl.c
+++ /dev/null
@@ -1,3093 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_freebsd.h"
-
-#include <sys/vmmeter.h>
-#include <vm/vm_param.h>
-
-#define _KERNEL
-#include <sys/sem.h>
-#include <sys/shm.h>
-#include <sys/msg.h>
-#undef _KERNEL
-
-#include <net/netisr.h>
-
-#include <netinet/ip.h>
-#include <netinet/ip_var.h>
-#include <netinet/ip_icmp.h>
-#include <netinet/icmp_var.h>
-#include <netinet6/ip6_var.h>
-#include <netinet/icmp6.h>
-#include <netinet/tcp_var.h>
-#include <netinet/tcp_fsm.h>
-#include <netinet/udp.h>
-#include <netinet/udp_var.h>
-
-// --------------------------------------------------------------------------------------------------------------------
-// common definitions and variables
-
-int system_pagesize = PAGE_SIZE;
-int number_of_cpus = 1;
-#if __FreeBSD_version >= 1200029
-struct __vmmeter {
- uint64_t v_swtch;
- uint64_t v_trap;
- uint64_t v_syscall;
- uint64_t v_intr;
- uint64_t v_soft;
- uint64_t v_vm_faults;
- uint64_t v_io_faults;
- uint64_t v_cow_faults;
- uint64_t v_cow_optim;
- uint64_t v_zfod;
- uint64_t v_ozfod;
- uint64_t v_swapin;
- uint64_t v_swapout;
- uint64_t v_swappgsin;
- uint64_t v_swappgsout;
- uint64_t v_vnodein;
- uint64_t v_vnodeout;
- uint64_t v_vnodepgsin;
- uint64_t v_vnodepgsout;
- uint64_t v_intrans;
- uint64_t v_reactivated;
- uint64_t v_pdwakeups;
- uint64_t v_pdpages;
- uint64_t v_pdshortfalls;
- uint64_t v_dfree;
- uint64_t v_pfree;
- uint64_t v_tfree;
- uint64_t v_forks;
- uint64_t v_vforks;
- uint64_t v_rforks;
- uint64_t v_kthreads;
- uint64_t v_forkpages;
- uint64_t v_vforkpages;
- uint64_t v_rforkpages;
- uint64_t v_kthreadpages;
- u_int v_page_size;
- u_int v_page_count;
- u_int v_free_reserved;
- u_int v_free_target;
- u_int v_free_min;
- u_int v_free_count;
- u_int v_wire_count;
- u_int v_active_count;
- u_int v_inactive_target;
- u_int v_inactive_count;
- u_int v_laundry_count;
- u_int v_pageout_free_min;
- u_int v_interrupt_free_min;
- u_int v_free_severe;
-};
-typedef struct __vmmeter vmmeter_t;
-#else
-typedef struct vmmeter vmmeter_t;
-#endif
-
-#if (__FreeBSD_version >= 1101516 && __FreeBSD_version < 1200000) || __FreeBSD_version >= 1200015
-#define NETDATA_COLLECT_LAUNDRY 1
-#endif
-
-// FreeBSD plugin initialization
-
-int freebsd_plugin_init()
-{
- system_pagesize = getpagesize();
- if (system_pagesize <= 0) {
- collector_error("FREEBSD: can't get system page size");
- return 1;
- }
-
- if (unlikely(GETSYSCTL_BY_NAME("kern.smp.cpus", number_of_cpus))) {
- collector_error("FREEBSD: can't get number of cpus");
- return 1;
- }
-
- if (unlikely(!number_of_cpus)) {
- collector_error("FREEBSD: wrong number of cpus");
- return 1;
- }
-
- return 0;
-}
-
-// vm.loadavg
-
-// FreeBSD calculates load averages once every 5 seconds
-#define MIN_LOADAVG_UPDATE_EVERY 5
-
-int do_vm_loadavg(int update_every, usec_t dt){
- static usec_t next_loadavg_dt = 0;
-
- if (next_loadavg_dt <= dt) {
- static int mib[2] = {0, 0};
- struct loadavg sysload;
-
- if (unlikely(GETSYSCTL_SIMPLE("vm.loadavg", mib, sysload))) {
- collector_error("DISABLED: system.load chart");
- collector_error("DISABLED: vm.loadavg module");
- return 1;
- } else {
- static RRDSET *st = NULL;
- static RRDDIM *rd_load1 = NULL, *rd_load2 = NULL, *rd_load3 = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system",
- "load",
- NULL,
- "load",
- NULL,
- "System Load Average",
- "load",
- "freebsd.plugin",
- "vm.loadavg",
- NETDATA_CHART_PRIO_SYSTEM_LOAD,
- (update_every < MIN_LOADAVG_UPDATE_EVERY) ?
- MIN_LOADAVG_UPDATE_EVERY : update_every, RRDSET_TYPE_LINE
- );
- rd_load1 = rrddim_add(st, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- rd_load2 = rrddim_add(st, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- rd_load3 = rrddim_add(st, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_load1, (collected_number) ((double) sysload.ldavg[0] / sysload.fscale * 1000));
- rrddim_set_by_pointer(st, rd_load2, (collected_number) ((double) sysload.ldavg[1] / sysload.fscale * 1000));
- rrddim_set_by_pointer(st, rd_load3, (collected_number) ((double) sysload.ldavg[2] / sysload.fscale * 1000));
- rrdset_done(st);
-
- next_loadavg_dt = st->update_every * USEC_PER_SEC;
- }
- }
- else
- next_loadavg_dt -= dt;
-
- return 0;
-}
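`do_vm_loadavg()` self-throttles with `next_loadavg_dt` because the kernel only recomputes load averages every 5 seconds; polling faster would chart duplicates. A distilled sketch of that countdown pattern (the collection body is elided):

```c
// Distilled version of the throttle used by do_vm_loadavg() above: dt is the
// time elapsed since the previous call, and the countdown only lets the
// expensive work run once every max(update_every, 5) seconds.
typedef unsigned long long usec_t;
#define USEC_PER_SEC 1000000ULL

static int throttled_collect(int update_every, usec_t dt) {
    static usec_t countdown = 0;

    if (countdown > dt) {            // not due yet: just burn down the timer
        countdown -= dt;
        return 0;
    }

    // ... read vm.loadavg and update the chart here ...

    int effective = update_every < 5 ? 5 : update_every;   // 5-second floor
    countdown = (usec_t)effective * USEC_PER_SEC;
    return 0;
}
```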
-
-// vm.vmtotal
-
-int do_vm_vmtotal(int update_every, usec_t dt) {
- (void)dt;
- static int do_all_processes = -1, do_processes = -1, do_mem_real = -1;
-
- if (unlikely(do_all_processes == -1)) {
- do_all_processes = config_get_boolean("plugin:freebsd:vm.vmtotal", "enable total processes", 1);
- do_processes = config_get_boolean("plugin:freebsd:vm.vmtotal", "processes running", 1);
- do_mem_real = config_get_boolean("plugin:freebsd:vm.vmtotal", "real memory", 1);
- }
-
- if (likely(do_all_processes | do_processes | do_mem_real)) {
- static int mib[2] = {0, 0};
- struct vmtotal vmtotal_data;
-
- if (unlikely(GETSYSCTL_SIMPLE("vm.vmtotal", mib, vmtotal_data))) {
- do_all_processes = 0;
- collector_error("DISABLED: system.active_processes chart");
- do_processes = 0;
- collector_error("DISABLED: system.processes chart");
- do_mem_real = 0;
- collector_error("DISABLED: mem.real chart");
- collector_error("DISABLED: vm.vmtotal module");
- return 1;
- } else {
- if (likely(do_all_processes)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system",
- "active_processes",
- NULL,
- "processes",
- NULL,
- "System Active Processes",
- "processes",
- "freebsd.plugin",
- "vm.vmtotal",
- NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES,
- update_every,
- RRDSET_TYPE_LINE
- );
- rd = rrddim_add(st, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd, (vmtotal_data.t_rq + vmtotal_data.t_dw + vmtotal_data.t_pw + vmtotal_data.t_sl + vmtotal_data.t_sw));
- rrdset_done(st);
- }
-
- if (likely(do_processes)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_running = NULL, *rd_blocked = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system",
- "processes",
- NULL,
- "processes",
- NULL,
- "System Processes",
- "processes",
- "freebsd.plugin",
- "vm.vmtotal",
- NETDATA_CHART_PRIO_SYSTEM_PROCESSES,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_running = rrddim_add(st, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_blocked = rrddim_add(st, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_running, vmtotal_data.t_rq);
- rrddim_set_by_pointer(st, rd_blocked, (vmtotal_data.t_dw + vmtotal_data.t_pw));
- rrdset_done(st);
- }
-
- if (likely(do_mem_real)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "mem",
- "real",
- NULL,
- "system",
- NULL,
- "Total Real Memory In Use",
- "MiB",
- "freebsd.plugin",
- "vm.vmtotal",
- NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED,
- update_every,
- RRDSET_TYPE_AREA
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd = rrddim_add(st, "used", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd, vmtotal_data.t_rm);
- rrdset_done(st);
- }
- }
- } else {
- collector_error("DISABLED: vm.vmtotal module");
- return 1;
- }
-
- return 0;
-}
-
-// kern.cp_time
-
-int do_kern_cp_time(int update_every, usec_t dt) {
- (void)dt;
-
- if (unlikely(CPUSTATES != 5)) {
- collector_error("FREEBSD: There are %d CPU states (5 was expected)", CPUSTATES);
- collector_error("DISABLED: system.cpu chart");
- collector_error("DISABLED: kern.cp_time module");
- return 1;
- } else {
- static int mib[2] = {0, 0};
- long cp_time[CPUSTATES];
-
- if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) {
- collector_error("DISABLED: system.cpu chart");
- collector_error("DISABLED: kern.cp_time module");
- return 1;
- } else {
- static RRDSET *st = NULL;
- static RRDDIM *rd_nice = NULL, *rd_system = NULL, *rd_user = NULL, *rd_interrupt = NULL, *rd_idle = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system",
- "cpu",
- NULL,
- "cpu",
- "system.cpu",
- "Total CPU utilization",
- "percentage",
- "freebsd.plugin",
- "kern.cp_time",
- NETDATA_CHART_PRIO_SYSTEM_CPU,
- update_every,
- RRDSET_TYPE_STACKED
- );
-
- rd_nice = rrddim_add(st, "nice", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rd_system = rrddim_add(st, "system", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rd_user = rrddim_add(st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rd_interrupt = rrddim_add(st, "interrupt", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rd_idle = rrddim_add(st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_hide(st, "idle");
- }
-
- rrddim_set_by_pointer(st, rd_nice, cp_time[1]);
- rrddim_set_by_pointer(st, rd_system, cp_time[2]);
- rrddim_set_by_pointer(st, rd_user, cp_time[0]);
- rrddim_set_by_pointer(st, rd_interrupt, cp_time[3]);
- rrddim_set_by_pointer(st, rd_idle, cp_time[4]);
- rrdset_done(st);
- }
- }
-
- return 0;
-}
-
-// kern.cp_times
-
-int do_kern_cp_times(int update_every, usec_t dt) {
- (void)dt;
-
- if (unlikely(CPUSTATES != 5)) {
- collector_error("FREEBSD: There are %d CPU states (5 was expected)", CPUSTATES);
- collector_error("DISABLED: cpu.cpuXX charts");
- collector_error("DISABLED: kern.cp_times module");
- return 1;
- } else {
- static int mib[2] = {0, 0};
- long cp_time[CPUSTATES];
- static long *pcpu_cp_time = NULL;
- static int old_number_of_cpus = 0;
-
- if(unlikely(number_of_cpus != old_number_of_cpus))
- pcpu_cp_time = reallocz(pcpu_cp_time, sizeof(cp_time) * number_of_cpus);
- if (unlikely(GETSYSCTL_WSIZE("kern.cp_times", mib, pcpu_cp_time, sizeof(cp_time) * number_of_cpus))) {
- collector_error("DISABLED: cpu.cpuXX charts");
- collector_error("DISABLED: kern.cp_times module");
- return 1;
- } else {
- int i;
- static struct cpu_chart {
- char cpuid[MAX_INT_DIGITS + 4];
- RRDSET *st;
- RRDDIM *rd_user;
- RRDDIM *rd_nice;
- RRDDIM *rd_system;
- RRDDIM *rd_interrupt;
- RRDDIM *rd_idle;
- } *all_cpu_charts = NULL;
-
- if(unlikely(number_of_cpus > old_number_of_cpus)) {
- all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * number_of_cpus);
- memset(&all_cpu_charts[old_number_of_cpus], 0, sizeof(struct cpu_chart) * (number_of_cpus - old_number_of_cpus));
- }
-
- for (i = 0; i < number_of_cpus; i++) {
- if (unlikely(!all_cpu_charts[i].st)) {
- snprintfz(all_cpu_charts[i].cpuid, MAX_INT_DIGITS, "cpu%d", i);
- all_cpu_charts[i].st = rrdset_create_localhost(
- "cpu",
- all_cpu_charts[i].cpuid,
- NULL,
- "utilization",
- "cpu.cpu",
- "Core utilization",
- "percentage",
- "freebsd.plugin",
- "kern.cp_times",
- NETDATA_CHART_PRIO_CPU_PER_CORE,
- update_every,
- RRDSET_TYPE_STACKED
- );
-
- all_cpu_charts[i].rd_nice = rrddim_add(all_cpu_charts[i].st, "nice", NULL, 1, 1,
- RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- all_cpu_charts[i].rd_system = rrddim_add(all_cpu_charts[i].st, "system", NULL, 1, 1,
- RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- all_cpu_charts[i].rd_user = rrddim_add(all_cpu_charts[i].st, "user", NULL, 1, 1,
- RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- all_cpu_charts[i].rd_interrupt = rrddim_add(all_cpu_charts[i].st, "interrupt", NULL, 1, 1,
- RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- all_cpu_charts[i].rd_idle = rrddim_add(all_cpu_charts[i].st, "idle", NULL, 1, 1,
- RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_hide(all_cpu_charts[i].st, "idle");
- }
-
- rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_nice, pcpu_cp_time[i * 5 + 1]);
- rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_system, pcpu_cp_time[i * 5 + 2]);
- rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_user, pcpu_cp_time[i * 5 + 0]);
- rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_interrupt, pcpu_cp_time[i * 5 + 3]);
- rrddim_set_by_pointer(all_cpu_charts[i].st, all_cpu_charts[i].rd_idle, pcpu_cp_time[i * 5 + 4]);
- rrdset_done(all_cpu_charts[i].st);
- }
- }
-
- old_number_of_cpus = number_of_cpus;
- }
-
- return 0;
-}
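The `pcpu_cp_time[i * 5 + state]` indexing above works because `kern.cp_times` returns one flat array: `CPUSTATES` longs per CPU, in the order `CP_USER`, `CP_NICE`, `CP_SYS`, `CP_INTR`, `CP_IDLE`. A self-contained sketch of reading that layout (FreeBSD only, error handling abbreviated):

```c
// Sketch of reading the flat kern.cp_times layout the loop above indexes.
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/resource.h>     // CPUSTATES, CP_USER, CP_NICE, CP_SYS, CP_INTR, CP_IDLE
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    int ncpu;
    size_t len = sizeof(ncpu);
    if (sysctlbyname("kern.smp.cpus", &ncpu, &len, NULL, 0) != 0)
        return 1;

    long *cp = malloc(sizeof(long) * CPUSTATES * ncpu);
    len = sizeof(long) * CPUSTATES * ncpu;
    if (!cp || sysctlbyname("kern.cp_times", cp, &len, NULL, 0) != 0)
        return 1;

    for (int i = 0; i < ncpu; i++)
        printf("cpu%d user=%ld nice=%ld sys=%ld intr=%ld idle=%ld\n", i,
               cp[i * CPUSTATES + CP_USER], cp[i * CPUSTATES + CP_NICE],
               cp[i * CPUSTATES + CP_SYS],  cp[i * CPUSTATES + CP_INTR],
               cp[i * CPUSTATES + CP_IDLE]);
    free(cp);
    return 0;
}
```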
-
-// dev.cpu.temperature
-
-int do_dev_cpu_temperature(int update_every, usec_t dt) {
- (void)dt;
-
- int i;
- static int *mib = NULL;
- static int *pcpu_temperature = NULL;
- static int old_number_of_cpus = 0;
- char char_mib[MAX_INT_DIGITS + 21];
- char char_rd[MAX_INT_DIGITS + 9];
-
- if (unlikely(number_of_cpus != old_number_of_cpus)) {
- pcpu_temperature = reallocz(pcpu_temperature, sizeof(int) * number_of_cpus);
- mib = reallocz(mib, sizeof(int) * number_of_cpus * 4);
- if (unlikely(number_of_cpus > old_number_of_cpus))
- memset(&mib[old_number_of_cpus * 4], 0, sizeof(int) * (number_of_cpus - old_number_of_cpus) * 4);
- }
- for (i = 0; i < number_of_cpus; i++) {
- if (unlikely(!(mib[i * 4])))
- sprintf(char_mib, "dev.cpu.%d.temperature", i);
- if (unlikely(getsysctl_simple(char_mib, &mib[i * 4], 4, &pcpu_temperature[i], sizeof(int)))) {
- collector_error("DISABLED: cpu.temperature chart");
- collector_error("DISABLED: dev.cpu.temperature module");
- return 1;
- }
- }
-
- static RRDSET *st;
- static RRDDIM **rd_pcpu_temperature;
-
- if (unlikely(number_of_cpus != old_number_of_cpus)) {
- rd_pcpu_temperature = reallocz(rd_pcpu_temperature, sizeof(RRDDIM *) * number_of_cpus);
- if (unlikely(number_of_cpus > old_number_of_cpus))
- memset(&rd_pcpu_temperature[old_number_of_cpus], 0, sizeof(RRDDIM *) * (number_of_cpus - old_number_of_cpus));
- }
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "cpu",
- "temperature",
- NULL,
- "temperature",
- "cpu.temperature",
- "Core temperature",
- "Celsius",
- "freebsd.plugin",
- "dev.cpu.temperature",
- NETDATA_CHART_PRIO_CPU_TEMPERATURE,
- update_every,
- RRDSET_TYPE_LINE
- );
- }
-
- for (i = 0; i < number_of_cpus; i++) {
- if (unlikely(!rd_pcpu_temperature[i])) {
- sprintf(char_rd, "cpu%d.temp", i);
- rd_pcpu_temperature[i] = rrddim_add(st, char_rd, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_pcpu_temperature[i], (collected_number) ((double)pcpu_temperature[i] / 10 - 273.15));
- }
-
- rrdset_done(st);
-
- old_number_of_cpus = number_of_cpus;
-
- return 0;
-}
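The `/ 10 - 273.15` above converts the sysctl's raw unit, tenths of a kelvin, to Celsius. A one-line restatement with a worked example:

```c
// dev.cpu.N.temperature reports tenths of a kelvin.
// Example: raw = 3231  ->  323.1 K  ->  49.95 degrees Celsius.
static double decikelvin_to_celsius(int raw) {
    return (double)raw / 10.0 - 273.15;
}
```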
-
-// dev.cpu.0.freq
-
-int do_dev_cpu_0_freq(int update_every, usec_t dt) {
- (void)dt;
- static int mib[4] = {0, 0, 0, 0};
- int cpufreq;
-
- if (unlikely(GETSYSCTL_SIMPLE("dev.cpu.0.freq", mib, cpufreq))) {
- collector_error("DISABLED: cpu.scaling_cur_freq chart");
- collector_error("DISABLED: dev.cpu.0.freq module");
- return 1;
- } else {
- static RRDSET *st = NULL;
- static RRDDIM *rd = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "cpu",
- "scaling_cur_freq",
- NULL,
- "cpufreq",
- NULL,
- "Current CPU Scaling Frequency",
- "MHz",
- "freebsd.plugin",
- "dev.cpu.0.freq",
- NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd = rrddim_add(st, "frequency", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd, cpufreq);
- rrdset_done(st);
- }
-
- return 0;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// hw.intrcnt
-
-int do_hw_intcnt(int update_every, usec_t dt) {
- (void)dt;
- static int mib_hw_intrcnt[2] = {0, 0};
- size_t intrcnt_size = 0;
-
- if (unlikely(GETSYSCTL_SIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt_size))) {
- collector_error("DISABLED: system.intr chart");
- collector_error("DISABLED: system.interrupts chart");
- collector_error("DISABLED: hw.intrcnt module");
- return 1;
- } else {
- unsigned long nintr = 0;
- static unsigned long old_nintr = 0;
- static unsigned long *intrcnt = NULL;
-
- nintr = intrcnt_size / sizeof(u_long);
- if (unlikely(nintr != old_nintr))
- intrcnt = reallocz(intrcnt, nintr * sizeof(u_long));
- if (unlikely(GETSYSCTL_WSIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt, nintr * sizeof(u_long)))) {
- collector_error("DISABLED: system.intr chart");
- collector_error("DISABLED: system.interrupts chart");
- collector_error("DISABLED: hw.intrcnt module");
- return 1;
- } else {
- unsigned long long totalintr = 0;
- unsigned long i;
-
- for (i = 0; i < nintr; i++)
- totalintr += intrcnt[i];
-
- static RRDSET *st_intr = NULL;
- static RRDDIM *rd_intr = NULL;
-
- if (unlikely(!st_intr)) {
- st_intr = rrdset_create_localhost(
- "system",
- "intr",
- NULL,
- "interrupts",
- NULL,
- "Total Hardware Interrupts",
- "interrupts/s",
- "freebsd.plugin",
- "hw.intrcnt",
- NETDATA_CHART_PRIO_SYSTEM_INTR,
- update_every,
- RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL);
-
- rd_intr = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_intr, rd_intr, totalintr);
- rrdset_done(st_intr);
-
- size_t size;
- static int mib_hw_intrnames[2] = {0, 0};
- static char *intrnames = NULL;
-
- if (unlikely(GETSYSCTL_SIZE("hw.intrnames", mib_hw_intrnames, size))) {
- collector_error("DISABLED: system.intr chart");
- collector_error("DISABLED: system.interrupts chart");
- collector_error("DISABLED: hw.intrcnt module");
- return 1;
- } else {
- if (unlikely(nintr != old_nintr))
- intrnames = reallocz(intrnames, size);
- if (unlikely(GETSYSCTL_WSIZE("hw.intrnames", mib_hw_intrnames, intrnames, size))) {
- collector_error("DISABLED: system.intr chart");
- collector_error("DISABLED: system.interrupts chart");
- collector_error("DISABLED: hw.intrcnt module");
- return 1;
- } else {
- static RRDSET *st_interrupts = NULL;
-
- if (unlikely(!st_interrupts)) {
- st_interrupts = rrdset_create_localhost(
- "system",
- "interrupts",
- NULL,
- "interrupts",
- NULL,
- "System interrupts",
- "interrupts/s",
- "freebsd.plugin",
- "hw.intrcnt",
- NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS,
- update_every,
- RRDSET_TYPE_STACKED
- );
- }
-
- for (i = 0; i < nintr; i++) {
- void *p;
-
- p = intrnames + i * (strlen(intrnames) + 1);
- if (unlikely((intrcnt[i] != 0) && (*(char *) p != 0))) {
- RRDDIM *rd_interrupts = rrddim_find_active(st_interrupts, p);
-
- if (unlikely(!rd_interrupts))
- rd_interrupts = rrddim_add(st_interrupts, p, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st_interrupts, rd_interrupts, intrcnt[i]);
- }
- }
- rrdset_done(st_interrupts);
- }
- }
- }
-
- old_nintr = nintr;
- }
-
- return 0;
-}
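`do_hw_intcnt()` sizes its buffers with the standard sysctl size-probe that `GETSYSCTL_SIZE` wraps: call with a NULL destination to learn the required length, then read for real. A sketch for the `hw.intrcnt` counter array (`read_intrcnt` is a hypothetical helper name):

```c
// Sketch of the size-probe pattern behind GETSYSCTL_SIZE above: a NULL buffer
// makes the kernel report the required length, which then sizes the real read.
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdlib.h>

static u_long *read_intrcnt(size_t *nintr_out) {
    size_t len = 0;
    if (sysctlbyname("hw.intrcnt", NULL, &len, NULL, 0) != 0 || len == 0)
        return NULL;

    u_long *counters = malloc(len);
    if (!counters)
        return NULL;
    if (sysctlbyname("hw.intrcnt", counters, &len, NULL, 0) != 0) {
        free(counters);
        return NULL;
    }
    *nintr_out = len / sizeof(u_long);   // one counter per interrupt source
    return counters;
}
```

`hw.intrnames` is the parallel array of NUL-terminated source names, which is why the collector reads both with the same `nintr`.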
-
-// vm.stats.sys.v_intr
-
-int do_vm_stats_sys_v_intr(int update_every, usec_t dt) {
- (void)dt;
- static int mib[4] = {0, 0, 0, 0};
- u_int int_number;
-
- if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_intr", mib, int_number))) {
- collector_error("DISABLED: system.dev_intr chart");
- collector_error("DISABLED: vm.stats.sys.v_intr module");
- return 1;
- } else {
- static RRDSET *st = NULL;
- static RRDDIM *rd = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system",
- "dev_intr",
- NULL,
- "interrupts",
- NULL,
- "Device Interrupts",
- "interrupts/s",
- "freebsd.plugin",
- "vm.stats.sys.v_intr",
- NETDATA_CHART_PRIO_SYSTEM_DEV_INTR,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd = rrddim_add(st, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd, int_number);
- rrdset_done(st);
- }
-
- return 0;
-}
-
-// vm.stats.sys.v_soft
-
-int do_vm_stats_sys_v_soft(int update_every, usec_t dt) {
- (void)dt;
- static int mib[4] = {0, 0, 0, 0};
- u_int soft_intr_number;
-
- if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_soft", mib, soft_intr_number))) {
- collector_error("DISABLED: system.dev_intr chart");
- collector_error("DISABLED: vm.stats.sys.v_soft module");
- return 1;
- } else {
- static RRDSET *st = NULL;
- static RRDDIM *rd = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system",
- "soft_intr",
- NULL,
- "interrupts",
- NULL,
- "Software Interrupts",
- "interrupts/s",
- "freebsd.plugin",
- "vm.stats.sys.v_soft",
- NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd = rrddim_add(st, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd, soft_intr_number);
- rrdset_done(st);
- }
-
- return 0;
-}
-
-// vm.stats.sys.v_swtch
-
-int do_vm_stats_sys_v_swtch(int update_every, usec_t dt) {
- (void)dt;
- static int mib[4] = {0, 0, 0, 0};
- u_int ctxt_number;
-
- if (unlikely(GETSYSCTL_SIMPLE("vm.stats.sys.v_swtch", mib, ctxt_number))) {
- collector_error("DISABLED: system.ctxt chart");
- collector_error("DISABLED: vm.stats.sys.v_swtch module");
- return 1;
- } else {
- static RRDSET *st = NULL;
- static RRDDIM *rd = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system",
- "ctxt",
- NULL,
- "processes",
- NULL,
- "CPU Context Switches",
- "context switches/s",
- "freebsd.plugin",
- "vm.stats.sys.v_swtch",
- NETDATA_CHART_PRIO_SYSTEM_CTXT,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd = rrddim_add(st, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd, ctxt_number);
- rrdset_done(st);
- }
-
- return 0;
-}
-
-// vm.stats.vm.v_forks
-
-int do_vm_stats_sys_v_forks(int update_every, usec_t dt) {
- (void)dt;
- static int mib[4] = {0, 0, 0, 0};
- u_int forks_number;
-
- if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_forks", mib, forks_number))) {
- collector_error("DISABLED: system.forks chart");
-        collector_error("DISABLED: vm.stats.vm.v_forks module");
- return 1;
- } else {
-
- // --------------------------------------------------------------------
-
- static RRDSET *st = NULL;
- static RRDDIM *rd = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system",
- "forks",
- NULL,
- "processes",
- NULL,
- "Started Processes",
- "processes/s",
- "freebsd.plugin",
-                "vm.stats.vm.v_forks",
- NETDATA_CHART_PRIO_SYSTEM_FORKS,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd = rrddim_add(st, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd, forks_number);
- rrdset_done(st);
- }
-
- return 0;
-}
-
-// vm.swap_info
-
-int do_vm_swap_info(int update_every, usec_t dt) {
- (void)dt;
- static int mib[3] = {0, 0, 0};
-
- if (unlikely(getsysctl_mib("vm.swap_info", mib, 2))) {
- collector_error("DISABLED: mem.swap chart");
- collector_error("DISABLED: vm.swap_info module");
- return 1;
- } else {
- int i;
- struct xswdev xsw;
- struct total_xsw {
- collected_number bytes_used;
- collected_number bytes_total;
- } total_xsw = {0, 0};
-
- for (i = 0; ; i++) {
- size_t size;
-
- mib[2] = i;
- size = sizeof(xsw);
- if (unlikely(sysctl(mib, 3, &xsw, &size, NULL, 0) == -1 )) {
- if (unlikely(errno != ENOENT)) {
- collector_error("FREEBSD: sysctl(%s...) failed: %s", "vm.swap_info", strerror(errno));
- collector_error("DISABLED: mem.swap chart");
- collector_error("DISABLED: vm.swap_info module");
- return 1;
- } else {
- if (unlikely(size != sizeof(xsw))) {
- collector_error("FREEBSD: sysctl(%s...) expected %lu, got %lu", "vm.swap_info", (unsigned long)sizeof(xsw), (unsigned long)size);
- collector_error("DISABLED: mem.swap chart");
- collector_error("DISABLED: vm.swap_info module");
- return 1;
- } else break;
- }
- }
- total_xsw.bytes_used += xsw.xsw_used;
- total_xsw.bytes_total += xsw.xsw_nblks;
- }
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_free = NULL, *rd_used = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "mem",
- "swap",
- NULL,
- "swap",
- NULL,
- "System Swap",
- "MiB",
- "freebsd.plugin",
- "vm.swap_info",
- NETDATA_CHART_PRIO_MEM_SWAP,
- update_every,
- RRDSET_TYPE_STACKED
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_free = rrddim_add(st, "free", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- rd_used = rrddim_add(st, "used", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_free, total_xsw.bytes_total - total_xsw.bytes_used);
- rrddim_set_by_pointer(st, rd_used, total_xsw.bytes_used);
- rrdset_done(st);
- }
-
- return 0;
-}
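The loop in `do_vm_swap_info()` walks swap devices by appending the device index as the last MIB component until the kernel answers `ENOENT`. A standalone sketch of that walk with abbreviated error handling (`total_swap_pages` is a hypothetical helper; both fields are counted in pages):

```c
// Sketch of the per-device walk used by do_vm_swap_info() above.
#include <sys/types.h>
#include <sys/sysctl.h>
#include <vm/vm_param.h>      // struct xswdev
#include <errno.h>

static int total_swap_pages(unsigned long long *used, unsigned long long *total) {
    int mib[3];
    size_t miblen = 2;
    if (sysctlnametomib("vm.swap_info", mib, &miblen) != 0)
        return -1;

    *used = *total = 0;
    for (int i = 0; ; i++) {
        struct xswdev xsw;
        size_t size = sizeof(xsw);
        mib[2] = i;                           // device index
        if (sysctl(mib, 3, &xsw, &size, NULL, 0) == -1) {
            if (errno == ENOENT)              // walked past the last device
                break;
            return -1;
        }
        *used  += xsw.xsw_used;               // pages in use on this device
        *total += xsw.xsw_nblks;              // total pages on this device
    }
    return 0;
}
```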
-
-// system.ram
-
-int do_system_ram(int update_every, usec_t dt) {
- (void)dt;
- static int mib_active_count[4] = {0, 0, 0, 0},
- mib_inactive_count[4] = {0, 0, 0, 0},
- mib_wire_count[4] = {0, 0, 0, 0},
-#if __FreeBSD_version < 1200016
- mib_cache_count[4] = {0, 0, 0, 0},
-#endif
- mib_vfs_bufspace[2] = {0, 0},
- mib_free_count[4] = {0, 0, 0, 0};
- vmmeter_t vmmeter_data;
- size_t vfs_bufspace_count;
-
-#if defined(NETDATA_COLLECT_LAUNDRY)
- static int mib_laundry_count[4] = {0, 0, 0, 0};
-#endif
-
- if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_active_count", mib_active_count, vmmeter_data.v_active_count) ||
- GETSYSCTL_SIMPLE("vm.stats.vm.v_inactive_count", mib_inactive_count, vmmeter_data.v_inactive_count) ||
- GETSYSCTL_SIMPLE("vm.stats.vm.v_wire_count", mib_wire_count, vmmeter_data.v_wire_count) ||
-#if __FreeBSD_version < 1200016
- GETSYSCTL_SIMPLE("vm.stats.vm.v_cache_count", mib_cache_count, vmmeter_data.v_cache_count) ||
-#endif
-#if defined(NETDATA_COLLECT_LAUNDRY)
- GETSYSCTL_SIMPLE("vm.stats.vm.v_laundry_count", mib_laundry_count, vmmeter_data.v_laundry_count) ||
-#endif
- GETSYSCTL_SIMPLE("vfs.bufspace", mib_vfs_bufspace, vfs_bufspace_count) ||
- GETSYSCTL_SIMPLE("vm.stats.vm.v_free_count", mib_free_count, vmmeter_data.v_free_count))) {
- collector_error("DISABLED: system.ram chart");
- collector_error("DISABLED: system.ram module");
- return 1;
- } else {
- static RRDSET *st = NULL, *st_mem_available = NULL;
- static RRDDIM *rd_free = NULL, *rd_active = NULL, *rd_inactive = NULL, *rd_wired = NULL,
- *rd_cache = NULL, *rd_buffers = NULL, *rd_avail = NULL;
-
-#if defined(NETDATA_COLLECT_LAUNDRY)
- static RRDDIM *rd_laundry = NULL;
-#endif
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system",
- "ram",
- NULL,
- "ram",
- NULL,
- "System RAM",
- "MiB",
- "freebsd.plugin",
- "system.ram",
- NETDATA_CHART_PRIO_SYSTEM_RAM,
- update_every,
- RRDSET_TYPE_STACKED
- );
-
- rd_free = rrddim_add(st, "free", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- rd_active = rrddim_add(st, "active", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- rd_inactive = rrddim_add(st, "inactive", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- rd_wired = rrddim_add(st, "wired", NULL, 1, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- rd_cache = rrddim_add(st, "cache", NULL, 1, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
-#if defined(NETDATA_COLLECT_LAUNDRY)
- rd_laundry = rrddim_add(st, "laundry", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
-#endif
- rd_buffers = rrddim_add(st, "buffers", NULL, 1, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_free, vmmeter_data.v_free_count);
- rrddim_set_by_pointer(st, rd_active, vmmeter_data.v_active_count);
- rrddim_set_by_pointer(st, rd_inactive, vmmeter_data.v_inactive_count);
- rrddim_set_by_pointer(st, rd_wired, (unsigned long long)vmmeter_data.v_wire_count * (unsigned long long)system_pagesize - zfs_arcstats_shrinkable_cache_size_bytes);
-#if __FreeBSD_version < 1200016
- rrddim_set_by_pointer(st, rd_cache, (unsigned long long)vmmeter_data.v_cache_count * (unsigned long long)system_pagesize + zfs_arcstats_shrinkable_cache_size_bytes);
-#else
- rrddim_set_by_pointer(st, rd_cache, zfs_arcstats_shrinkable_cache_size_bytes);
-#endif
-#if defined(NETDATA_COLLECT_LAUNDRY)
- rrddim_set_by_pointer(st, rd_laundry, vmmeter_data.v_laundry_count);
-#endif
- rrddim_set_by_pointer(st, rd_buffers, vfs_bufspace_count);
- rrdset_done(st);
-
- if (unlikely(!st_mem_available)) {
- st_mem_available = rrdset_create_localhost(
- "mem",
- "available",
- NULL,
- "system",
- NULL,
- "Available RAM for applications",
- "MiB",
- "freebsd.plugin",
- "system.ram",
- NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_avail = rrddim_add(st_mem_available, "MemAvailable", "avail", system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- }
-
-#if __FreeBSD_version < 1200016
- rrddim_set_by_pointer(st_mem_available, rd_avail, vmmeter_data.v_inactive_count + vmmeter_data.v_free_count + vmmeter_data.v_cache_count + zfs_arcstats_shrinkable_cache_size_bytes / system_pagesize);
-#else
- rrddim_set_by_pointer(st_mem_available, rd_avail, vmmeter_data.v_inactive_count + vmmeter_data.v_free_count + zfs_arcstats_shrinkable_cache_size_bytes / system_pagesize);
-#endif
-
- rrdset_done(st_mem_available);
- }
-
- return 0;
-}
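The `mem.available` dimension above is an estimate, restated here as a sketch (hypothetical `available_pages` helper): pages that are free, easily reclaimable, or held by the shrinkable part of the ZFS ARC, all of which the VM can hand back to applications on demand. The `v_cache_count` term only exists on kernels older than `__FreeBSD_version` 1200016.

```c
// Hedged restatement of the availability estimate charted above, in pages.
static unsigned long long available_pages(unsigned long long v_inactive,
                                          unsigned long long v_free,
                                          unsigned long long v_cache,  // 0 on FreeBSD >= 1200016
                                          unsigned long long arc_shrinkable_bytes,
                                          unsigned long long pagesize) {
    return v_inactive + v_free + v_cache + arc_shrinkable_bytes / pagesize;
}
```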
-
-// vm.stats.vm.v_swappgs
-
-int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt) {
- (void)dt;
- static int mib_swappgsin[4] = {0, 0, 0, 0}, mib_swappgsout[4] = {0, 0, 0, 0};
- vmmeter_t vmmeter_data;
-
- if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_swappgsin", mib_swappgsin, vmmeter_data.v_swappgsin) ||
- GETSYSCTL_SIMPLE("vm.stats.vm.v_swappgsout", mib_swappgsout, vmmeter_data.v_swappgsout))) {
- collector_error("DISABLED: mem.swapio chart");
- collector_error("DISABLED: vm.stats.vm.v_swappgs module");
- return 1;
- } else {
- static RRDSET *st = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "mem",
- "swapio",
- NULL,
- "swap",
- NULL,
- "Swap I/O",
- "KiB/s",
- "freebsd.plugin",
- "vm.stats.vm.v_swappgs",
- NETDATA_CHART_PRIO_MEM_SWAPIO,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_in = rrddim_add(st, "in", NULL, system_pagesize, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "out", NULL, -system_pagesize, KILO_FACTOR, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in, vmmeter_data.v_swappgsin);
- rrddim_set_by_pointer(st, rd_out, vmmeter_data.v_swappgsout);
- rrdset_done(st);
- }
-
- return 0;
-}
-
-// vm.stats.vm.v_pgfaults
-
-int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt) {
- (void)dt;
- static int mib_vm_faults[4] = {0, 0, 0, 0}, mib_io_faults[4] = {0, 0, 0, 0}, mib_cow_faults[4] = {0, 0, 0, 0},
- mib_cow_optim[4] = {0, 0, 0, 0}, mib_intrans[4] = {0, 0, 0, 0};
- vmmeter_t vmmeter_data;
-
- if (unlikely(GETSYSCTL_SIMPLE("vm.stats.vm.v_vm_faults", mib_vm_faults, vmmeter_data.v_vm_faults) ||
- GETSYSCTL_SIMPLE("vm.stats.vm.v_io_faults", mib_io_faults, vmmeter_data.v_io_faults) ||
- GETSYSCTL_SIMPLE("vm.stats.vm.v_cow_faults", mib_cow_faults, vmmeter_data.v_cow_faults) ||
- GETSYSCTL_SIMPLE("vm.stats.vm.v_cow_optim", mib_cow_optim, vmmeter_data.v_cow_optim) ||
- GETSYSCTL_SIMPLE("vm.stats.vm.v_intrans", mib_intrans, vmmeter_data.v_intrans))) {
- collector_error("DISABLED: mem.pgfaults chart");
- collector_error("DISABLED: vm.stats.vm.v_pgfaults module");
- return 1;
- } else {
- static RRDSET *st = NULL;
- static RRDDIM *rd_memory = NULL, *rd_io_requiring = NULL, *rd_cow = NULL,
- *rd_cow_optimized = NULL, *rd_in_transit = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "mem",
- "pgfaults",
- NULL,
- "system",
- NULL,
- "Memory Page Faults",
- "page faults/s",
- "freebsd.plugin",
- "vm.stats.vm.v_pgfaults",
- NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_memory = rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_io_requiring = rrddim_add(st, "io_requiring", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_cow = rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_cow_optimized = rrddim_add(st, "cow_optimized", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_transit = rrddim_add(st, "in_transit", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_memory, vmmeter_data.v_vm_faults);
- rrddim_set_by_pointer(st, rd_io_requiring, vmmeter_data.v_io_faults);
- rrddim_set_by_pointer(st, rd_cow, vmmeter_data.v_cow_faults);
- rrddim_set_by_pointer(st, rd_cow_optimized, vmmeter_data.v_cow_optim);
- rrddim_set_by_pointer(st, rd_in_transit, vmmeter_data.v_intrans);
- rrdset_done(st);
- }
-
- return 0;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// kern.ipc.sem
-
-int do_kern_ipc_sem(int update_every, usec_t dt) {
- (void)dt;
- static int mib_semmni[3] = {0, 0, 0};
- struct ipc_sem {
- int semmni;
- collected_number sets;
- collected_number semaphores;
- } ipc_sem = {0, 0, 0};
-
- if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.semmni", mib_semmni, ipc_sem.semmni))) {
- collector_error("DISABLED: system.ipc_semaphores chart");
- collector_error("DISABLED: system.ipc_semaphore_arrays chart");
- collector_error("DISABLED: kern.ipc.sem module");
- return 1;
- } else {
- static struct semid_kernel *ipc_sem_data = NULL;
- static int old_semmni = 0;
- static int mib_sema[3] = {0, 0, 0};
-
- if (unlikely(ipc_sem.semmni != old_semmni)) {
- ipc_sem_data = reallocz(ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni);
- old_semmni = ipc_sem.semmni;
- }
- if (unlikely(GETSYSCTL_WSIZE("kern.ipc.sema", mib_sema, ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni))) {
- collector_error("DISABLED: system.ipc_semaphores chart");
- collector_error("DISABLED: system.ipc_semaphore_arrays chart");
- collector_error("DISABLED: kern.ipc.sem module");
- return 1;
- } else {
- int i;
-
- for (i = 0; i < ipc_sem.semmni; i++) {
- if (unlikely(ipc_sem_data[i].u.sem_perm.mode & SEM_ALLOC)) {
- ipc_sem.sets += 1;
- ipc_sem.semaphores += ipc_sem_data[i].u.sem_nsems;
- }
- }
-
- static RRDSET *st_semaphores = NULL, *st_semaphore_arrays = NULL;
- static RRDDIM *rd_semaphores = NULL, *rd_semaphore_arrays = NULL;
-
- if (unlikely(!st_semaphores)) {
- st_semaphores = rrdset_create_localhost(
- "system",
- "ipc_semaphores",
- NULL,
- "ipc semaphores",
- NULL,
- "IPC Semaphores",
- "semaphores",
- "freebsd.plugin",
- "kern.ipc.sem",
- NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_semaphores, rd_semaphores, ipc_sem.semaphores);
- rrdset_done(st_semaphores);
-
- if (unlikely(!st_semaphore_arrays)) {
- st_semaphore_arrays = rrdset_create_localhost(
- "system",
- "ipc_semaphore_arrays",
- NULL,
- "ipc semaphores",
- NULL,
- "IPC Semaphore Arrays",
- "arrays",
- "freebsd.plugin",
- "kern.ipc.sem",
- NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_semaphore_arrays = rrddim_add(st_semaphore_arrays, "arrays", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_semaphore_arrays, rd_semaphore_arrays, ipc_sem.sets);
- rrdset_done(st_semaphore_arrays);
- }
- }
-
- return 0;
-}
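-
-// kern.ipc.sema returns one struct semid_kernel per possible semaphore set
-// (kern.ipc.semmni entries), so the buffer above is reallocated only when the
-// semmni limit changes at runtime. Slots without SEM_ALLOC in their mode bits
-// are unused table entries and are skipped; each live set contributes its
-// sem_nsems count to the semaphores total.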
-
-// kern.ipc.shm
-
-int do_kern_ipc_shm(int update_every, usec_t dt) {
- (void)dt;
- static int mib_shmmni[3] = {0, 0, 0};
- struct ipc_shm {
- u_long shmmni;
- collected_number segs;
- collected_number segsize;
- } ipc_shm = {0, 0, 0};
-
- if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.shmmni", mib_shmmni, ipc_shm.shmmni))) {
- collector_error("DISABLED: system.ipc_shared_mem_segs chart");
- collector_error("DISABLED: system.ipc_shared_mem_size chart");
- collector_error("DISABLED: kern.ipc.shmmodule");
- return 1;
- } else {
- static struct shmid_kernel *ipc_shm_data = NULL;
- static u_long old_shmmni = 0;
- static int mib_shmsegs[3] = {0, 0, 0};
-
- if (unlikely(ipc_shm.shmmni != old_shmmni)) {
- ipc_shm_data = reallocz(ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni);
- old_shmmni = ipc_shm.shmmni;
- }
- if (unlikely(
- GETSYSCTL_WSIZE("kern.ipc.shmsegs", mib_shmsegs, ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni))) {
- collector_error("DISABLED: system.ipc_shared_mem_segs chart");
- collector_error("DISABLED: system.ipc_shared_mem_size chart");
- collector_error("DISABLED: kern.ipc.shmmodule");
- return 1;
- } else {
- unsigned long i;
-
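- // 0x0800 is the kernel-private SHMSEG_ALLOCATED flag (no public header
- // exports it, hence the literal); only allocated segments are counted.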
- for (i = 0; i < ipc_shm.shmmni; i++) {
- if (unlikely(ipc_shm_data[i].u.shm_perm.mode & 0x0800)) {
- ipc_shm.segs += 1;
- ipc_shm.segsize += ipc_shm_data[i].u.shm_segsz;
- }
- }
-
- static RRDSET *st_segs = NULL, *st_size = NULL;
- static RRDDIM *rd_segments = NULL, *rd_allocated = NULL;
-
- if (unlikely(!st_segs)) {
- st_segs = rrdset_create_localhost(
- "system",
- "ipc_shared_mem_segs",
- NULL,
- "ipc shared memory",
- NULL,
- "IPC Shared Memory Segments",
- "segments",
- "freebsd.plugin",
- "kern.ipc.shm",
- NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_segments = rrddim_add(st_segs, "segments", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_segs, rd_segments, ipc_shm.segs);
- rrdset_done(st_segs);
-
- if (unlikely(!st_size)) {
- st_size = rrdset_create_localhost(
- "system",
- "ipc_shared_mem_size",
- NULL,
- "ipc shared memory",
- NULL,
- "IPC Shared Memory Segments Size",
- "KiB",
- "freebsd.plugin",
- "kern.ipc.shm",
- NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_allocated = rrddim_add(st_size, "allocated", NULL, 1, KILO_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_size, rd_allocated, ipc_shm.segsize);
- rrdset_done(st_size);
- }
- }
-
- return 0;
-}
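-
-// As with the semaphore arrays, kern.ipc.shmsegs is a fixed table of
-// kern.ipc.shmmni entries that is re-sized only when the limit changes.
-// shm_segsz is accumulated in bytes and rendered in KiB via the KILO_FACTOR
-// divisor on the "allocated" dimension.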
-
-// kern.ipc.msq
-
-int do_kern_ipc_msq(int update_every, usec_t dt) {
- (void)dt;
- static int mib_msgmni[3] = {0, 0, 0};
- struct ipc_msq {
- int msgmni;
- collected_number queues;
- collected_number messages;
- collected_number usedsize;
- collected_number allocsize;
- } ipc_msq = {0, 0, 0, 0, 0};
-
- if (unlikely(GETSYSCTL_SIMPLE("kern.ipc.msgmni", mib_msgmni, ipc_msq.msgmni))) {
- collector_error("DISABLED: system.ipc_msq_queues chart");
- collector_error("DISABLED: system.ipc_msq_messages chart");
- collector_error("DISABLED: system.ipc_msq_size chart");
- collector_error("DISABLED: kern.ipc.msg module");
- return 1;
- } else {
- static struct msqid_kernel *ipc_msq_data = NULL;
- static int old_msgmni = 0;
- static int mib_msqids[3] = {0, 0, 0};
-
- if (unlikely(ipc_msq.msgmni != old_msgmni)) {
- ipc_msq_data = reallocz(ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni);
- old_msgmni = ipc_msq.msgmni;
- }
- if (unlikely(
- GETSYSCTL_WSIZE("kern.ipc.msqids", mib_msqids, ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni))) {
- collector_error("DISABLED: system.ipc_msq_queues chart");
- collector_error("DISABLED: system.ipc_msq_messages chart");
- collector_error("DISABLED: system.ipc_msq_size chart");
- collector_error("DISABLED: kern.ipc.msg module");
- return 1;
- } else {
- int i;
-
- for (i = 0; i < ipc_msq.msgmni; i++) {
- if (unlikely(ipc_msq_data[i].u.msg_qbytes != 0)) {
- ipc_msq.queues += 1;
- ipc_msq.messages += ipc_msq_data[i].u.msg_qnum;
- ipc_msq.usedsize += ipc_msq_data[i].u.msg_cbytes;
- ipc_msq.allocsize += ipc_msq_data[i].u.msg_qbytes;
- }
- }
-
- static RRDSET *st_queues = NULL, *st_messages = NULL, *st_size = NULL;
- static RRDDIM *rd_queues = NULL, *rd_messages = NULL, *rd_allocated = NULL, *rd_used = NULL;
-
- if (unlikely(!st_queues)) {
- st_queues = rrdset_create_localhost(
- "system",
- "ipc_msq_queues",
- NULL,
- "ipc message queues",
- NULL,
- "Number of IPC Message Queues",
- "queues",
- "freebsd.plugin",
- "kern.ipc.msq",
- NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_queues = rrddim_add(st_queues, "queues", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_queues, rd_queues, ipc_msq.queues);
- rrdset_done(st_queues);
-
- if (unlikely(!st_messages)) {
- st_messages = rrdset_create_localhost(
- "system",
- "ipc_msq_messages",
- NULL,
- "ipc message queues",
- NULL,
- "Number of Messages in IPC Message Queues",
- "messages",
- "freebsd.plugin",
- "kern.ipc.msq",
- NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES,
- update_every,
- RRDSET_TYPE_AREA
- );
-
- rd_messages = rrddim_add(st_messages, "messages", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_messages, rd_messages, ipc_msq.messages);
- rrdset_done(st_messages);
-
- if (unlikely(!st_size)) {
- st_size = rrdset_create_localhost(
- "system",
- "ipc_msq_size",
- NULL,
- "ipc message queues",
- NULL,
- "Size of IPC Message Queues",
- "bytes",
- "freebsd.plugin",
- "kern.ipc.msq",
- NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_allocated = rrddim_add(st_size, "allocated", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_used = rrddim_add(st_size, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_size, rd_allocated, ipc_msq.allocsize);
- rrddim_set_by_pointer(st_size, rd_used, ipc_msq.usedsize);
- rrdset_done(st_size);
- }
- }
-
- return 0;
-}
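-
-// A message queue slot is in use when its msg_qbytes limit is non-zero; for
-// each live queue the charts sum the queued messages (msg_qnum), the bytes
-// currently queued (msg_cbytes, the "used" dimension) and the per-queue byte
-// limit (msg_qbytes, the "allocated" dimension).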
-
-// uptime
-
-int do_uptime(int update_every, usec_t dt) {
- (void)dt;
- struct timespec up_time;
-
- clock_gettime(CLOCK_UPTIME, &up_time);
-
- static RRDSET *st = NULL;
- static RRDDIM *rd = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "system",
- "uptime",
- NULL,
- "uptime",
- NULL,
- "System Uptime",
- "seconds",
- "freebsd.plugin",
- "uptime",
- NETDATA_CHART_PRIO_SYSTEM_UPTIME,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd = rrddim_add(st, "uptime", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd, up_time.tv_sec);
- rrdset_done(st);
- return 0;
-}
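-
-// CLOCK_UPTIME is a FreeBSD clock that counts seconds since boot, so no
-// sysctl round-trip is needed here. A standalone probe of the same value:
-//
-//     struct timespec ts;
-//     if (clock_gettime(CLOCK_UPTIME, &ts) == 0)
-//         printf("up %lld seconds\n", (long long)ts.tv_sec);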
-
-// net.isr
-
-int do_net_isr(int update_every, usec_t dt) {
- (void)dt;
- static int do_netisr = -1, do_netisr_per_core = -1;
-
- if (unlikely(do_netisr == -1)) {
- do_netisr = config_get_boolean("plugin:freebsd:net.isr", "netisr", 1);
- do_netisr_per_core = config_get_boolean("plugin:freebsd:net.isr", "netisr per core", 1);
- }
-
- static struct netisr_stats {
- collected_number dispatched;
- collected_number hybrid_dispatched;
- collected_number qdrops;
- collected_number queued;
- } *netisr_stats = NULL;
-
- if (likely(do_netisr || do_netisr_per_core)) {
- static int mib_workstream[3] = {0, 0, 0}, mib_work[3] = {0, 0, 0};
- size_t netisr_workstream_size = 0, netisr_work_size = 0;
- static struct sysctl_netisr_workstream *netisr_workstream = NULL;
- static struct sysctl_netisr_work *netisr_work = NULL;
- unsigned long num_netisr_workstreams = 0, num_netisr_works = 0;
- int common_error = 0;
-
- if (unlikely(GETSYSCTL_SIZE("net.isr.workstream", mib_workstream, netisr_workstream_size))) {
- common_error = 1;
- } else if (unlikely(GETSYSCTL_SIZE("net.isr.work", mib_work, netisr_work_size))) {
- common_error = 1;
- } else {
- static size_t old_netisr_workstream_size = 0;
-
- num_netisr_workstreams = netisr_workstream_size / sizeof(struct sysctl_netisr_workstream);
- if (unlikely(netisr_workstream_size != old_netisr_workstream_size)) {
- netisr_workstream = reallocz(netisr_workstream,
- num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream));
- old_netisr_workstream_size = netisr_workstream_size;
- }
- if (unlikely(GETSYSCTL_WSIZE("net.isr.workstream", mib_workstream, netisr_workstream,
- num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream)))){
- common_error = 1;
- } else {
- static size_t old_netisr_work_size = 0;
-
- num_netisr_works = netisr_work_size / sizeof(struct sysctl_netisr_work);
- if (unlikely(netisr_work_size != old_netisr_work_size)) {
- netisr_work = reallocz(netisr_work, num_netisr_works * sizeof(struct sysctl_netisr_work));
- old_netisr_work_size = netisr_work_size;
- }
- if (unlikely(GETSYSCTL_WSIZE("net.isr.work", mib_work, netisr_work,
- num_netisr_works * sizeof(struct sysctl_netisr_work)))){
- common_error = 1;
- }
- }
- }
- if (unlikely(common_error)) {
- do_netisr = 0;
- collector_error("DISABLED: system.softnet_stat chart");
- do_netisr_per_core = 0;
- collector_error("DISABLED: system.cpuX_softnet_stat chart");
- common_error = 0;
- collector_error("DISABLED: net.isr module");
- return 1;
- } else {
- unsigned long i, n;
- int j;
- static int old_number_of_cpus = 0;
-
- if (unlikely(number_of_cpus != old_number_of_cpus)) {
- netisr_stats = reallocz(netisr_stats, (number_of_cpus + 1) * sizeof(struct netisr_stats));
- old_number_of_cpus = number_of_cpus;
- }
- memset(netisr_stats, 0, (number_of_cpus + 1) * sizeof(struct netisr_stats));
- for (i = 0; i < num_netisr_workstreams; i++) {
- for (n = 0; n < num_netisr_works; n++) {
- if (netisr_workstream[i].snws_wsid == netisr_work[n].snw_wsid) {
- netisr_stats[netisr_workstream[i].snws_cpu].dispatched += netisr_work[n].snw_dispatched;
- netisr_stats[netisr_workstream[i].snws_cpu].hybrid_dispatched += netisr_work[n].snw_hybrid_dispatched;
- netisr_stats[netisr_workstream[i].snws_cpu].qdrops += netisr_work[n].snw_qdrops;
- netisr_stats[netisr_workstream[i].snws_cpu].queued += netisr_work[n].snw_queued;
- }
- }
- }
- for (j = 0; j < number_of_cpus; j++) {
- netisr_stats[number_of_cpus].dispatched += netisr_stats[j].dispatched;
- netisr_stats[number_of_cpus].hybrid_dispatched += netisr_stats[j].hybrid_dispatched;
- netisr_stats[number_of_cpus].qdrops += netisr_stats[j].qdrops;
- netisr_stats[number_of_cpus].queued += netisr_stats[j].queued;
- }
- }
- } else {
- collector_error("DISABLED: net.isr module");
- return 1;
- }
-
- if (likely(do_netisr)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_dispatched = NULL, *rd_hybrid_dispatched = NULL, *rd_qdrops = NULL, *rd_queued = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system",
- "softnet_stat",
- NULL,
- "softnet_stat",
- NULL,
- "System softnet_stat",
- "events/s",
- "freebsd.plugin",
- "net.isr",
- NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_dispatched = rrddim_add(st, "dispatched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_hybrid_dispatched = rrddim_add(st, "hybrid_dispatched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_qdrops = rrddim_add(st, "qdrops", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_queued = rrddim_add(st, "queued", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_dispatched, netisr_stats[number_of_cpus].dispatched);
- rrddim_set_by_pointer(st, rd_hybrid_dispatched, netisr_stats[number_of_cpus].hybrid_dispatched);
- rrddim_set_by_pointer(st, rd_qdrops, netisr_stats[number_of_cpus].qdrops);
- rrddim_set_by_pointer(st, rd_queued, netisr_stats[number_of_cpus].queued);
- rrdset_done(st);
- }
-
- if (likely(do_netisr_per_core)) {
- static struct softnet_chart {
- char netisr_cpuid[MAX_INT_DIGITS + 17];
- RRDSET *st;
- RRDDIM *rd_dispatched;
- RRDDIM *rd_hybrid_dispatched;
- RRDDIM *rd_qdrops;
- RRDDIM *rd_queued;
- } *all_softnet_charts = NULL;
- static int old_number_of_cpus = 0;
- int i;
-
- if(unlikely(number_of_cpus > old_number_of_cpus)) {
- all_softnet_charts = reallocz(all_softnet_charts, sizeof(struct softnet_chart) * number_of_cpus);
- memset(&all_softnet_charts[old_number_of_cpus], 0, sizeof(struct softnet_chart) * (number_of_cpus - old_number_of_cpus));
- old_number_of_cpus = number_of_cpus;
- }
-
- for (i = 0; i < number_of_cpus ;i++) {
- snprintfz(all_softnet_charts[i].netisr_cpuid, MAX_INT_DIGITS + 17, "cpu%d_softnet_stat", i);
-
- if (unlikely(!all_softnet_charts[i].st)) {
- all_softnet_charts[i].st = rrdset_create_localhost(
- "cpu",
- all_softnet_charts[i].netisr_cpuid,
- NULL,
- "softnet_stat",
- "cpu.softnet_stat",
- "Per CPU netisr statistics",
- "events/s",
- "freebsd.plugin",
- "net.isr",
- NETDATA_CHART_PRIO_SOFTNET_PER_CORE + i,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- all_softnet_charts[i].rd_dispatched = rrddim_add(all_softnet_charts[i].st, "dispatched",
- NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- all_softnet_charts[i].rd_hybrid_dispatched = rrddim_add(all_softnet_charts[i].st, "hybrid_dispatched",
- NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- all_softnet_charts[i].rd_qdrops = rrddim_add(all_softnet_charts[i].st, "qdrops",
- NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- all_softnet_charts[i].rd_queued = rrddim_add(all_softnet_charts[i].st, "queued",
- NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_dispatched,
- netisr_stats[i].dispatched);
- rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_hybrid_dispatched,
- netisr_stats[i].hybrid_dispatched);
- rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_qdrops,
- netisr_stats[i].qdrops);
- rrddim_set_by_pointer(all_softnet_charts[i].st, all_softnet_charts[i].rd_queued,
- netisr_stats[i].queued);
- rrdset_done(all_softnet_charts[i].st);
- }
- }
-
- return 0;
-}
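-
-// net.isr.workstream and net.isr.work are joined on their workstream id
-// (snws_wsid == snw_wsid): each work entry's counters are added to the slot
-// of the CPU that owns its workstream, and the extra slot [number_of_cpus]
-// accumulates the all-CPU totals used by the system-wide softnet_stat chart.
-// The O(n*m) join is acceptable because both tables are small (a handful of
-// protocol work entries per per-CPU workstream).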
-
-// net.inet.tcp.states
-
-int do_net_inet_tcp_states(int update_every, usec_t dt) {
- (void)dt;
- static int mib[4] = {0, 0, 0, 0};
- uint64_t tcps_states[TCP_NSTATES];
-
- // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html
- if (unlikely(GETSYSCTL_SIMPLE("net.inet.tcp.states", mib, tcps_states))) {
- collector_error("DISABLED: ipv4.tcpsock chart");
- collector_error("DISABLED: net.inet.tcp.states module");
- return 1;
- } else {
- static RRDSET *st = NULL;
- static RRDDIM *rd = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "tcpsock",
- NULL,
- "tcp",
- NULL,
- "IPv4 TCP Connections",
- "active connections",
- "freebsd.plugin",
- "net.inet.tcp.states",
- 2500,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd = rrddim_add(st, "CurrEstab", "connections", 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd, tcps_states[TCPS_ESTABLISHED]);
- rrdset_done(st);
- }
-
- return 0;
-}
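-
-// net.inet.tcp.states exports an array of TCP_NSTATES counters indexed by the
-// TCP FSM state (netinet/tcp_fsm.h); only TCPS_ESTABLISHED is charted here.
-// A minimal standalone read of the same OID:
-//
-//     uint64_t states[TCP_NSTATES];
-//     size_t len = sizeof(states);
-//     if (sysctlbyname("net.inet.tcp.states", states, &len, NULL, 0) == 0)
-//         printf("established: %ju\n", (uintmax_t)states[TCPS_ESTABLISHED]);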
-
-// net.inet.tcp.stats
-
-int do_net_inet_tcp_stats(int update_every, usec_t dt) {
- (void)dt;
- static int do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_tcpext_connaborts = -1, do_tcpext_ofo = -1,
- do_tcpext_syncookies = -1, do_tcpext_listen = -1, do_ecn = -1;
-
- if (unlikely(do_tcp_packets == -1)) {
- do_tcp_packets = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP packets", 1);
- do_tcp_errors = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP errors", 1);
- do_tcp_handshake = config_get_boolean("plugin:freebsd:net.inet.tcp.stats", "ipv4 TCP handshake issues", 1);
- do_tcpext_connaborts = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP connection aborts",
- CONFIG_BOOLEAN_AUTO);
- do_tcpext_ofo = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP out-of-order queue",
- CONFIG_BOOLEAN_AUTO);
- do_tcpext_syncookies = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP SYN cookies",
- CONFIG_BOOLEAN_AUTO);
- do_tcpext_listen = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "TCP listen issues",
- CONFIG_BOOLEAN_AUTO);
- do_ecn = config_get_boolean_ondemand("plugin:freebsd:net.inet.tcp.stats", "ECN packets",
- CONFIG_BOOLEAN_AUTO);
- }
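-
-// The "_ondemand" options above return YES, NO or AUTO. With AUTO a chart is
-// created only once its counters become non-zero (or when
-// netdata_zero_metrics_enabled forces it), and the flag then latches to
-// CONFIG_BOOLEAN_YES below, so the chart keeps updating even if the counters
-// later read zero again.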
-
- // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html
- if (likely(do_tcp_packets || do_tcp_errors || do_tcp_handshake || do_tcpext_connaborts || do_tcpext_ofo ||
- do_tcpext_syncookies || do_tcpext_listen || do_ecn)) {
- static int mib[4] = {0, 0, 0, 0};
- struct tcpstat tcpstat;
-
- if (unlikely(GETSYSCTL_SIMPLE("net.inet.tcp.stats", mib, tcpstat))) {
- do_tcp_packets = 0;
- collector_error("DISABLED: ipv4.tcppackets chart");
- do_tcp_errors = 0;
- collector_error("DISABLED: ipv4.tcperrors chart");
- do_tcp_handshake = 0;
- collector_error("DISABLED: ipv4.tcphandshake chart");
- do_tcpext_connaborts = 0;
- collector_error("DISABLED: ipv4.tcpconnaborts chart");
- do_tcpext_ofo = 0;
- collector_error("DISABLED: ipv4.tcpofo chart");
- do_tcpext_syncookies = 0;
- collector_error("DISABLED: ipv4.tcpsyncookies chart");
- do_tcpext_listen = 0;
- collector_error("DISABLED: ipv4.tcplistenissues chart");
- do_ecn = 0;
- collector_error("DISABLED: ipv4.ecnpkts chart");
- collector_error("DISABLED: net.inet.tcp.stats module");
- return 1;
- } else {
- if (likely(do_tcp_packets)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_in_segs = NULL, *rd_out_segs = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "tcppackets",
- NULL,
- "tcp",
- NULL,
- "IPv4 TCP Packets",
- "packets/s",
- "freebsd.plugin",
- "net.inet.tcp.stats",
- 2600,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_in_segs = rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_segs = rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in_segs, tcpstat.tcps_rcvtotal);
- rrddim_set_by_pointer(st, rd_out_segs, tcpstat.tcps_sndtotal);
- rrdset_done(st);
- }
-
- if (likely(do_tcp_errors)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_in_errs = NULL, *rd_in_csum_errs = NULL, *rd_retrans_segs = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "tcperrors",
- NULL,
- "tcp",
- NULL,
- "IPv4 TCP Errors",
- "packets/s",
- "freebsd.plugin",
- "net.inet.tcp.stats",
- 2700,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_in_errs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_csum_errs = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_retrans_segs = rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
-#if __FreeBSD__ >= 11
- rrddim_set_by_pointer(st, rd_in_errs, tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvreassfull +
- tcpstat.tcps_rcvshort);
-#else
- rrddim_set_by_pointer(st, rd_in_errs, tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvshort);
-#endif
- rrddim_set_by_pointer(st, rd_in_csum_errs, tcpstat.tcps_rcvbadsum);
- rrddim_set_by_pointer(st, rd_retrans_segs, tcpstat.tcps_sndrexmitpack);
- rrdset_done(st);
- }
-
- if (likely(do_tcp_handshake)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_estab_resets = NULL, *rd_active_opens = NULL, *rd_passive_opens = NULL,
- *rd_attempt_fails = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "tcphandshake",
- NULL,
- "tcp",
- NULL,
- "IPv4 TCP Handshake Issues",
- "events/s",
- "freebsd.plugin",
- "net.inet.tcp.stats",
- 2900,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_estab_resets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_active_opens = rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_passive_opens = rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_attempt_fails = rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_estab_resets, tcpstat.tcps_drops);
- rrddim_set_by_pointer(st, rd_active_opens, tcpstat.tcps_connattempt);
- rrddim_set_by_pointer(st, rd_passive_opens, tcpstat.tcps_accepts);
- rrddim_set_by_pointer(st, rd_attempt_fails, tcpstat.tcps_conndrops);
- rrdset_done(st);
- }
-
- if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO &&
- (tcpstat.tcps_rcvpackafterwin ||
- tcpstat.tcps_rcvafterclose ||
- tcpstat.tcps_rcvmemdrop ||
- tcpstat.tcps_persistdrop ||
- tcpstat.tcps_finwait2_drops ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_connaborts = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_on_data = NULL, *rd_on_close = NULL, *rd_on_memory = NULL,
- *rd_on_timeout = NULL, *rd_on_linger = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "tcpconnaborts",
- NULL,
- "tcp",
- NULL,
- "TCP Connection Aborts",
- "connections/s",
- "freebsd.plugin",
- "net.inet.tcp.stats",
- 3010,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_on_data = rrddim_add(st, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_on_close = rrddim_add(st, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_on_memory = rrddim_add(st, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_on_timeout = rrddim_add(st, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_on_linger = rrddim_add(st, "TCPAbortOnLinger", "linger", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_on_data, tcpstat.tcps_rcvpackafterwin);
- rrddim_set_by_pointer(st, rd_on_close, tcpstat.tcps_rcvafterclose);
- rrddim_set_by_pointer(st, rd_on_memory, tcpstat.tcps_rcvmemdrop);
- rrddim_set_by_pointer(st, rd_on_timeout, tcpstat.tcps_persistdrop);
- rrddim_set_by_pointer(st, rd_on_linger, tcpstat.tcps_finwait2_drops);
- rrdset_done(st);
- }
-
- if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO &&
- (tcpstat.tcps_rcvoopack ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_ofo = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_ofo_queue = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "tcpofo",
- NULL,
- "tcp",
- NULL,
- "TCP Out-Of-Order Queue",
- "packets/s",
- "freebsd.plugin",
- "net.inet.tcp.stats",
- 3050,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_ofo_queue = rrddim_add(st, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_ofo_queue, tcpstat.tcps_rcvoopack);
- rrdset_done(st);
- }
-
- if (do_tcpext_syncookies == CONFIG_BOOLEAN_YES || (do_tcpext_syncookies == CONFIG_BOOLEAN_AUTO &&
- (tcpstat.tcps_sc_sendcookie ||
- tcpstat.tcps_sc_recvcookie ||
- tcpstat.tcps_sc_zonefail ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_syncookies = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_recv = NULL, *rd_send = NULL, *rd_failed = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "tcpsyncookies",
- NULL,
- "tcp",
- NULL,
- "TCP SYN Cookies",
- "packets/s",
- "freebsd.plugin",
- "net.inet.tcp.stats",
- 3100,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_recv = rrddim_add(st, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_send = rrddim_add(st, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_failed = rrddim_add(st, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_recv, tcpstat.tcps_sc_recvcookie);
- rrddim_set_by_pointer(st, rd_send, tcpstat.tcps_sc_sendcookie);
- rrddim_set_by_pointer(st, rd_failed, tcpstat.tcps_sc_zonefail);
- rrdset_done(st);
- }
-
- if(do_tcpext_listen == CONFIG_BOOLEAN_YES || (do_tcpext_listen == CONFIG_BOOLEAN_AUTO &&
- (tcpstat.tcps_listendrop ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_listen = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_listen = NULL;
- static RRDDIM *rd_overflows = NULL;
-
- if(unlikely(!st_listen)) {
-
- st_listen = rrdset_create_localhost(
- "ipv4",
- "tcplistenissues",
- NULL,
- "tcp",
- NULL,
- "TCP Listen Socket Issues",
- "packets/s",
- "freebsd.plugin",
- "net.inet.tcp.stats",
- 3015,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_overflows = rrddim_add(st_listen, "ListenOverflows", "overflows", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_listen, rd_overflows, tcpstat.tcps_listendrop);
- rrdset_done(st_listen);
- }
-
- if (do_ecn == CONFIG_BOOLEAN_YES || ( do_ecn == CONFIG_BOOLEAN_AUTO &&
- ( netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES ||
-#if __FreeBSD_version < 1400074
-// See https://github.com/freebsd/freebsd-src/commit/1a70101a870015304d5b2446b480d8677d8aad36
- tcpstat.tcps_ecn_ce ||
- tcpstat.tcps_ecn_ect0 ||
- tcpstat.tcps_ecn_ect1
-#else
- tcpstat.tcps_ecn_rcvce ||
- tcpstat.tcps_ecn_rcvect0 ||
- tcpstat.tcps_ecn_rcvect1 ||
- tcpstat.tcps_ecn_sndect0 ||
- tcpstat.tcps_ecn_sndect1
-#endif
- ))) {
- do_ecn = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_rcvce = NULL,
-#if __FreeBSD_version < 1400074
- *rd_ect0 = NULL,
- *rd_ect1 = NULL;
-#else
- *rd_rcvect0 = NULL,
- *rd_rcvect1 = NULL,
- *rd_sndect0 = NULL,
- *rd_sndect1 = NULL;
-#endif
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "ecnpkts",
- NULL,
- "ecn",
- NULL,
- "IPv4 ECN Statistics",
- "packets/s",
- "freebsd.plugin",
- "net.inet.tcp.stats",
- 8700,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_rcvce = rrddim_add(st, "InCEPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#if __FreeBSD_version < 1400074
- rd_ect0 = rrddim_add(st, "ECT0Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_ect1 = rrddim_add(st, "ECT1Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#else
- rd_rcvect0 = rrddim_add(st, "InECT0Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_rcvect1 = rrddim_add(st, "InECT1Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_sndect0 = rrddim_add(st, "OutECT0Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_sndect1 = rrddim_add(st, "OutECT1Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#endif
- }
-
-#if __FreeBSD_version < 1400074
- rrddim_set_by_pointer(st, rd_rcvce, tcpstat.tcps_ecn_ce);
- rrddim_set_by_pointer(st, rd_ect0, tcpstat.tcps_ecn_ect0);
- rrddim_set_by_pointer(st, rd_ect1, tcpstat.tcps_ecn_ect1);
-#else
- rrddim_set_by_pointer(st, rd_rcvce, tcpstat.tcps_ecn_rcvce);
- rrddim_set_by_pointer(st, rd_rcvect0, tcpstat.tcps_ecn_rcvect0);
- rrddim_set_by_pointer(st, rd_rcvect1, tcpstat.tcps_ecn_rcvect1);
- rrddim_set_by_pointer(st, rd_sndect0, tcpstat.tcps_ecn_sndect0);
- rrddim_set_by_pointer(st, rd_sndect1, tcpstat.tcps_ecn_sndect1);
-#endif
- rrdset_done(st);
- }
- }
- } else {
- collector_error("DISABLED: net.inet.tcp.stats module");
- return 1;
- }
-
- return 0;
-}
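-
-// The version gates above track kernel counter changes: the __FreeBSD__ >= 11
-// check exists because older releases lack tcps_rcvreassfull, and the
-// __FreeBSD_version < 1400074 split follows the kernel's ECN accounting
-// change from combined tcps_ecn_ce/ect0/ect1 counters to separate receive
-// (rcvce/rcvect0/rcvect1) and send (sndect0/sndect1) counters (see the
-// freebsd-src commit referenced above).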
-
-// net.inet.udp.stats
-
-int do_net_inet_udp_stats(int update_every, usec_t dt) {
- (void)dt;
- static int do_udp_packets = -1, do_udp_errors = -1;
-
- if (unlikely(do_udp_packets == -1)) {
- do_udp_packets = config_get_boolean("plugin:freebsd:net.inet.udp.stats", "ipv4 UDP packets", 1);
- do_udp_errors = config_get_boolean("plugin:freebsd:net.inet.udp.stats", "ipv4 UDP errors", 1);
- }
-
- // see http://net-snmp.sourceforge.net/docs/mibs/udp.html
- if (likely(do_udp_packets || do_udp_errors)) {
- static int mib[4] = {0, 0, 0, 0};
- struct udpstat udpstat;
-
- if (unlikely(GETSYSCTL_SIMPLE("net.inet.udp.stats", mib, udpstat))) {
- do_udp_packets = 0;
- collector_error("DISABLED: ipv4.udppackets chart");
- do_udp_errors = 0;
- collector_error("DISABLED: ipv4.udperrors chart");
- collector_error("DISABLED: net.inet.udp.stats module");
- return 1;
- } else {
- if (likely(do_udp_packets)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "udppackets",
- NULL,
- "udp",
- NULL,
- "IPv4 UDP Packets",
- "packets/s",
- "freebsd.plugin",
- "net.inet.udp.stats",
- 2601,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_in = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in, udpstat.udps_ipackets);
- rrddim_set_by_pointer(st, rd_out, udpstat.udps_opackets);
- rrdset_done(st);
- }
-
- if (likely(do_udp_errors)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_in_errors = NULL, *rd_no_ports = NULL, *rd_recv_buf_errors = NULL,
- *rd_in_csum_errors = NULL, *rd_ignored_multi = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "udperrors",
- NULL,
- "udp",
- NULL,
- "IPv4 UDP Errors",
- "events/s",
- "freebsd.plugin",
- "net.inet.udp.stats",
- 2701,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_in_errors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_no_ports = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_recv_buf_errors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_csum_errors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_ignored_multi = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in_errors, udpstat.udps_hdrops + udpstat.udps_badlen);
- rrddim_set_by_pointer(st, rd_no_ports, udpstat.udps_noport);
- rrddim_set_by_pointer(st, rd_recv_buf_errors, udpstat.udps_fullsock);
- rrddim_set_by_pointer(st, rd_in_csum_errors, udpstat.udps_badsum + udpstat.udps_nosum);
- rrddim_set_by_pointer(st, rd_ignored_multi, udpstat.udps_filtermcast);
- rrdset_done(st);
- }
- }
- } else {
- collector_error("DISABLED: net.inet.udp.stats module");
- return 1;
- }
-
- return 0;
-}
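-
-// Dimension ids reuse the SNMP UDP-MIB names while the values come from
-// FreeBSD's struct udpstat: InErrors sums header drops and bad lengths,
-// InCsumErrors sums bad and missing checksums, RcvbufErrors maps to
-// udps_fullsock (socket buffer full) and IgnoredMulti to udps_filtermcast.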
-
-// net.inet.icmp.stats
-
-int do_net_inet_icmp_stats(int update_every, usec_t dt) {
- (void)dt;
- static int do_icmp_packets = -1, do_icmp_errors = -1, do_icmpmsg = -1;
-
- if (unlikely(do_icmp_packets == -1)) {
- do_icmp_packets = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP packets", 1);
- do_icmp_errors = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP errors", 1);
- do_icmpmsg = config_get_boolean("plugin:freebsd:net.inet.icmp.stats", "ipv4 ICMP messages", 1);
- }
-
- if (likely(do_icmp_packets || do_icmp_errors || do_icmpmsg)) {
- static int mib[4] = {0, 0, 0, 0};
- struct icmpstat icmpstat;
- struct icmp_total {
- u_long msgs_in;
- u_long msgs_out;
- } icmp_total = {0, 0};
-
- if (unlikely(GETSYSCTL_SIMPLE("net.inet.icmp.stats", mib, icmpstat))) {
- do_icmp_packets = 0;
- collector_error("DISABLED: ipv4.icmp chart");
- do_icmp_errors = 0;
- collector_error("DISABLED: ipv4.icmp_errors chart");
- do_icmpmsg = 0;
- collector_error("DISABLED: ipv4.icmpmsg chart");
- collector_error("DISABLED: net.inet.icmp.stats module");
- return 1;
- } else {
- int i;
-
- for (i = 0; i <= ICMP_MAXTYPE; i++) {
- icmp_total.msgs_in += icmpstat.icps_inhist[i];
- icmp_total.msgs_out += icmpstat.icps_outhist[i];
- }
- icmp_total.msgs_in += icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort;
-
- if (likely(do_icmp_packets)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "icmp"
- , NULL
- , "icmp"
- , NULL
- , "IPv4 ICMP Packets"
- , "packets/s"
- , "freebsd.plugin"
- , "net.inet.icmp.stats"
- , 2602
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_in = rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in, icmp_total.msgs_in);
- rrddim_set_by_pointer(st, rd_out, icmp_total.msgs_out);
- rrdset_done(st);
- }
-
- if (likely(do_icmp_errors)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL, *rd_in_csum = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "icmp_errors"
- , NULL
- , "icmp"
- , NULL
- , "IPv4 ICMP Errors"
- , "packets/s"
- , "freebsd.plugin"
- , "net.inet.icmp.stats"
- , 2603
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_in = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_csum = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in, icmpstat.icps_badcode + icmpstat.icps_badlen +
- icmpstat.icps_checksum + icmpstat.icps_tooshort);
- rrddim_set_by_pointer(st, rd_out, icmpstat.icps_error);
- rrddim_set_by_pointer(st, rd_in_csum, icmpstat.icps_checksum);
-
- rrdset_done(st);
- }
-
- if (likely(do_icmpmsg)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_in_reps = NULL, *rd_out_reps = NULL, *rd_in = NULL, *rd_out = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "icmpmsg"
- , NULL
- , "icmp"
- , NULL
- , "IPv4 ICMP Messages"
- , "packets/s"
- , "freebsd.plugin"
- , "net.inet.icmp.stats"
- , 2604
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_in_reps = rrddim_add(st, "InEchoReps", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_reps = rrddim_add(st, "OutEchoReps", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in_reps, icmpstat.icps_inhist[ICMP_ECHOREPLY]);
- rrddim_set_by_pointer(st, rd_out_reps, icmpstat.icps_outhist[ICMP_ECHOREPLY]);
- rrddim_set_by_pointer(st, rd_in, icmpstat.icps_inhist[ICMP_ECHO]);
- rrddim_set_by_pointer(st, rd_out, icmpstat.icps_outhist[ICMP_ECHO]);
- rrdset_done(st);
- }
- }
- } else {
- collector_error("DISABLED: net.inet.icmp.stats module");
- return 1;
- }
-
- return 0;
-}
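-
-// struct icmpstat keeps per-type histograms (icps_inhist/icps_outhist indexed
-// 0..ICMP_MAXTYPE); the totals chart sums all buckets and adds the malformed
-// input counters (bad code/length/checksum/too short), so dropped input still
-// shows up in InMsgs, matching how the errors chart computes InErrors.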
-
-// net.inet.ip.stats
-
-int do_net_inet_ip_stats(int update_every, usec_t dt) {
- (void)dt;
- static int do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1;
-
- if (unlikely(do_ip_packets == -1)) {
- do_ip_packets = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 packets", 1);
- do_ip_fragsout = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 fragments sent", 1);
- do_ip_fragsin = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 fragments assembly", 1);
- do_ip_errors = config_get_boolean("plugin:freebsd:net.inet.ip.stats", "ipv4 errors", 1);
- }
-
- // see also http://net-snmp.sourceforge.net/docs/mibs/ip.html
- if (likely(do_ip_packets || do_ip_fragsout || do_ip_fragsin || do_ip_errors)) {
- static int mib[4] = {0, 0, 0, 0};
- struct ipstat ipstat;
-
- if (unlikely(GETSYSCTL_SIMPLE("net.inet.ip.stats", mib, ipstat))) {
- do_ip_packets = 0;
- collector_error("DISABLED: ipv4.packets chart");
- do_ip_fragsout = 0;
- collector_error("DISABLED: ipv4.fragsout chart");
- do_ip_fragsin = 0;
- collector_error("DISABLED: ipv4.fragsin chart");
- do_ip_errors = 0;
- collector_error("DISABLED: ipv4.errors chart");
- collector_error("DISABLED: net.inet.ip.stats module");
- return 1;
- } else {
- if (likely(do_ip_packets)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_in_receives = NULL, *rd_out_requests = NULL, *rd_forward_datagrams = NULL,
- *rd_in_delivers = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "packets",
- NULL,
- "packets",
- NULL,
- "IPv4 Packets",
- "packets/s",
- "freebsd.plugin",
- "net.inet.ip.stats",
- 3000,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_in_receives = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_requests = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_forward_datagrams = rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_delivers = rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in_receives, ipstat.ips_total);
- rrddim_set_by_pointer(st, rd_out_requests, ipstat.ips_localout);
- rrddim_set_by_pointer(st, rd_forward_datagrams, ipstat.ips_forward);
- rrddim_set_by_pointer(st, rd_in_delivers, ipstat.ips_delivered);
- rrdset_done(st);
- }
-
- if (likely(do_ip_fragsout)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_ok = NULL, *rd_fails = NULL, *rd_created = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "fragsout",
- NULL,
- "fragments",
- NULL,
- "IPv4 Fragments Sent",
- "packets/s",
- "freebsd.plugin",
- "net.inet.ip.stats",
- 3010,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_fails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_created = rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_ok, ipstat.ips_fragmented);
- rrddim_set_by_pointer(st, rd_fails, ipstat.ips_cantfrag);
- rrddim_set_by_pointer(st, rd_created, ipstat.ips_ofragments);
- rrdset_done(st);
- }
-
- if (likely(do_ip_fragsin)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_all = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "fragsin",
- NULL,
- "fragments",
- NULL,
- "IPv4 Fragments Reassembly",
- "packets/s",
- "freebsd.plugin",
- "net.inet.ip.stats",
- 3011,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_all = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_ok, ipstat.ips_reassembled);
- rrddim_set_by_pointer(st, rd_failed, ipstat.ips_fragdropped);
- rrddim_set_by_pointer(st, rd_all, ipstat.ips_fragments);
- rrdset_done(st);
- }
-
- if (likely(do_ip_errors)) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_in_discards = NULL, *rd_out_discards = NULL,
- *rd_in_hdr_errors = NULL, *rd_out_no_routes = NULL,
- *rd_in_addr_errors = NULL, *rd_in_unknown_protos = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4",
- "errors",
- NULL,
- "errors",
- NULL,
- "IPv4 Errors",
- "packets/s",
- "freebsd.plugin",
- "net.inet.ip.stats",
- 3002,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_no_routes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_addr_errors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_unknown_protos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in_discards, ipstat.ips_badsum + ipstat.ips_tooshort +
- ipstat.ips_toosmall + ipstat.ips_toolong);
- rrddim_set_by_pointer(st, rd_out_discards, ipstat.ips_odropped);
- rrddim_set_by_pointer(st, rd_in_hdr_errors, ipstat.ips_badhlen + ipstat.ips_badlen +
- ipstat.ips_badoptions + ipstat.ips_badvers);
- rrddim_set_by_pointer(st, rd_out_no_routes, ipstat.ips_noroute);
- rrddim_set_by_pointer(st, rd_in_addr_errors, ipstat.ips_badaddr);
- rrddim_set_by_pointer(st, rd_in_unknown_protos, ipstat.ips_noproto);
- rrdset_done(st);
- }
- }
- } else {
- collector_error("DISABLED: net.inet.ip.stats module");
- return 1;
- }
-
- return 0;
-}
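-
-// The IPv4 error dimensions aggregate several struct ipstat counters under
-// SNMP IP-MIB names: InDiscards covers bad checksum and length problems,
-// InHdrErrors covers bad header length, options and version, while the
-// remaining dimensions map one-to-one (odropped, noroute, badaddr, noproto).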
-
-// net.inet6.ip6.stats
-
-int do_net_inet6_ip6_stats(int update_every, usec_t dt) {
- (void)dt;
- static int do_ip6_packets = -1, do_ip6_fragsout = -1, do_ip6_fragsin = -1, do_ip6_errors = -1;
-
- if (unlikely(do_ip6_packets == -1)) {
- do_ip6_packets = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 packets",
- CONFIG_BOOLEAN_AUTO);
- do_ip6_fragsout = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 fragments sent",
- CONFIG_BOOLEAN_AUTO);
- do_ip6_fragsin = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 fragments assembly",
- CONFIG_BOOLEAN_AUTO);
- do_ip6_errors = config_get_boolean_ondemand("plugin:freebsd:net.inet6.ip6.stats", "ipv6 errors",
- CONFIG_BOOLEAN_AUTO);
- }
-
- if (likely(do_ip6_packets || do_ip6_fragsout || do_ip6_fragsin || do_ip6_errors)) {
- static int mib[4] = {0, 0, 0, 0};
- struct ip6stat ip6stat;
-
- if (unlikely(GETSYSCTL_SIMPLE("net.inet6.ip6.stats", mib, ip6stat))) {
- do_ip6_packets = 0;
- collector_error("DISABLED: ipv6.packets chart");
- do_ip6_fragsout = 0;
- collector_error("DISABLED: ipv6.fragsout chart");
- do_ip6_fragsin = 0;
- collector_error("DISABLED: ipv6.fragsin chart");
- do_ip6_errors = 0;
- collector_error("DISABLED: ipv6.errors chart");
- collector_error("DISABLED: net.inet6.ip6.stats module");
- return 1;
- } else {
- if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO &&
- (ip6stat.ip6s_localout ||
- ip6stat.ip6s_total ||
- ip6stat.ip6s_forward ||
- ip6stat.ip6s_delivered ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_packets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_received = NULL, *rd_sent = NULL, *rd_forwarded = NULL, *rd_delivers = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6",
- "packets",
- NULL,
- "packets",
- NULL,
- "IPv6 Packets",
- "packets/s",
- "freebsd.plugin",
- "net.inet6.ip6.stats",
- 3000,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_forwarded = rrddim_add(st, "forwarded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_delivers = rrddim_add(st, "delivers", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_sent, ip6stat.ip6s_localout);
- rrddim_set_by_pointer(st, rd_received, ip6stat.ip6s_total);
- rrddim_set_by_pointer(st, rd_forwarded, ip6stat.ip6s_forward);
- rrddim_set_by_pointer(st, rd_delivers, ip6stat.ip6s_delivered);
- rrdset_done(st);
- }
-
- if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO &&
- (ip6stat.ip6s_fragmented ||
- ip6stat.ip6s_cantfrag ||
- ip6stat.ip6s_ofragments ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_fragsout = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_all = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6",
- "fragsout",
- NULL,
- "fragments",
- NULL,
- "IPv6 Fragments Sent",
- "packets/s",
- "freebsd.plugin",
- "net.inet6.ip6.stats",
- 3010,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_all = rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_ok, ip6stat.ip6s_fragmented);
- rrddim_set_by_pointer(st, rd_failed, ip6stat.ip6s_cantfrag);
- rrddim_set_by_pointer(st, rd_all, ip6stat.ip6s_ofragments);
- rrdset_done(st);
- }
-
- if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO &&
- (ip6stat.ip6s_reassembled ||
- ip6stat.ip6s_fragdropped ||
- ip6stat.ip6s_fragtimeout ||
- ip6stat.ip6s_fragments ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_fragsin = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_ok = NULL, *rd_failed = NULL, *rd_timeout = NULL, *rd_all = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6",
- "fragsin",
- NULL,
- "fragments",
- NULL,
- "IPv6 Fragments Reassembly",
- "packets/s",
- "freebsd.plugin",
- "net.inet6.ip6.stats",
- 3011,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_timeout = rrddim_add(st, "timeout", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_all = rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_ok, ip6stat.ip6s_reassembled);
- rrddim_set_by_pointer(st, rd_failed, ip6stat.ip6s_fragdropped);
- rrddim_set_by_pointer(st, rd_timeout, ip6stat.ip6s_fragtimeout);
- rrddim_set_by_pointer(st, rd_all, ip6stat.ip6s_fragments);
- rrdset_done(st);
- }
-
- if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO &&
- (ip6stat.ip6s_toosmall ||
- ip6stat.ip6s_odropped ||
- ip6stat.ip6s_badoptions ||
- ip6stat.ip6s_badvers ||
- ip6stat.ip6s_exthdrtoolong ||
- ip6stat.ip6s_sources_none ||
- ip6stat.ip6s_tooshort ||
- ip6stat.ip6s_cantforward ||
- ip6stat.ip6s_noroute ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_errors = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_in_discards = NULL, *rd_out_discards = NULL,
- *rd_in_hdr_errors = NULL, *rd_in_addr_errors = NULL, *rd_in_truncated_pkts = NULL,
- *rd_in_no_routes = NULL, *rd_out_no_routes = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6",
- "errors",
- NULL,
- "errors",
- NULL,
- "IPv6 Errors",
- "packets/s",
- "freebsd.plugin",
- "net.inet6.ip6.stats",
- 3002,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_addr_errors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_truncated_pkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_no_routes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_no_routes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in_discards, ip6stat.ip6s_toosmall);
- rrddim_set_by_pointer(st, rd_out_discards, ip6stat.ip6s_odropped);
- rrddim_set_by_pointer(st, rd_in_hdr_errors, ip6stat.ip6s_badoptions + ip6stat.ip6s_badvers +
- ip6stat.ip6s_exthdrtoolong);
- rrddim_set_by_pointer(st, rd_in_addr_errors, ip6stat.ip6s_sources_none);
- rrddim_set_by_pointer(st, rd_in_truncated_pkts, ip6stat.ip6s_tooshort);
- rrddim_set_by_pointer(st, rd_in_no_routes, ip6stat.ip6s_cantforward);
- rrddim_set_by_pointer(st, rd_out_no_routes, ip6stat.ip6s_noroute);
- rrdset_done(st);
- }
- }
- } else {
- collector_error("DISABLED: net.inet6.ip6.stats module");
- return 1;
- }
-
- return 0;
-}
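-
-// The IPv6 charts reuse the same priorities as their IPv4 counterparts
-// (3000, 3010, 3011, 3002) so the two families sort side by side, and all
-// four options default to AUTO, staying hidden until the corresponding
-// ip6s_* counters become non-zero.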
-
-// net.inet6.icmp6.stats
-
-int do_net_inet6_icmp6_stats(int update_every, usec_t dt) {
- (void)dt;
- static int do_icmp6 = -1, do_icmp6_redir = -1, do_icmp6_errors = -1, do_icmp6_echos = -1, do_icmp6_router = -1,
- do_icmp6_neighbor = -1, do_icmp6_types = -1;
-
- if (unlikely(do_icmp6 == -1)) {
- do_icmp6 = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp",
- CONFIG_BOOLEAN_AUTO);
- do_icmp6_redir = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp redirects",
- CONFIG_BOOLEAN_AUTO);
- do_icmp6_errors = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp errors",
- CONFIG_BOOLEAN_AUTO);
- do_icmp6_echos = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp echos",
- CONFIG_BOOLEAN_AUTO);
- do_icmp6_router = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp router",
- CONFIG_BOOLEAN_AUTO);
- do_icmp6_neighbor = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp neighbor",
- CONFIG_BOOLEAN_AUTO);
- do_icmp6_types = config_get_boolean_ondemand("plugin:freebsd:net.inet6.icmp6.stats", "icmp types",
- CONFIG_BOOLEAN_AUTO);
- }
-
- if (likely(do_icmp6 || do_icmp6_redir || do_icmp6_errors || do_icmp6_echos || do_icmp6_router || do_icmp6_neighbor || do_icmp6_types)) {
- static int mib[4] = {0, 0, 0, 0};
- struct icmp6stat icmp6stat;
-
- if (unlikely(GETSYSCTL_SIMPLE("net.inet6.icmp6.stats", mib, icmp6stat))) {
- do_icmp6 = 0;
- collector_error("DISABLED: ipv6.icmp chart");
- do_icmp6_redir = 0;
- collector_error("DISABLED: ipv6.icmpredir chart");
- do_icmp6_errors = 0;
- collector_error("DISABLED: ipv6.icmperrors chart");
- do_icmp6_echos = 0;
- collector_error("DISABLED: ipv6.icmpechos chart");
- do_icmp6_router = 0;
- collector_error("DISABLED: ipv6.icmprouter chart");
- do_icmp6_neighbor = 0;
- collector_error("DISABLED: ipv6.icmpneighbor chart");
- do_icmp6_types = 0;
- collector_error("DISABLED: ipv6.icmptypes chart");
- collector_error("DISABLED: net.inet6.icmp6.stats module");
- return 1;
- } else {
- int i;
- struct icmp6_total {
- u_long msgs_in;
- u_long msgs_out;
- } icmp6_total = {0, 0};
-
- for (i = 0; i <= ICMP6_MAXTYPE; i++) {
- icmp6_total.msgs_in += icmp6stat.icp6s_inhist[i];
- icmp6_total.msgs_out += icmp6stat.icp6s_outhist[i];
- }
- icmp6_total.msgs_in += icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort;
-
- // --------------------------------------------------------------------
-
- if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO &&
- (icmp6_total.msgs_in ||
- icmp6_total.msgs_out ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6 = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_received = NULL, *rd_sent = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6",
- "icmp",
- NULL,
- "icmp",
- NULL,
- "IPv6 ICMP Messages",
- "messages/s",
- "freebsd.plugin",
- "net.inet6.icmp6.stats",
- 10000,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_received, icmp6_total.msgs_in);
- rrddim_set_by_pointer(st, rd_sent, icmp6_total.msgs_out);
- rrdset_done(st);
- }
-
- if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO &&
- (icmp6stat.icp6s_inhist[ND_REDIRECT] ||
- icmp6stat.icp6s_outhist[ND_REDIRECT] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6_redir = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_received = NULL, *rd_sent = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6",
- "icmpredir",
- NULL,
- "icmp",
- NULL,
- "IPv6 ICMP Redirects",
- "redirects/s",
- "freebsd.plugin",
- "net.inet6.icmp6.stats",
- 10050,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_received = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_sent = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_received, icmp6stat.icp6s_inhist[ND_REDIRECT]);
- rrddim_set_by_pointer(st, rd_sent, icmp6stat.icp6s_outhist[ND_REDIRECT]);
- rrdset_done(st);
- }
-
- if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO &&
- (icmp6stat.icp6s_badcode ||
- icmp6stat.icp6s_badlen ||
- icmp6stat.icp6s_checksum ||
- icmp6stat.icp6s_tooshort ||
- icmp6stat.icp6s_error ||
- icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] ||
- icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] ||
- icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] ||
- icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] ||
- icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] ||
- icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6_errors = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_in_errors = NULL, *rd_out_errors = NULL, *rd_in_csum_errors = NULL,
- *rd_in_dest_unreachs = NULL, *rd_in_pkt_too_bigs = NULL, *rd_in_time_excds = NULL,
- *rd_in_parm_problems = NULL, *rd_out_dest_unreachs = NULL, *rd_out_time_excds = NULL,
- *rd_out_parm_problems = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6",
- "icmperrors",
- NULL, "icmp",
- NULL,
- "IPv6 ICMP Errors",
- "errors/s",
- "freebsd.plugin",
- "net.inet6.icmp6.stats",
- 10100,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_in_errors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_errors = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_csum_errors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_dest_unreachs = rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_pkt_too_bigs = rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_time_excds = rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_parm_problems = rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_dest_unreachs = rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_time_excds = rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_parm_problems = rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in_errors, icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen +
- icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort);
- rrddim_set_by_pointer(st, rd_out_errors, icmp6stat.icp6s_error);
- rrddim_set_by_pointer(st, rd_in_csum_errors, icmp6stat.icp6s_checksum);
- rrddim_set_by_pointer(st, rd_in_dest_unreachs, icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH]);
- rrddim_set_by_pointer(st, rd_in_pkt_too_bigs, icmp6stat.icp6s_badlen);
- rrddim_set_by_pointer(st, rd_in_time_excds, icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED]);
- rrddim_set_by_pointer(st, rd_in_parm_problems, icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB]);
- rrddim_set_by_pointer(st, rd_out_dest_unreachs, icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH]);
- rrddim_set_by_pointer(st, rd_out_time_excds, icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED]);
- rrddim_set_by_pointer(st, rd_out_parm_problems, icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]);
- rrdset_done(st);
- }
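-
-// Note that InPktTooBigs is fed from icp6s_badlen rather than the
-// ICMP6_PACKET_TOO_BIG histogram bucket, and that InCsumErrors
-// (icp6s_checksum) is also part of the InErrors sum above.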
-
- if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO &&
- (icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] ||
- icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] ||
- icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] ||
- icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6_echos = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL, *rd_in_replies = NULL, *rd_out_replies = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6",
- "icmpechos",
- NULL,
- "icmp",
- NULL,
- "IPv6 ICMP Echo",
- "messages/s",
- "freebsd.plugin",
- "net.inet6.icmp6.stats",
- 10200,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_in = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_replies = rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_replies = rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in, icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST]);
- rrddim_set_by_pointer(st, rd_out, icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST]);
- rrddim_set_by_pointer(st, rd_in_replies, icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY]);
- rrddim_set_by_pointer(st, rd_out_replies, icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]);
- rrdset_done(st);
- }
-
- if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO &&
- (icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] ||
- icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] ||
- icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] ||
- icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6_router = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_in_solicits = NULL, *rd_out_solicits = NULL,
- *rd_in_advertisements = NULL, *rd_out_advertisements = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6",
- "icmprouter",
- NULL,
- "icmp",
- NULL,
- "IPv6 Router Messages",
- "messages/s",
- "freebsd.plugin",
- "net.inet6.icmp6.stats",
- 10400,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_in_solicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_solicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_advertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_advertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in_solicits, icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT]);
- rrddim_set_by_pointer(st, rd_out_solicits, icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT]);
- rrddim_set_by_pointer(st, rd_in_advertisements, icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT]);
- rrddim_set_by_pointer(st, rd_out_advertisements, icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]);
- rrdset_done(st);
- }
-
- if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO &&
- (icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] ||
- icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] ||
- icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] ||
- icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6_neighbor = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_in_solicits = NULL, *rd_out_solicits = NULL,
- *rd_in_advertisements = NULL, *rd_out_advertisements = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6",
- "icmpneighbor",
- NULL,
- "icmp",
- NULL,
- "IPv6 Neighbor Messages",
- "messages/s",
- "freebsd.plugin",
- "net.inet6.icmp6.stats",
- 10500,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_in_solicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_solicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_advertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_advertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in_solicits, icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT]);
- rrddim_set_by_pointer(st, rd_out_solicits, icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT]);
- rrddim_set_by_pointer(st, rd_in_advertisements, icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT]);
- rrddim_set_by_pointer(st, rd_out_advertisements, icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]);
- rrdset_done(st);
- }
-
- if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO &&
- (icmp6stat.icp6s_inhist[1] ||
- icmp6stat.icp6s_inhist[128] ||
- icmp6stat.icp6s_inhist[129] ||
- icmp6stat.icp6s_inhist[136] ||
- icmp6stat.icp6s_outhist[1] ||
- icmp6stat.icp6s_outhist[128] ||
- icmp6stat.icp6s_outhist[129] ||
- icmp6stat.icp6s_outhist[133] ||
- icmp6stat.icp6s_outhist[135] ||
- icmp6stat.icp6s_outhist[143] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6_types = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_in_1 = NULL, *rd_in_128 = NULL, *rd_in_129 = NULL, *rd_in_136 = NULL,
- *rd_out_1 = NULL, *rd_out_128 = NULL, *rd_out_129 = NULL, *rd_out_133 = NULL,
- *rd_out_135 = NULL, *rd_out_143 = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6",
- "icmptypes",
- NULL,
- "icmp",
- NULL,
- "IPv6 ICMP Types",
- "messages/s",
- "freebsd.plugin",
- "net.inet6.icmp6.stats",
- 10700,
- update_every,
- RRDSET_TYPE_LINE
- );
-
- rd_in_1 = rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_128 = rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_129 = rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_in_136 = rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_1 = rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_128 = rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_129 = rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_133 = rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_135 = rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out_143 = rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_in_1, icmp6stat.icp6s_inhist[1]);
- rrddim_set_by_pointer(st, rd_in_128, icmp6stat.icp6s_inhist[128]);
- rrddim_set_by_pointer(st, rd_in_129, icmp6stat.icp6s_inhist[129]);
- rrddim_set_by_pointer(st, rd_in_136, icmp6stat.icp6s_inhist[136]);
- rrddim_set_by_pointer(st, rd_out_1, icmp6stat.icp6s_outhist[1]);
- rrddim_set_by_pointer(st, rd_out_128, icmp6stat.icp6s_outhist[128]);
- rrddim_set_by_pointer(st, rd_out_129, icmp6stat.icp6s_outhist[129]);
- rrddim_set_by_pointer(st, rd_out_133, icmp6stat.icp6s_outhist[133]);
- rrddim_set_by_pointer(st, rd_out_135, icmp6stat.icp6s_outhist[135]);
- rrddim_set_by_pointer(st, rd_out_143, icmp6stat.icp6s_outhist[143]);
- rrdset_done(st);
- }
- }
- } else {
- collector_error("DISABLED: net.inet6.icmp6.stats module");
- return 1;
- }
-
- return 0;
-}
diff --git a/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md b/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md
deleted file mode 100644
index 5f18661d0..000000000
--- a/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md
+++ /dev/null
@@ -1,111 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "dev.cpu.0.freq"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# dev.cpu.0.freq
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: dev.cpu.0.freq
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Read the current CPU scaling frequency.
-
-Current CPU scaling frequency.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per dev.cpu.0.freq instance
-
-The metric shows the current CPU frequency; it is directly affected by system load.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cpu.scaling_cur_freq | frequency | MHz |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| dev.cpu.0.freq | Enable or disable the CPU scaling frequency metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
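-
-Purely as an illustrative sketch (not shipped with the metadata): the section and option name come from this page, and the value shown is just the documented default.
-
-```ini
-[plugin:freebsd]
- dev.cpu.0.freq = yes
-```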
-
-
diff --git a/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md b/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md
deleted file mode 100644
index a3736f771..000000000
--- a/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md
+++ /dev/null
@@ -1,120 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "dev.cpu.temperature"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# dev.cpu.temperature
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: dev.cpu.temperature
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Get the current CPU temperature.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per dev.cpu.temperature instance
-
-This metric shows the latest CPU temperature.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cpu.temperature | a dimension per core | Celsius |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| dev.cpu.temperature | Enable or disable CPU temperature metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
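-
-For illustration only (section and option taken from this page; the value is the documented default):
-
-```ini
-[plugin:freebsd]
- dev.cpu.temperature = yes
-```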
-
-
diff --git a/collectors/freebsd.plugin/integrations/devstat.md b/collectors/freebsd.plugin/integrations/devstat.md
deleted file mode 100644
index 9d9c6400b..000000000
--- a/collectors/freebsd.plugin/integrations/devstat.md
+++ /dev/null
@@ -1,155 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/devstat.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "devstat"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# devstat
-
-
-<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: devstat
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect information about each hard disk available on the host.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per devstat instance
-
-These metrics give a general overview of I/O events on disks.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.io | in, out | KiB/s |
-
-### Per disk
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| disk.io | reads, writes, frees | KiB/s |
-| disk.ops | reads, writes, other, frees | operations/s |
-| disk.qops | operations | operations |
-| disk.util | utilization | % of time working |
-| disk.iotime | reads, writes, other, frees | milliseconds/s |
-| disk.await | reads, writes, other, frees | milliseconds/operation |
-| disk.avgsz | reads, writes, frees | KiB/operation |
-| disk.svctm | svctm | milliseconds/operation |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:kern.devstat]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| enable new disks detected at runtime | Enable or disable detection of new disks at runtime. | auto | no |
-| performance metrics for pass devices | Enable or disable metrics for disks with type `PASS`. | auto | no |
-| total bandwidth for all disks | Enable or disable total bandwidth metric for all disks. | yes | no |
-| bandwidth for all disks | Enable or disable bandwidth for all disks metric. | auto | no |
-| operations for all disks | Enable or disable operations for all disks metric. | auto | no |
-| queued operations for all disks | Enable or disable queued operations for all disks metric. | auto | no |
-| utilization percentage for all disks | Enable or disable utilization percentage for all disks metric. | auto | no |
-| i/o time for all disks | Enable or disable I/O time for all disks metric. | auto | no |
-| average completed i/o time for all disks | Enable or disable average completed I/O time for all disks metric. | auto | no |
-| average completed i/o bandwidth for all disks | Enable or disable average completed I/O bandwidth for all disks metric. | auto | no |
-| average service time for all disks | Enable or disable average service time for all disks metric. | auto | no |
-| disable by default disks matching | Do not create charts for disks listed. | | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
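-
-As a non-authoritative sketch, a few of the options above could be set in `netdata.conf` like this (section and keys from this page; values are the listed defaults):
-
-```ini
-[plugin:freebsd:kern.devstat]
- enable new disks detected at runtime = auto
- performance metrics for pass devices = auto
- total bandwidth for all disks = yes
-```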
-
-
diff --git a/collectors/freebsd.plugin/integrations/getifaddrs.md b/collectors/freebsd.plugin/integrations/getifaddrs.md
deleted file mode 100644
index 63c4ce136..000000000
--- a/collectors/freebsd.plugin/integrations/getifaddrs.md
+++ /dev/null
@@ -1,161 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/getifaddrs.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "getifaddrs"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# getifaddrs
-
-
-<img src="https://netdata.cloud/img/network.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: getifaddrs
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect traffic per network interface.
-
-The plugin calls the `getifaddrs` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per getifaddrs instance
-
-A general overview of network traffic.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.net | received, sent | kilobits/s |
-| system.packets | received, sent, multicast_received, multicast_sent | packets/s |
-| system.ipv4 | received, sent | kilobits/s |
-| system.ipv6 | received, sent | kilobits/s |
-
-### Per network device
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| net.net | received, sent | kilobits/s |
-| net.packets | received, sent, multicast_received, multicast_sent | packets/s |
-| net.errors | inbound, outbound | errors/s |
-| net.drops | inbound, outbound | drops/s |
-| net.events | collisions | events/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ interface_speed ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |
-| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |
-| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |
-| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |
-| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
-| [ interface_inbound_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.errors | number of inbound errors for the network interface ${label:device} in the last 10 minutes |
-| [ interface_outbound_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.errors | number of outbound errors for the network interface ${label:device} in the last 10 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:getifaddrs]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| enable new interfaces detected at runtime | Enable or disable discovery of new interfaces after the plugin starts. | auto | no |
-| total bandwidth for physical interfaces | Enable or disable total bandwidth for physical interfaces metric. | auto | no |
-| total packets for physical interfaces | Enable or disable total packets for physical interfaces metric. | auto | no |
-| total bandwidth for ipv4 interface | Enable or disable total bandwidth for IPv4 interface metric. | auto | no |
-| total bandwidth for ipv6 interfaces | Enable or disable total bandwidth for IPv6 interfaces metric. | auto | no |
-| bandwidth for all interfaces | Enable or disable bandwidth for all interfaces metric. | auto | no |
-| packets for all interfaces | Enable or disable packets for all interfaces metric. | auto | no |
-| errors for all interfaces | Enable or disable errors for all interfaces metric. | auto | no |
-| drops for all interfaces | Enable or disable drops for all interfaces metric. | auto | no |
-| collisions for all interface | Enable or disable the collisions metric for all interfaces. | auto | no |
-| disable by default interfaces matching | Do not display data for the interfaces listed. | lo* | no |
-| set physical interfaces for system.net | Interfaces matching this list are treated as physical and are aggregated into the system.net chart. | igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc* | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
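-
-An illustrative sketch only (keys and defaults from the table above):
-
-```ini
-[plugin:freebsd:getifaddrs]
- enable new interfaces detected at runtime = auto
- disable by default interfaces matching = lo*
-```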
-
-
diff --git a/collectors/freebsd.plugin/integrations/getmntinfo.md b/collectors/freebsd.plugin/integrations/getmntinfo.md
deleted file mode 100644
index d26ad1c03..000000000
--- a/collectors/freebsd.plugin/integrations/getmntinfo.md
+++ /dev/null
@@ -1,131 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/getmntinfo.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "getmntinfo"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# getmntinfo
-
-
-<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: getmntinfo
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect information per mount point.
-
-The plugin calls the `getmntinfo` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per mount point
-
-These metrics show details about mount point usage.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| disk.space | avail, used, reserved_for_root | GiB |
-| disk.inodes | avail, used, reserved_for_root | inodes |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |
-| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:getmntinfo]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| enable new mount points detected at runtime | Check for new mount points during runtime. | auto | no |
-| space usage for all disks | Enable or disable space usage for all disks metric. | auto | no |
-| inodes usage for all disks | Enable or disable inodes usage for all disks metric. | auto | no |
-| exclude space metrics on paths | Do not show metrics for listed paths. | /proc/* | no |
-| exclude space metrics on filesystems | Do not monitor listed filesystems. | autofs procfs subfs devfs none | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
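-
-A hedged sketch of the section described above (keys from the table; values are the listed defaults):
-
-```ini
-[plugin:freebsd:getmntinfo]
- enable new mount points detected at runtime = auto
- exclude space metrics on filesystems = autofs procfs subfs devfs none
-```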
-
-
diff --git a/collectors/freebsd.plugin/integrations/hw.intrcnt.md b/collectors/freebsd.plugin/integrations/hw.intrcnt.md
deleted file mode 100644
index 49164c369..000000000
--- a/collectors/freebsd.plugin/integrations/hw.intrcnt.md
+++ /dev/null
@@ -1,121 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/hw.intrcnt.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "hw.intrcnt"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# hw.intrcnt
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: hw.intrcnt
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Get the total number of interrupts.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per hw.intrcnt instance
-
-These metrics show the frequency of system interrupts.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.intr | interrupts | interrupts/s |
-| system.interrupts | a dimension per interrupt | interrupts/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| hw.intrcnt | Enable or disable Interrupts metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
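-
-For illustration only (option and default from the table above):
-
-```ini
-[plugin:freebsd]
- hw.intrcnt = yes
-```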
-
-
diff --git a/collectors/freebsd.plugin/integrations/ipfw.md b/collectors/freebsd.plugin/integrations/ipfw.md
deleted file mode 100644
index 84e023bdf..000000000
--- a/collectors/freebsd.plugin/integrations/ipfw.md
+++ /dev/null
@@ -1,126 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/ipfw.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "ipfw"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# ipfw
-
-
-<img src="https://netdata.cloud/img/firewall.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: ipfw
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect information about the FreeBSD firewall.
-
-The plugin uses a raw socket to communicate with the kernel and collect data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per ipfw instance
-
-These metrics show FreeBSD firewall statistics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipfw.mem | dynamic, static | bytes |
-| ipfw.packets | a dimension per static rule | packets/s |
-| ipfw.bytes | a dimension per static rule | bytes/s |
-| ipfw.active | a dimension per dynamic rule | rules |
-| ipfw.expired | a dimension per dynamic rule | rules |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:ipfw]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| counters for static rules | Enable or disable counters for static rules metric. | yes | no |
-| number of dynamic rules | Enable or disable number of dynamic rules metric. | yes | no |
-| allocated memory | Enable or disable allocated memory metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
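-
-Purely as a sketch (keys and defaults from the table above):
-
-```ini
-[plugin:freebsd:ipfw]
- counters for static rules = yes
- number of dynamic rules = yes
- allocated memory = yes
-```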
-
-
diff --git a/collectors/freebsd.plugin/integrations/kern.cp_time.md b/collectors/freebsd.plugin/integrations/kern.cp_time.md
deleted file mode 100644
index 95bdb8d90..000000000
--- a/collectors/freebsd.plugin/integrations/kern.cp_time.md
+++ /dev/null
@@ -1,139 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/kern.cp_time.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "kern.cp_time"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# kern.cp_time
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: kern.cp_time
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Total CPU utilization
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per kern.cp_time instance
-
-These metrics show CPU usage statistics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.cpu | nice, system, user, interrupt, idle | percentage |
-
-### Per core
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cpu.cpu | nice, system, user, interrupt, idle | percentage |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |
-| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |
-| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |
-| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding nice) |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-The netdata main configuration file.
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| kern.cp_time | Enable or disable Total CPU usage. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
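-
-An illustrative sketch; note that this page names only the file, so the `[plugin:freebsd]` section is an assumption based on the sibling integrations:
-
-```ini
-[plugin:freebsd]
- kern.cp_time = yes
-```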
-
-
diff --git a/collectors/freebsd.plugin/integrations/kern.ipc.msq.md b/collectors/freebsd.plugin/integrations/kern.ipc.msq.md
deleted file mode 100644
index e7457e0c1..000000000
--- a/collectors/freebsd.plugin/integrations/kern.ipc.msq.md
+++ /dev/null
@@ -1,122 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/kern.ipc.msq.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "kern.ipc.msq"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# kern.ipc.msq
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: kern.ipc.msq
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect the number of IPC message queues.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per kern.ipc.msq instance
-
-These metrics show IPC message queue statistics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.ipc_msq_queues | queues | queues |
-| system.ipc_msq_messages | messages | messages |
-| system.ipc_msq_size | allocated, used | bytes |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| kern.ipc.msq | Enable or disable IPC message queue metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
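-
-For illustration only (option and default from the table above):
-
-```ini
-[plugin:freebsd]
- kern.ipc.msq = yes
-```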
-
-
diff --git a/collectors/freebsd.plugin/integrations/kern.ipc.sem.md b/collectors/freebsd.plugin/integrations/kern.ipc.sem.md
deleted file mode 100644
index 7bf7235e6..000000000
--- a/collectors/freebsd.plugin/integrations/kern.ipc.sem.md
+++ /dev/null
@@ -1,127 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/kern.ipc.sem.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "kern.ipc.sem"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# kern.ipc.sem
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: kern.ipc.sem
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect information about semaphores.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per kern.ipc.sem instance
-
-These metrics show counters for semaphores on the host.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.ipc_semaphores | semaphores | semaphores |
-| system.ipc_semaphore_arrays | arrays | arrays |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |
-| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| kern.ipc.sem | Enable or disable semaphore metrics. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
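-
-An illustrative snippet (option and default from the table above):
-
-```ini
-[plugin:freebsd]
- kern.ipc.sem = yes
-```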
-
-
diff --git a/collectors/freebsd.plugin/integrations/kern.ipc.shm.md b/collectors/freebsd.plugin/integrations/kern.ipc.shm.md
deleted file mode 100644
index 1f10c1e6e..000000000
--- a/collectors/freebsd.plugin/integrations/kern.ipc.shm.md
+++ /dev/null
@@ -1,121 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/kern.ipc.shm.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "kern.ipc.shm"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# kern.ipc.shm
-
-
-<img src="https://netdata.cloud/img/memory.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: kern.ipc.shm
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect shared memory information.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per kern.ipc.shm instance
-
-These metrics show the status of the current shared memory segments.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.ipc_shared_mem_segs | segments | segments |
-| system.ipc_shared_mem_size | allocated | KiB |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| kern.ipc.shm | Enable or disable shared memory metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
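-
-For illustration only (option and default from the table above):
-
-```ini
-[plugin:freebsd]
- kern.ipc.shm = yes
-```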
-
-
diff --git a/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md b/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md
deleted file mode 100644
index 29562bc9a..000000000
--- a/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md
+++ /dev/null
@@ -1,124 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "net.inet.icmp.stats"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# net.inet.icmp.stats
-
-
-<img src="https://netdata.cloud/img/network.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: net.inet.icmp.stats
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect information about ICMP traffic.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per net.inet.icmp.stats instance
-
-These metrics show ICMP traffic statistics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipv4.icmp | received, sent | packets/s |
-| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |
-| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:net.inet.icmp.stats]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| IPv4 ICMP packets | Enable or disable IPv4 ICMP packets metric. | yes | no |
-| IPv4 ICMP error | Enable or disable IPv4 ICMP error metric. | yes | no |
-| IPv4 ICMP messages | Enable or disable IPv4 ICMP messages metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
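-
-A hedged sketch (section and keys from this page; values are the documented defaults):
-
-```ini
-[plugin:freebsd:net.inet.icmp.stats]
- IPv4 ICMP packets = yes
- IPv4 ICMP error = yes
- IPv4 ICMP messages = yes
-```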
-
-
diff --git a/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md b/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md
deleted file mode 100644
index 785767e89..000000000
--- a/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md
+++ /dev/null
@@ -1,126 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "net.inet.ip.stats"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# net.inet.ip.stats
-
-
-<img src="https://netdata.cloud/img/network.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: net.inet.ip.stats
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect IPv4 statistics.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per net.inet.ip.stats instance
-
-These metrics show IPv4 traffic statistics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipv4.packets | received, sent, forwarded, delivered | packets/s |
-| ipv4.fragsout | ok, failed, created | packets/s |
-| ipv4.fragsin | ok, failed, all | packets/s |
-| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:net.inet.ip.stats]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| ipv4 packets | Enable or disable IPv4 packets metric. | yes | no |
-| ipv4 fragments sent | Enable or disable IPv4 fragments sent metric. | yes | no |
-| ipv4 fragments assembly | Enable or disable IPv4 fragments assembly metric. | yes | no |
-| ipv4 errors | Enable or disable IPv4 errors metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
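-
-An illustrative sketch (section and keys from this page; values are the documented defaults):
-
-```ini
-[plugin:freebsd:net.inet.ip.stats]
- ipv4 packets = yes
- ipv4 errors = yes
-```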
-
-
diff --git a/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md b/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md
deleted file mode 100644
index 5b4144580..000000000
--- a/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md
+++ /dev/null
@@ -1,125 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "net.inet.tcp.states"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# net.inet.tcp.states
-
-
-<img src="https://netdata.cloud/img/network.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: net.inet.tcp.states
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect the current number of TCP connections.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per net.inet.tcp.states instance
-
-A counter for TCP connections.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipv4.tcpsock | connections | active connections |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_conn.conf) | ipv4.tcpsock | IPv4 TCP connections utilization |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| net.inet.tcp.states | Enable or disable TCP state metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
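-
-Although no examples ship with this integration, a minimal sketch based on the option table above (the value is an illustrative assumption) could be:
-
-```ini
-[plugin:freebsd]
- net.inet.tcp.states = yes
-```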
-
-
diff --git a/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md b/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md
deleted file mode 100644
index be779740d..000000000
--- a/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md
+++ /dev/null
@@ -1,142 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "net.inet.tcp.stats"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# net.inet.tcp.stats
-
-
-<img src="https://netdata.cloud/img/network.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: net.inet.tcp.stats
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect overall information about TCP connections.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
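-To see the same kernel counters the collector reads, you can query the system by hand (illustrative only; both commands are part of the FreeBSD base system):
-
-```bash
-# Human-readable view of the TCP statistics
-netstat -s -p tcp
-# Description of the raw sysctl the plugin reads
-sysctl -d net.inet.tcp.stats
-```
-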
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per net.inet.tcp.stats instance
-
-These metrics show TCP connection statistics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipv4.tcppackets | received, sent | packets/s |
-| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |
-| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |
-| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger | connections/s |
-| ipv4.tcpofo | inqueue | packets/s |
-| ipv4.tcpsyncookies | received, sent, failed | packets/s |
-| ipv4.tcplistenissues | overflows | packets/s |
-| ipv4.ecnpkts | InCEPkts, InECT0Pkts, InECT1Pkts, OutECT0Pkts, OutECT1Pkts | packets/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ 1m_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last minute |
-| [ 10s_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |
-| [ 1m_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last minute |
-| [ 10s_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:net.inet.tcp.stats]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| ipv4 TCP packets | Enable or disable ipv4 TCP packets metric. | yes | no |
-| ipv4 TCP errors | Enable or disable ipv4 TCP errors metric. | yes | no |
-| ipv4 TCP handshake issues | Enable or disable ipv4 TCP handshake issue metric. | yes | no |
-| TCP connection aborts | Enable or disable TCP connection aborts metric. | auto | no |
-| TCP out-of-order queue | Enable or disable TCP out-of-order queue metric. | auto | no |
-| TCP SYN cookies | Enable or disable TCP SYN cookies metric. | auto | no |
-| TCP listen issues | Enable or disable TCP listen issues metric. | auto | no |
-| ECN packets | Enable or disable ECN packets metric. | auto | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md b/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md
deleted file mode 100644
index d3da40455..000000000
--- a/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md
+++ /dev/null
@@ -1,128 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "net.inet.udp.stats"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# net.inet.udp.stats
-
-
-<img src="https://netdata.cloud/img/network.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: net.inet.udp.stats
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect information about UDP connections.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per net.inet.udp.stats instance
-
-These metrics show UDP connection statistics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipv4.udppackets | received, sent | packets/s |
-| ipv4.udperrors | InErrors, NoPorts, RcvbufErrors, InCsumErrors, IgnoredMulti | events/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |
-| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:net.inet.udp.stats]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| ipv4 UDP packets | Enable or disable ipv4 UDP packets metric. | yes | no |
-| ipv4 UDP errors | Enable or disable ipv4 UDP errors metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
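-
-As an illustrative sketch (option names from the table above; the values are assumptions):
-
-```ini
-[plugin:freebsd:net.inet.udp.stats]
- ipv4 UDP packets = yes
- ipv4 UDP errors = yes
-```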
-
-
diff --git a/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md b/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md
deleted file mode 100644
index 7344b79b3..000000000
--- a/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md
+++ /dev/null
@@ -1,132 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "net.inet6.icmp6.stats"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# net.inet6.icmp6.stats
-
-
-<img src="https://netdata.cloud/img/network.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: net.inet6.icmp6.stats
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect information about IPv6 ICMP.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per net.inet6.icmp6.stats instance
-
-Collect IPv6 ICMP traffic statistics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipv6.icmp | received, sent | messages/s |
-| ipv6.icmpredir | received, sent | redirects/s |
-| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |
-| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |
-| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |
-| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |
-| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:net.inet6.icmp6.stats]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| icmp | Enable or disable ICMP metric. | auto | no |
-| icmp redirects | Enable or disable ICMP redirects metric. | auto | no |
-| icmp errors | Enable or disable ICMP errors metric. | auto | no |
-| icmp echos | Enable or disable ICMP echos metric. | auto | no |
-| icmp router | Enable or disable ICMP router metric. | auto | no |
-| icmp neighbor | Enable or disable ICMP neighbor metric. | auto | no |
-| icmp types | Enable or disable ICMP types metric. | auto | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
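-
-A minimal sketch using the option names documented above (the values are illustrative assumptions), for example to keep echo statistics while dropping the per-type breakdown:
-
-```ini
-[plugin:freebsd:net.inet6.icmp6.stats]
- icmp echos = auto
- icmp types = no
-```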
-
-
diff --git a/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md b/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md
deleted file mode 100644
index d9128b529..000000000
--- a/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md
+++ /dev/null
@@ -1,126 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "net.inet6.ip6.stats"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# net.inet6.ip6.stats
-
-
-<img src="https://netdata.cloud/img/network.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: net.inet6.ip6.stats
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect IPv6 statistics.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per net.inet6.ip6.stats instance
-
-These metrics show general information about IPv6 connections.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipv6.packets | received, sent, forwarded, delivers | packets/s |
-| ipv6.fragsout | ok, failed, all | packets/s |
-| ipv6.fragsin | ok, failed, timeout, all | packets/s |
-| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:net.inet6.ip6.stats]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| ipv6 packets | Enable or disable ipv6 packet metric. | auto | no |
-| ipv6 fragments sent | Enable or disable ipv6 fragments sent metric. | auto | no |
-| ipv6 fragments assembly | Enable or disable ipv6 fragments assembly metric. | auto | no |
-| ipv6 errors | Enable or disable ipv6 errors metric. | auto | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
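-
-An illustrative sketch only (option names from the table above; the values are assumptions):
-
-```ini
-[plugin:freebsd:net.inet6.ip6.stats]
- ipv6 packets = auto
- ipv6 errors = yes
-```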
-
-
diff --git a/collectors/freebsd.plugin/integrations/net.isr.md b/collectors/freebsd.plugin/integrations/net.isr.md
deleted file mode 100644
index 2d75b825a..000000000
--- a/collectors/freebsd.plugin/integrations/net.isr.md
+++ /dev/null
@@ -1,140 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/net.isr.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "net.isr"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# net.isr
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: net.isr
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect information about system softnet stat.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per net.isr instance
-
-These metrics show system-wide softnet stat events.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |
-
-### Per core
-
-These metrics show softnet stat events per CPU core.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cpu.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |
-| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |
-| [ 10min_netisr_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf) | system.softnet_stat | average number of drops in the last minute due to exceeded sysctl net.route.netisr_maxqlen (this can be a cause for dropped packets) |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:net.isr]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| netisr | Enable or disable the overall softnet stat metric. | yes | no |
-| netisr per core | Enable or disable softnet stat metric per core. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
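-
-A minimal sketch based on the option table above (values are illustrative assumptions), for example to keep the aggregate chart but skip the per-core charts:
-
-```ini
-[plugin:freebsd:net.isr]
- netisr = yes
- netisr per core = no
-```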
-
-
diff --git a/collectors/freebsd.plugin/integrations/system.ram.md b/collectors/freebsd.plugin/integrations/system.ram.md
deleted file mode 100644
index 7d4974922..000000000
--- a/collectors/freebsd.plugin/integrations/system.ram.md
+++ /dev/null
@@ -1,129 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/system.ram.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "system.ram"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# system.ram
-
-
-<img src="https://netdata.cloud/img/memory.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: system.ram
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Show information about system memory usage.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per system.ram instance
-
-These metrics show RAM usage statistics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.ram | free, active, inactive, wired, cache, laundry, buffers | MiB |
-| mem.available | avail | MiB |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | system.ram | system memory utilization |
-| [ ram_available ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| system.ram | Enable or disable system RAM metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
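-
-As an illustrative sketch (the option name comes from the table above; the value is an assumption):
-
-```ini
-[plugin:freebsd]
- system.ram = yes
-```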
-
-
diff --git a/collectors/freebsd.plugin/integrations/uptime.md b/collectors/freebsd.plugin/integrations/uptime.md
deleted file mode 100644
index e3f1db3f1..000000000
--- a/collectors/freebsd.plugin/integrations/uptime.md
+++ /dev/null
@@ -1,120 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/uptime.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "uptime"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# uptime
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: uptime
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Show how long the server has been up.
-
-The plugin calls the `clock_gettime` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per uptime instance
-
-How long the system has been running.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.uptime | uptime | seconds |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| vm.loadavg | Enable or disable load average metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/freebsd.plugin/integrations/vm.loadavg.md b/collectors/freebsd.plugin/integrations/vm.loadavg.md
deleted file mode 100644
index 88c47b7a4..000000000
--- a/collectors/freebsd.plugin/integrations/vm.loadavg.md
+++ /dev/null
@@ -1,128 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/vm.loadavg.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "vm.loadavg"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# vm.loadavg
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: vm.loadavg
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect the system load average.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
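-You can inspect the same kernel variable by hand (illustrative only; `sysctl` is part of the FreeBSD base system):
-
-```bash
-# Print the raw 1, 5 and 15 minute load averages the collector reads
-sysctl vm.loadavg
-```
-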
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per vm.loadavg instance
-
-Monitoring the number of threads running or waiting.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.load | load1, load5, load15 | load |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | number of active CPU cores in the system |
-| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system fifteen-minute load average |
-| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system five-minute load average |
-| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system one-minute load average |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| vm.loadavg | Enable or disable load average metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md b/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md
deleted file mode 100644
index c3e7466e9..000000000
--- a/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md
+++ /dev/null
@@ -1,120 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "vm.stats.sys.v_intr"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# vm.stats.sys.v_intr
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: vm.stats.sys.v_intr
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect device interrupts.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
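-You can read the same counter by hand (illustrative only; `sysctl` is part of the FreeBSD base system):
-
-```bash
-# Cumulative device interrupt count; Netdata converts it to a per-second rate
-sysctl vm.stats.sys.v_intr
-```
-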
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per vm.stats.sys.v_intr instance
-
-This metric shows the device interrupt frequency.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.dev_intr | interrupts | interrupts/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config option</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| vm.stats.sys.v_intr | Enable or disable device interrupts metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md b/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md
deleted file mode 100644
index ce914bb50..000000000
--- a/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md
+++ /dev/null
@@ -1,120 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "vm.stats.sys.v_soft"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# vm.stats.sys.v_soft
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: vm.stats.sys.v_soft
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect software interrupts.
-
-The plugin calls the `sysctl` function to read `vm.stats.sys.v_soft`.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per vm.stats.sys.v_soft instance
-
-This metric shows software interrupt frequency.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.soft_intr | interrupts | interrupts/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config option</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| vm.stats.sys.v_soft | Enable or disable software interrupts metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
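-
-A minimal sketch using the option name from the table above (the value is an illustrative assumption):
-
-```ini
-[plugin:freebsd]
- vm.stats.sys.v_soft = yes
-```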
-
-
diff --git a/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md b/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md
deleted file mode 100644
index cbcee311f..000000000
--- a/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md
+++ /dev/null
@@ -1,121 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "vm.stats.sys.v_swtch"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# vm.stats.sys.v_swtch
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: vm.stats.sys.v_swtch
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect CPU context switches.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
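-You can read the same counter by hand (illustrative only; `sysctl` is part of the FreeBSD base system):
-
-```bash
-# Cumulative context switch count; Netdata converts it to a per-second rate
-sysctl vm.stats.sys.v_swtch
-```
-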
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per vm.stats.sys.v_swtch instance
-
-This metric counts the number of context switches happening on the host.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.ctxt | switches | context switches/s |
-| system.forks | started | processes/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| vm.stats.sys.v_swtch | Enable or disable CPU context switch metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md b/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md
deleted file mode 100644
index 19230dd56..000000000
--- a/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md
+++ /dev/null
@@ -1,120 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "vm.stats.vm.v_pgfaults"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# vm.stats.vm.v_pgfaults
-
-
-<img src="https://netdata.cloud/img/memory.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: vm.stats.vm.v_pgfaults
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect memory page faults events.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per vm.stats.vm.v_pgfaults instance
-
-The number of page faults that happened on the host.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.pgfaults | memory, io_requiring, cow, cow_optimized, in_transit | page faults/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| vm.stats.vm.v_pgfaults | Enable or disable Memory page fault metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
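-
-An illustrative sketch only (option name from the table above; the value is an assumption):
-
-```ini
-[plugin:freebsd]
- vm.stats.vm.v_pgfaults = yes
-```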
-
-
diff --git a/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md b/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md
deleted file mode 100644
index c6caaa682..000000000
--- a/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md
+++ /dev/null
@@ -1,125 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "vm.stats.vm.v_swappgs"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# vm.stats.vm.v_swappgs
-
-
-<img src="https://netdata.cloud/img/memory.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: vm.stats.vm.v_swappgs
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This metric shows the amount of data read from and written to SWAP.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per vm.stats.vm.v_swappgs instance
-
-This metric shows events happening on SWAP.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.swapio | io, out | KiB/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| vm.stats.vm.v_swappgs | Enable or disable information about SWAP I/O metric. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
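-
-A minimal sketch using the option name from the table above (the value is an illustrative assumption):
-
-```ini
-[plugin:freebsd]
- vm.stats.vm.v_swappgs = yes
-```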
-
-
diff --git a/collectors/freebsd.plugin/integrations/vm.swap_info.md b/collectors/freebsd.plugin/integrations/vm.swap_info.md
deleted file mode 100644
index caa22b3dc..000000000
--- a/collectors/freebsd.plugin/integrations/vm.swap_info.md
+++ /dev/null
@@ -1,125 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/vm.swap_info.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "vm.swap_info"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# vm.swap_info
-
-
-<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: vm.swap_info
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect information about SWAP memory.
-
-The plugin calls the `sysctlnametomib` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per vm.swap_info instance
-
-This metric shows the SWAP usage.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.swap | free, used | MiB |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ used_swap ](https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf) | mem.swap | swap memory utilization |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| vm.swap_info | Enable or disable SWAP metrics. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
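-
-As an illustrative sketch (option name from the table above; the value is an assumption):
-
-```ini
-[plugin:freebsd]
- vm.swap_info = yes
-```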
-
-
diff --git a/collectors/freebsd.plugin/integrations/vm.vmtotal.md b/collectors/freebsd.plugin/integrations/vm.vmtotal.md
deleted file mode 100644
index f3f631af6..000000000
--- a/collectors/freebsd.plugin/integrations/vm.vmtotal.md
+++ /dev/null
@@ -1,129 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/vm.vmtotal.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "vm.vmtotal"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# vm.vmtotal
-
-
-<img src="https://netdata.cloud/img/memory.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: vm.vmtotal
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect virtual memory information from the host.
-
-The plugin calls the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per vm.vmtotal instance
-
-These metrics give an overall view of the processes running.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.active_processes | active | processes |
-| system.processes | running, blocked | processes |
-| mem.real | used | MiB |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ active_processes ](https://github.com/netdata/netdata/blob/master/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:vm.vmtotal]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config Options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| enable total processes | Number of active processes. | yes | no |
-| processes running | Show number of processes running or blocked. | yes | no |
-| real memory | Memory used on the host. | yes | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
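-
-As an illustrative sketch only (the section and option names come from the tables above; the exact layout and values are assumptions based on the listed defaults), these options might look like this in `netdata.conf`:
-
-```ini
-[plugin:freebsd:vm.vmtotal]
- enable total processes = yes
- processes running = yes
- real memory = yes
-```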
-
-
diff --git a/collectors/freebsd.plugin/integrations/zfs.md b/collectors/freebsd.plugin/integrations/zfs.md
deleted file mode 100644
index 99f10026d..000000000
--- a/collectors/freebsd.plugin/integrations/zfs.md
+++ /dev/null
@@ -1,152 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/integrations/zfs.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freebsd.plugin/metadata.yaml"
-sidebar_label: "zfs"
-learn_status: "Published"
-learn_rel_path: "Data Collection/FreeBSD"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# zfs
-
-
-<img src="https://netdata.cloud/img/filesystem.svg" width="150"/>
-
-
-Plugin: freebsd.plugin
-Module: zfs
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collect metrics for the ZFS filesystem.
-
-The plugin uses the `sysctl` function to collect the necessary data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per zfs instance
-
-These metrics show detailed information about the ZFS filesystem.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| zfs.arc_size | arcsz, target, min, max | MiB |
-| zfs.l2_size | actual, size | MiB |
-| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |
-| zfs.bytes | read, write | KiB/s |
-| zfs.hits | hits, misses | percentage |
-| zfs.hits_rate | hits, misses | events/s |
-| zfs.dhits | hits, misses | percentage |
-| zfs.dhits_rate | hits, misses | events/s |
-| zfs.phits | hits, misses | percentage |
-| zfs.phits_rate | hits, misses | events/s |
-| zfs.mhits | hits, misses | percentage |
-| zfs.mhits_rate | hits, misses | events/s |
-| zfs.l2hits | hits, misses | percentage |
-| zfs.l2hits_rate | hits, misses | events/s |
-| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |
-| zfs.arc_size_breakdown | recent, frequent | percentage |
-| zfs.memory_ops | throttled | operations/s |
-| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |
-| zfs.actual_hits | hits, misses | percentage |
-| zfs.actual_hits_rate | hits, misses | events/s |
-| zfs.demand_data_hits | hits, misses | percentage |
-| zfs.demand_data_hits_rate | hits, misses | events/s |
-| zfs.prefetch_data_hits | hits, misses | percentage |
-| zfs.prefetch_data_hits_rate | hits, misses | events/s |
-| zfs.hash_elements | current, max | elements |
-| zfs.hash_chains | current, max | chains |
-| zfs.trim_bytes | TRIMmed | bytes |
-| zfs.trim_requests | successful, failed, unsupported | requests |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freebsd:zfs_arcstats]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| show zero charts | Show charts even when all of their metrics are zero. | no | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
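-
-As an illustrative sketch only (the section and option name come from the tables above; the exact layout is an assumption), the default would look like this in `netdata.conf`:
-
-```ini
-[plugin:freebsd:zfs_arcstats]
- show zero charts = no
-```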
-
-
diff --git a/collectors/freebsd.plugin/metadata.yaml b/collectors/freebsd.plugin/metadata.yaml
deleted file mode 100644
index 36fba2430..000000000
--- a/collectors/freebsd.plugin/metadata.yaml
+++ /dev/null
@@ -1,3398 +0,0 @@
-plugin_name: freebsd.plugin
-modules:
- - meta:
- plugin_name: freebsd.plugin
- module_name: vm.loadavg
- monitored_instance:
- name: vm.loadavg
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "System Load Average"
-          method_description: "The plugin calls the `sysctl` function to collect the necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: vm.loadavg
- description: Enable or disable load average metric.
- default_value: yes
- required: false
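-              # Illustrative sketch only (not part of the metadata schema): with
-              # the option above, the corresponding netdata.conf stanza would be:
-              #   [plugin:freebsd]
-              #     vm.loadavg = yes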
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: load_cpu_number
- link: https://github.com/netdata/netdata/blob/master/health/health.d/load.conf
- metric: system.load
- info: number of active CPU cores in the system
- os: "linux"
- - name: load_average_15
- link: https://github.com/netdata/netdata/blob/master/health/health.d/load.conf
- metric: system.load
- info: system fifteen-minute load average
- os: "linux"
- - name: load_average_5
- link: https://github.com/netdata/netdata/blob/master/health/health.d/load.conf
- metric: system.load
- info: system five-minute load average
- os: "linux"
- - name: load_average_1
- link: https://github.com/netdata/netdata/blob/master/health/health.d/load.conf
- metric: system.load
- info: system one-minute load average
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "Monitoring for number of threads running or waiting."
- labels: []
- metrics:
- - name: system.load
- description: System Load Average
- unit: "load"
- chart_type: line
- dimensions:
- - name: load1
- - name: load5
- - name: load15
- - meta:
- plugin_name: freebsd.plugin
- module_name: vm.vmtotal
- monitored_instance:
- name: vm.vmtotal
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "memory.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect Virtual Memory information from host."
- method_description: "The plugin calls function `sysctl` to collect data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:vm.vmtotal]"
- description: ""
- options:
- description: ""
- folding:
- title: "Config Options"
- enabled: true
- list:
- - name: enable total processes
- description: Number of active processes.
- default_value: yes
- required: false
- - name: processes running
- description: Show number of processes running or blocked.
- default_value: yes
- required: false
- - name: real memory
-                description: Memory used on the host.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: active_processes
- link: https://github.com/netdata/netdata/blob/master/health/health.d/processes.conf
- metric: system.active_processes
- info: system process IDs (PID) space utilization
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show an overall vision about processes running."
- labels: []
- metrics:
- - name: system.active_processes
- description: System Active Processes
- unit: "processes"
- chart_type: line
- dimensions:
- - name: active
- - name: system.processes
- description: System Processes
- unit: "processes"
- chart_type: line
- dimensions:
- - name: running
- - name: blocked
- - name: mem.real
- description: Total Real Memory In Use
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: used
- - meta:
- plugin_name: freebsd.plugin
- module_name: kern.cp_time
- monitored_instance:
- name: kern.cp_time
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Total CPU utilization"
-          method_description: "The plugin calls the `sysctl` function to collect the necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- description: "[plugin:freebsd]"
- options:
- description: "The netdata main configuration file."
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: kern.cp_time
- description: Enable or disable Total CPU usage.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: 10min_cpu_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf
- metric: system.cpu
- info: average CPU utilization over the last 10 minutes (excluding iowait, nice and steal)
- os: "linux"
- - name: 10min_cpu_iowait
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf
- metric: system.cpu
- info: average CPU iowait time over the last 10 minutes
- os: "linux"
- - name: 20min_steal_cpu
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf
- metric: system.cpu
- info: average CPU steal time over the last 20 minutes
- os: "linux"
- - name: 10min_cpu_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf
- metric: system.cpu
- info: average CPU utilization over the last 10 minutes (excluding nice)
- os: "freebsd"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show CPU usage statistics."
- labels: []
- metrics:
- - name: system.cpu
- description: Total CPU utilization
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: nice
- - name: system
- - name: user
- - name: interrupt
- - name: idle
- - name: core
- description: ""
- labels: []
- metrics:
- - name: cpu.cpu
- description: Core utilization
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: nice
- - name: system
- - name: user
- - name: interrupt
- - name: idle
- - meta:
- plugin_name: freebsd.plugin
- module_name: dev.cpu.temperature
- monitored_instance:
- name: dev.cpu.temperature
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Get current CPU temperature"
-          method_description: "The plugin calls the `sysctl` function to collect the necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: dev.cpu.temperature
- description: Enable or disable CPU temperature metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "This metric show latest CPU temperature."
- labels: []
- metrics:
- - name: cpu.temperature
- description: Core temperature
- unit: "Celsius"
- chart_type: line
- dimensions:
- - name: a dimension per core
- - meta:
- plugin_name: freebsd.plugin
- module_name: dev.cpu.0.freq
- monitored_instance:
- name: dev.cpu.0.freq
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Read current CPU Scaling frequency."
- method_description: "Current CPU Scaling Frequency"
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "Config options"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file"
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list:
- - name: dev.cpu.0.freq
- description: Enable or disable CPU Scaling frequency metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "The metric shows status of CPU frequency, it is direct affected by system load."
- labels: []
- metrics:
- - name: cpu.scaling_cur_freq
- description: Current CPU Scaling Frequency
- unit: "MHz"
- chart_type: line
- dimensions:
- - name: frequency
- - meta:
- plugin_name: freebsd.plugin
- module_name: hw.intrcnt
- monitored_instance:
- name: hw.intrcnt
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Get total number of interrupts"
-          method_description: "The plugin calls the `sysctl` function to collect the necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config option"
- enabled: true
- list:
- - name: hw.intrcnt
- description: Enable or disable Interrupts metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show system interrupts frequency."
- labels: []
- metrics:
- - name: system.intr
- description: Total Hardware Interrupts
- unit: "interrupts/s"
- chart_type: line
- dimensions:
- - name: interrupts
- - name: system.interrupts
- description: System interrupts
- unit: "interrupts/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per interrupt
- - meta:
- plugin_name: freebsd.plugin
- module_name: vm.stats.sys.v_intr
- monitored_instance:
- name: vm.stats.sys.v_intr
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Device interrupts"
-          method_description: "The plugin calls the `sysctl` function to collect the necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config option"
- enabled: true
- list:
- - name: vm.stats.sys.v_intr
- description: Enable or disable device interrupts metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "The metric show device interrupt frequency."
- labels: []
- metrics:
- - name: system.dev_intr
- description: Device Interrupts
- unit: "interrupts/s"
- chart_type: line
- dimensions:
- - name: interrupts
- - meta:
- plugin_name: freebsd.plugin
- module_name: vm.stats.sys.v_soft
- monitored_instance:
- name: vm.stats.sys.v_soft
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Software Interrupt"
- method_description: "vm.stats.sys.v_soft"
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config option"
- enabled: true
- list:
- - name: vm.stats.sys.v_soft
-                description: Enable or disable software interrupts metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "This metric shows software interrupt frequency."
- labels: []
- metrics:
- - name: system.soft_intr
- description: Software Interrupts
- unit: "interrupts/s"
- chart_type: line
- dimensions:
- - name: interrupts
- - meta:
- plugin_name: freebsd.plugin
- module_name: vm.stats.sys.v_swtch
- monitored_instance:
- name: vm.stats.sys.v_swtch
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "CPU context switch"
-          method_description: "The plugin calls the `sysctl` function to collect the necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: vm.stats.sys.v_swtch
- description: Enable or disable CPU context switch metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "The metric count the number of context switches happening on host."
- labels: []
- metrics:
- - name: system.ctxt
- description: CPU Context Switches
- unit: "context switches/s"
- chart_type: line
- dimensions:
- - name: switches
- - name: system.forks
- description: Started Processes
- unit: "processes/s"
- chart_type: line
- dimensions:
- - name: started
- - meta:
- plugin_name: freebsd.plugin
- module_name: vm.swap_info
- monitored_instance:
- name: vm.swap_info
- link: ""
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect information about SWAP memory."
-          method_description: "The plugin calls the `sysctlnametomib` function to collect the necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: vm.swap_info
- description: Enable or disable SWAP metrics.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: used_swap
- link: https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf
- metric: mem.swap
- info: swap memory utilization
- os: "linux freebsd"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "This metric shows the SWAP usage."
- labels: []
- metrics:
- - name: mem.swap
- description: System Swap
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: used
- - meta:
- plugin_name: freebsd.plugin
- module_name: system.ram
- monitored_instance:
- name: system.ram
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "memory.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Show information about system memory usage."
-          method_description: "The plugin calls the `sysctl` function to collect the necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: system.ram
- description: Enable or disable system RAM metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: ram_in_use
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf
- metric: system.ram
- info: system memory utilization
- os: "linux"
- - name: ram_in_use
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf
- metric: system.ram
- info: system memory utilization
- os: "freebsd"
- - name: ram_available
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf
- metric: mem.available
- info: percentage of estimated amount of RAM available for userspace processes, without causing swapping
- os: "linux"
- - name: ram_available
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf
- metric: mem.available
- info: percentage of estimated amount of RAM available for userspace processes, without causing swapping
- os: "freebsd"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "This metric shows RAM usage statistics."
- labels: []
- metrics:
- - name: system.ram
- description: System RAM
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: active
- - name: inactive
- - name: wired
- - name: cache
- - name: laundry
- - name: buffers
- - name: mem.available
- description: Available RAM for applications
- unit: "MiB"
- chart_type: line
- dimensions:
- - name: avail
- - meta:
- plugin_name: freebsd.plugin
- module_name: vm.stats.vm.v_swappgs
- monitored_instance:
- name: vm.stats.vm.v_swappgs
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "memory.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "The metric swap amount of data read from and written to SWAP."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: vm.stats.vm.v_swappgs
-                description: Enable or disable information about SWAP I/O metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: 30min_ram_swapped_out
- link: https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf
- metric: mem.swapio
- info: percentage of the system RAM swapped in the last 30 minutes
- os: "linux freebsd"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "This metric shows events happening on SWAP."
- labels: []
- metrics:
- - name: mem.swapio
- description: Swap I/O
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: io
- - name: out
- - meta:
- plugin_name: freebsd.plugin
- module_name: vm.stats.vm.v_pgfaults
- monitored_instance:
- name: vm.stats.vm.v_pgfaults
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "memory.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect memory page faults events."
- method_description: "The plugin calls `sysctl` function to collect necessary data"
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: vm.stats.vm.v_pgfaults
- description: Enable or disable Memory page fault metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "The number of page faults happened on host."
- labels: []
- metrics:
- - name: mem.pgfaults
- description: Memory Page Faults
- unit: "page faults/s"
- chart_type: line
- dimensions:
- - name: memory
- - name: io_requiring
- - name: cow
- - name: cow_optimized
- - name: in_transit
- - meta:
- plugin_name: freebsd.plugin
- module_name: kern.ipc.sem
- monitored_instance:
- name: kern.ipc.sem
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect information about semaphore."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: kern.ipc.sem
- description: Enable or disable semaphore metrics.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: semaphores_used
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf
- metric: system.ipc_semaphores
- info: IPC semaphore utilization
- os: "linux"
- - name: semaphore_arrays_used
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf
- metric: system.ipc_semaphore_arrays
- info: IPC semaphore arrays utilization
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics shows counters for semaphores on host."
- labels: []
- metrics:
- - name: system.ipc_semaphores
- description: IPC Semaphores
- unit: "semaphores"
- chart_type: area
- dimensions:
- - name: semaphores
- - name: system.ipc_semaphore_arrays
- description: IPC Semaphore Arrays
- unit: "arrays"
- chart_type: area
- dimensions:
- - name: arrays
- - meta:
- plugin_name: freebsd.plugin
- module_name: kern.ipc.shm
- monitored_instance:
- name: kern.ipc.shm
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "memory.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect shared memory information."
-          method_description: "The plugin calls the `sysctl` function to collect the necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: kern.ipc.shm
- description: Enable or disable shared memory metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics give status about current shared memory segments."
- labels: []
- metrics:
- - name: system.ipc_shared_mem_segs
- description: IPC Shared Memory Segments
- unit: "segments"
- chart_type: area
- dimensions:
- - name: segments
- - name: system.ipc_shared_mem_size
- description: IPC Shared Memory Segments Size
- unit: "KiB"
- chart_type: area
- dimensions:
- - name: allocated
- - meta:
- plugin_name: freebsd.plugin
- module_name: kern.ipc.msq
- monitored_instance:
- name: kern.ipc.msq
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect number of IPC message Queues"
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: kern.ipc.msq
- description: Enable or disable IPC message queue metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show statistics IPC messages statistics."
- labels: []
- metrics:
- - name: system.ipc_msq_queues
- description: Number of IPC Message Queues
- unit: "queues"
- chart_type: area
- dimensions:
- - name: queues
- - name: system.ipc_msq_messages
- description: Number of Messages in IPC Message Queues
- unit: "messages"
- chart_type: area
- dimensions:
- - name: messages
- - name: system.ipc_msq_size
- description: Size of IPC Message Queues
- unit: "bytes"
- chart_type: line
- dimensions:
- - name: allocated
- - name: used
- - meta:
- plugin_name: freebsd.plugin
- module_name: uptime
- monitored_instance:
- name: uptime
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Show period of time server is up."
- method_description: "The plugin calls `clock_gettime` function to collect necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: vm.loadavg
- description: Enable or disable load average metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "How long the system is running."
- labels: []
- metrics:
- - name: system.uptime
- description: System Uptime
- unit: "seconds"
- chart_type: line
- dimensions:
- - name: uptime
- - meta:
- plugin_name: freebsd.plugin
- module_name: net.isr
- monitored_instance:
- name: net.isr
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "freebsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect information about system softnet stat."
-          method_description: "The plugin calls the `sysctl` function to collect the necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:net.isr]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: netisr
-                description: Enable or disable a general view of softnet stat metrics.
- default_value: yes
- required: false
- - name: netisr per core
- description: Enable or disable softnet stat metric per core.
- default_value: yes
- required: false
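-              # Illustrative sketch only (not part of the metadata schema):
-              # these options would appear in netdata.conf as:
-              #   [plugin:freebsd:net.isr]
-              #     netisr = yes
-              #     netisr per core = yes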
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: 1min_netdev_backlog_exceeded
- link: https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf
- metric: system.softnet_stat
- info: average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog
- os: "linux"
- - name: 1min_netdev_budget_ran_outs
- link: https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf
- metric: system.softnet_stat
- info:
- average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last
- minute (this can be a cause for dropped packets)
- os: "linux"
- - name: 10min_netisr_backlog_exceeded
- link: https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf
- metric: system.softnet_stat
- info: average number of drops in the last minute due to exceeded sysctl net.route.netisr_maxqlen (this can be a cause for dropped packets)
- os: "freebsd"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show statistics about softnet stats."
- labels: []
- metrics:
- - name: system.softnet_stat
- description: System softnet_stat
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: dispatched
- - name: hybrid_dispatched
- - name: qdrops
- - name: queued
- - name: core
- description: ""
- labels: []
- metrics:
- - name: cpu.softnet_stat
- description: Per CPU netisr statistics
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: dispatched
- - name: hybrid_dispatched
- - name: qdrops
- - name: queued
- - meta:
- plugin_name: freebsd.plugin
- module_name: devstat
- monitored_instance:
- name: devstat
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "hard-drive.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect information per hard disk available on host."
- method_description: "The plugin calls `sysctl` function to collect necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:kern.devstat]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: enable new disks detected at runtime
-                description: Enable or disable the detection of new disks at runtime.
- default_value: auto
- required: false
- - name: performance metrics for pass devices
- description: Enable or disable metrics for disks with type `PASS`.
- default_value: auto
- required: false
- - name: total bandwidth for all disks
- description: Enable or disable total bandwidth metric for all disks.
- default_value: yes
- required: false
- - name: bandwidth for all disks
- description: Enable or disable bandwidth for all disks metric.
- default_value: auto
- required: false
- - name: operations for all disks
- description: Enable or disable operations for all disks metric.
- default_value: auto
- required: false
- - name: queued operations for all disks
- description: Enable or disable queued operations for all disks metric.
- default_value: auto
- required: false
- - name: utilization percentage for all disks
- description: Enable or disable utilization percentage for all disks metric.
- default_value: auto
- required: false
- - name: i/o time for all disks
- description: Enable or disable I/O time for all disks metric.
- default_value: auto
- required: false
- - name: average completed i/o time for all disks
- description: Enable or disable average completed I/O time for all disks metric.
- default_value: auto
- required: false
- - name: average completed i/o bandwidth for all disks
- description: Enable or disable average completed I/O bandwidth for all disks metric.
- default_value: auto
- required: false
- - name: average service time for all disks
- description: Enable or disable average service time for all disks metric.
- default_value: auto
- required: false
- - name: disable by default disks matching
- description: Do not create charts for disks listed.
- default_value: ""
- required: false
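-              # Illustrative sketch only (not part of the metadata schema): a few
-              # of the options above as they would appear in netdata.conf:
-              #   [plugin:freebsd:kern.devstat]
-              #     enable new disks detected at runtime = auto
-              #     total bandwidth for all disks = yes
-              #     performance metrics for pass devices = auto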
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: 10min_disk_utilization
- link: https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf
- metric: disk.util
- info: average percentage of time ${label:device} disk was busy over the last 10 minutes
- os: "linux freebsd"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics give a general vision about I/O events on disks."
- labels: []
- metrics:
- - name: system.io
- description: Disk I/O
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: io
- - name: out
- - name: disk
- description: ""
- labels: []
- metrics:
- - name: disk.io
- description: Disk I/O Bandwidth
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: reads
- - name: writes
- - name: frees
- - name: disk.ops
- description: Disk Completed I/O Operations
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
- - name: other
- - name: frees
- - name: disk.qops
- description: Disk Current I/O Operations
- unit: "operations"
- chart_type: line
- dimensions:
- - name: operations
- - name: disk.util
- description: Disk Utilization Time
- unit: "% of time working"
- chart_type: line
- dimensions:
- - name: utilization
- - name: disk.iotime
- description: Disk Total I/O Time
- unit: "milliseconds/s"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
- - name: other
- - name: frees
- - name: disk.await
- description: Average Completed I/O Operation Time
- unit: "milliseconds/operation"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
- - name: other
- - name: frees
- - name: disk.avgsz
- description: Average Completed I/O Operation Bandwidth
- unit: "KiB/operation"
- chart_type: area
- dimensions:
- - name: reads
- - name: writes
- - name: frees
- - name: disk.svctm
- description: Average Service Time
- unit: "milliseconds/operation"
- chart_type: line
- dimensions:
- - name: svctm
- - meta:
- plugin_name: freebsd.plugin
- module_name: net.inet.tcp.states
- monitored_instance:
- name: net.inet.tcp.states
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "network.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ""
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: net.inet.tcp.states
- description: Enable or disable TCP state metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: tcp_connections
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_conn.conf
- metric: ipv4.tcpsock
- info: IPv4 TCP connections utilization
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "A counter for TCP connections."
- labels: []
- metrics:
- - name: ipv4.tcpsock
- description: IPv4 TCP Connections
- unit: "active connections"
- chart_type: line
- dimensions:
- - name: connections
- - meta:
- plugin_name: freebsd.plugin
- module_name: net.inet.tcp.stats
- monitored_instance:
- name: net.inet.tcp.stats
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "network.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect overall information about TCP connections."
-          method_description: "The plugin calls the `sysctl` function to collect the necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:net.inet.tcp.stats]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: ipv4 TCP packets
- description: Enable or disable ipv4 TCP packets metric.
- default_value: yes
- required: false
- - name: ipv4 TCP errors
-                description: Enable or disable ipv4 TCP errors metric.
- default_value: yes
- required: false
- - name: ipv4 TCP handshake issues
- description: Enable or disable ipv4 TCP handshake issue metric.
- default_value: yes
- required: false
- - name: TCP connection aborts
- description: Enable or disable TCP connection aborts metric.
- default_value: auto
- required: false
- - name: TCP out-of-order queue
- description: Enable or disable TCP out-of-order queue metric.
- default_value: auto
- required: false
- - name: TCP SYN cookies
- description: Enable or disable TCP SYN cookies metric.
- default_value: auto
- required: false
- - name: TCP listen issues
- description: Enable or disable TCP listen issues metric.
- default_value: auto
- required: false
- - name: ECN packets
- description: Enable or disable ECN packets metric.
- default_value: auto
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: 1m_ipv4_tcp_resets_sent
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf
- metric: ipv4.tcphandshake
- info: average number of sent TCP RESETS over the last minute
- os: "linux"
- - name: 10s_ipv4_tcp_resets_sent
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf
- metric: ipv4.tcphandshake
- info:
- average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has
- crashed. Netdata will not send a clear notification for this alarm.
- os: "linux"
- - name: 1m_ipv4_tcp_resets_received
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf
- metric: ipv4.tcphandshake
- info: average number of received TCP RESETS over the last minute
- os: "linux freebsd"
- - name: 10s_ipv4_tcp_resets_received
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf
- metric: ipv4.tcphandshake
- info:
- average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed.
- Netdata will not send a clear notification for this alarm.
- os: "linux freebsd"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show TCP connections statistics."
- labels: []
- metrics:
- - name: ipv4.tcppackets
- description: IPv4 TCP Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.tcperrors
- description: IPv4 TCP Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InErrs
- - name: InCsumErrors
- - name: RetransSegs
- - name: ipv4.tcphandshake
- description: IPv4 TCP Handshake Issues
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: EstabResets
- - name: ActiveOpens
- - name: PassiveOpens
- - name: AttemptFails
- - name: ipv4.tcpconnaborts
- description: TCP Connection Aborts
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: baddata
- - name: userclosed
- - name: nomemory
- - name: timeout
- - name: linger
- - name: ipv4.tcpofo
- description: TCP Out-Of-Order Queue
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: inqueue
- - name: ipv4.tcpsyncookies
- description: TCP SYN Cookies
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: failed
- - name: ipv4.tcplistenissues
- description: TCP Listen Socket Issues
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: overflows
- - name: ipv4.ecnpkts
- description: IPv4 ECN Statistics
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InCEPkts
- - name: InECT0Pkts
- - name: InECT1Pkts
- - name: OutECT0Pkts
- - name: OutECT1Pkts
- - meta:
- plugin_name: freebsd.plugin
- module_name: net.inet.udp.stats
- monitored_instance:
- name: net.inet.udp.stats
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "network.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect information about UDP connections."
-          method_description: "The plugin calls the `sysctl` function to collect the necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:net.inet.udp.stats]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: ipv4 UDP packets
- description: Enable or disable ipv4 UDP packets metric.
- default_value: yes
- required: false
- - name: ipv4 UDP errors
- description: Enable or disable ipv4 UDP errors metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: 1m_ipv4_udp_receive_buffer_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf
- metric: ipv4.udperrors
- info: average number of UDP receive buffer errors over the last minute
- os: "linux freebsd"
- - name: 1m_ipv4_udp_send_buffer_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf
- metric: ipv4.udperrors
- info: average number of UDP send buffer errors over the last minute
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show UDP connections statistics."
- labels: []
- metrics:
- - name: ipv4.udppackets
- description: IPv4 UDP Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.udperrors
- description: IPv4 UDP Errors
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: InErrors
- - name: NoPorts
- - name: RcvbufErrors
- - name: InCsumErrors
- - name: IgnoredMulti
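
As a concrete illustration of the `sysctl`-based collection this module uses, the standalone sketch below reads the kernel's UDP counters on FreeBSD. It is an editorial example, not the collector's actual code; the `struct udpstat` field names follow the classic BSD layout and may differ between FreeBSD releases.

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <stdio.h>

int main(void) {
    struct udpstat st;
    size_t len = sizeof(st);

    // One bulk read returns the whole counter structure.
    if (sysctlbyname("net.inet.udp.stats", &st, &len, NULL, 0) != 0) {
        perror("sysctlbyname");
        return 1;
    }

    // Raw, ever-increasing counters; the collector derives the
    // per-second rates in the charts above by diffing successive reads.
    printf("received=%llu sent=%llu noport=%llu badsum=%llu\n",
           (unsigned long long)st.udps_ipackets,
           (unsigned long long)st.udps_opackets,
           (unsigned long long)st.udps_noport,
           (unsigned long long)st.udps_badsum);
    return 0;
}
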
- - meta:
- plugin_name: freebsd.plugin
- module_name: net.inet.icmp.stats
- monitored_instance:
- name: net.inet.icmp.stats
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "network.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect information about ICMP traffic."
- method_description: "The plugin calls `sysctl` function to collect necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:net.inet.icmp.stats]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: IPv4 ICMP packets
- description: Enable or disable IPv4 ICMP packets metric.
- default_value: yes
- required: false
- - name: IPv4 ICMP error
- description: Enable or disable IPv4 ICMP error metric.
- default_value: yes
- required: false
- - name: IPv4 ICMP messages
- description: Enable or disable IPv4 ICMP messages metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show ICMP connections statistics."
- labels: []
- metrics:
- - name: ipv4.icmp
- description: IPv4 ICMP Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.icmp_errors
- description: IPv4 ICMP Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InErrors
- - name: OutErrors
- - name: InCsumErrors
- - name: ipv4.icmpmsg
- description: IPv4 ICMP Messages
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InEchoReps
- - name: OutEchoReps
- - name: InEchos
- - name: OutEchos
- - meta:
- plugin_name: freebsd.plugin
- module_name: net.inet.ip.stats
- monitored_instance:
- name: net.inet.ip.stats
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "network.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect IP stats"
- method_description: "The plugin calls `sysctl` function to collect necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:net.inet.ip.stats]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: ipv4 packets
- description: Enable or disable IPv4 packets metric.
- default_value: yes
- required: false
- - name: ipv4 fragments sent
- description: Enable or disable IPv4 fragments sent metric.
- default_value: yes
- required: false
- - name: ipv4 fragments assembly
- description: Enable or disable IPv4 fragments assembly metric.
- default_value: yes
- required: false
- - name: ipv4 errors
- description: Enable or disable IPv4 errors metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show IPv4 connections statistics."
- labels: []
- metrics:
- - name: ipv4.packets
- description: IPv4 Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: forwarded
- - name: delivered
- - name: ipv4.fragsout
- description: IPv4 Fragments Sent
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: created
- - name: ipv4.fragsin
- description: IPv4 Fragments Reassembly
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: all
- - name: ipv4.errors
- description: IPv4 Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InDiscards
- - name: OutDiscards
- - name: InHdrErrors
- - name: OutNoRoutes
- - name: InAddrErrors
- - name: InUnknownProtos
- - meta:
- plugin_name: freebsd.plugin
- module_name: net.inet6.ip6.stats
- monitored_instance:
- name: net.inet6.ip6.stats
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "network.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect information abou IPv6 stats."
- method_description: "The plugin calls `sysctl` function to collect necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:net.inet6.ip6.stats]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: ipv6 packets
- description: Enable or disable ipv6 packet metric.
- default_value: auto
- required: false
- - name: ipv6 fragments sent
- description: Enable or disable ipv6 fragments sent metric.
- default_value: auto
- required: false
- - name: ipv6 fragments assembly
- description: Enable or disable ipv6 fragments assembly metric.
- default_value: auto
- required: false
- - name: ipv6 errors
- description: Enable or disable ipv6 errors metric.
- default_value: auto
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show general information about IPv6 connections."
- labels: []
- metrics:
- - name: ipv6.packets
- description: IPv6 Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: forwarded
- - name: delivers
- - name: ipv6.fragsout
- description: IPv6 Fragments Sent
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: all
- - name: ipv6.fragsin
- description: IPv6 Fragments Reassembly
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: timeout
- - name: all
- - name: ipv6.errors
- description: IPv6 Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InDiscards
- - name: OutDiscards
- - name: InHdrErrors
- - name: InAddrErrors
- - name: InTruncatedPkts
- - name: InNoRoutes
- - name: OutNoRoutes
- - meta:
- plugin_name: freebsd.plugin
- module_name: net.inet6.icmp6.stats
- monitored_instance:
- name: net.inet6.icmp6.stats
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "network.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect information abou IPv6 ICMP"
- method_description: "The plugin calls `sysctl` function to collect necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:net.inet6.icmp6.stats]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: icmp
- description: Enable or disable ICMP metric.
- default_value: auto
- required: false
- - name: icmp redirects
- description: Enable or disable ICMP redirects metric.
- default_value: auto
- required: false
- - name: icmp errors
- description: Enable or disable ICMP errors metric.
- default_value: auto
- required: false
- - name: icmp echos
- description: Enable or disable ICMP echos metric.
- default_value: auto
- required: false
- - name: icmp router
- description: Enable or disable ICMP router metric.
- default_value: auto
- required: false
- - name: icmp neighbor
- description: Enable or disable ICMP neighbor metric.
- default_value: auto
- required: false
- - name: icmp types
- description: Enable or disable ICMP types metric.
- default_value: auto
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "Collect IPv6 ICMP traffic statistics."
- labels: []
- metrics:
- - name: ipv6.icmp
- description: IPv6 ICMP Messages
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.icmpredir
- description: IPv6 ICMP Redirects
- unit: "redirects/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.icmperrors
- description: IPv6 ICMP Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: InErrors
- - name: OutErrors
- - name: InCsumErrors
- - name: InDestUnreachs
- - name: InPktTooBigs
- - name: InTimeExcds
- - name: InParmProblems
- - name: OutDestUnreachs
- - name: OutTimeExcds
- - name: OutParmProblems
- - name: ipv6.icmpechos
- description: IPv6 ICMP Echo
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InEchos
- - name: OutEchos
- - name: InEchoReplies
- - name: OutEchoReplies
- - name: ipv6.icmprouter
- description: IPv6 Router Messages
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InSolicits
- - name: OutSolicits
- - name: InAdvertisements
- - name: OutAdvertisements
- - name: ipv6.icmpneighbor
- description: IPv6 Neighbor Messages
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InSolicits
- - name: OutSolicits
- - name: InAdvertisements
- - name: OutAdvertisements
- - name: ipv6.icmptypes
- description: IPv6 ICMP Types
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InType1
- - name: InType128
- - name: InType129
- - name: InType136
- - name: OutType1
- - name: OutType128
- - name: OutType129
- - name: OutType133
- - name: OutType135
- - name: OutType143
- - meta:
- plugin_name: freebsd.plugin
- module_name: ipfw
- monitored_instance:
- name: ipfw
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "firewall.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect information about FreeBSD firewall."
- method_description: "The plugin uses RAW socket to communicate with kernel and collect data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:ipfw]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: counters for static rules
- description: Enable or disable counters for static rules metric.
- default_value: yes
- required: false
- - name: number of dynamic rules
- description: Enable or disable number of dynamic rules metric.
- default_value: yes
- required: false
- - name: allocated memory
- description: Enable or disable allocated memory metric.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "Theese metrics show FreeBSD firewall statistics."
- labels: []
- metrics:
- - name: ipfw.mem
- description: Memory allocated by rules
- unit: "bytes"
- chart_type: stacked
- dimensions:
- - name: dynamic
- - name: static
- - name: ipfw.packets
- description: Packets
- unit: "packets/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per static rule
- - name: ipfw.bytes
- description: Bytes
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per static rule
- - name: ipfw.active
- description: Active rules
- unit: "rules"
- chart_type: stacked
- dimensions:
- - name: a dimension per dynamic rule
- - name: ipfw.expired
- description: Expired rules
- unit: "rules"
- chart_type: stacked
- dimensions:
- - name: a dimension per dynamic rule
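
The raw-socket channel mentioned in the ipfw metadata above can be sketched as follows. This is a simplified illustration: only opening the control socket is shown, while the actual rule and dynamic-state dumps go through version-specific getsockopt() requests (the IP_FW3 interface from <netinet/ip_fw.h> on modern FreeBSD), whose request layout is deliberately omitted here.

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
    // ipfw state is not exported via sysctl; userland talks to the kernel
    // firewall through socket options on a raw IP socket (requires root).
    int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
    if (s < 0) {
        perror("socket");
        return 1;
    }

    // Rule counters and dynamic-state dumps would then be fetched with
    // getsockopt(s, IPPROTO_IP, IP_FW3, ...); the request header layout is
    // version-specific and intentionally left out of this sketch.
    printf("ipfw control socket opened: fd=%d\n", s);
    close(s);
    return 0;
}
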
- - meta:
- plugin_name: freebsd.plugin
- module_name: getifaddrs
- monitored_instance:
- name: getifaddrs
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "network.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect traffic per network interface."
- method_description: "The plugin calls `getifaddrs` function to collect necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:getifaddrs]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: enable new interfaces detected at runtime
- description: Enable or disable discovery of new interfaces after the plugin starts.
- default_value: auto
- required: false
- - name: total bandwidth for physical interfaces
- description: Enable or disable total bandwidth for physical interfaces metric.
- default_value: auto
- required: false
- - name: total packets for physical interfaces
- description: Enable or disable total packets for physical interfaces metric.
- default_value: auto
- required: false
- - name: total bandwidth for ipv4 interface
- description: Enable or disable total bandwidth for IPv4 interface metric.
- default_value: auto
- required: false
- - name: total bandwidth for ipv6 interfaces
- description: Enable or disable total bandwidth for ipv6 interfaces metric.
- default_value: auto
- required: false
- - name: bandwidth for all interfaces
- description: Enable or disable bandwidth for all interfaces metric.
- default_value: auto
- required: false
- - name: packets for all interfaces
- description: Enable or disable packets for all interfaces metric.
- default_value: auto
- required: false
- - name: errors for all interfaces
- description: Enable or disable errors for all interfaces metric.
- default_value: auto
- required: false
- - name: drops for all interfaces
- description: Enable or disable drops for all interfaces metric.
- default_value: auto
- required: false
- - name: collisions for all interface
- description: Enable or disable collisions for all interface metric.
- default_value: auto
- required: false
- - name: disable by default interfaces matching
- description: Do not display data for the listed interfaces.
- default_value: lo*
- required: false
- - name: set physical interfaces for system.net
- description: Interfaces matching the listed patterns are treated as physical and aggregated into the system.net chart.
- default_value: igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc*
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: interface_speed
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.net
- info: network interface ${label:device} current speed
- os: "*"
- - name: inbound_packets_dropped_ratio
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.drops
- info: ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes
- os: "*"
- - name: outbound_packets_dropped_ratio
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.drops
- info: ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes
- os: "*"
- - name: 1m_received_packets_rate
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.packets
- info: average number of packets received by the network interface ${label:device} over the last minute
- os: "linux freebsd"
- - name: 10s_received_packets_storm
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.packets
- info: ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute
- os: "linux freebsd"
- - name: interface_inbound_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.errors
- info: number of inbound errors for the network interface ${label:device} in the last 10 minutes
- os: "freebsd"
- - name: interface_outbound_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.errors
- info: number of outbound errors for the network interface ${label:device} in the last 10 minutes
- os: "freebsd"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "General overview about network traffic."
- labels: []
- metrics:
- - name: system.net
- description: Network Traffic
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: system.packets
- description: Network Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: multicast_received
- - name: multicast_sent
- - name: system.ipv4
- description: IPv4 Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: system.ipv6
- description: IPv6 Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: network device
- description: ""
- labels: []
- metrics:
- - name: net.net
- description: Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: net.packets
- description: Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: multicast_received
- - name: multicast_sent
- - name: net.errors
- description: Interface Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: inbound
- - name: outbound
- - name: net.drops
- description: Interface Drops
- unit: "drops/s"
- chart_type: line
- dimensions:
- - name: inbound
- - name: outbound
- - name: net.events
- description: Network Interface Events
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: collisions
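
To make the `getifaddrs` mechanism concrete, here is a minimal standalone sketch that walks the interface list and reads the per-interface counters behind the net.net, net.packets and net.errors charts above. It relies on the standard BSD convention that traffic counters are exposed through `ifa_data` on AF_LINK entries.

#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
#include <ifaddrs.h>
#include <stdio.h>

int main(void) {
    struct ifaddrs *ifap, *ifa;

    if (getifaddrs(&ifap) != 0) {
        perror("getifaddrs");
        return 1;
    }

    for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
        // Traffic counters live on the AF_LINK entry of each interface.
        if (!ifa->ifa_addr || ifa->ifa_addr->sa_family != AF_LINK || !ifa->ifa_data)
            continue;

        struct if_data *ifd = (struct if_data *)ifa->ifa_data;
        printf("%s: rx=%llu tx=%llu bytes, ierrors=%llu, collisions=%llu\n",
               ifa->ifa_name,
               (unsigned long long)ifd->ifi_ibytes,
               (unsigned long long)ifd->ifi_obytes,
               (unsigned long long)ifd->ifi_ierrors,
               (unsigned long long)ifd->ifi_collisions);
    }

    freeifaddrs(ifap);
    return 0;
}
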
- - meta:
- plugin_name: freebsd.plugin
- module_name: getmntinfo
- monitored_instance:
- name: getmntinfo
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "hard-drive.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect information per mount point."
- method_description: "The plugin calls `getmntinfo` function to collect necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:getmntinfo]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: enable new mount points detected at runtime
- description: Check for new mount points during runtime.
- default_value: auto
- required: false
- - name: space usage for all disks
- description: Enable or disable space usage for all disks metric.
- default_value: auto
- required: false
- - name: inodes usage for all disks
- description: Enable or disable inodes usage for all disks metric.
- default_value: auto
- required: false
- - name: exclude space metrics on paths
- description: Do not show metrics for listed paths.
- default_value: /proc/*
- required: false
- - name: exclude space metrics on filesystems
- description: Do not monitor listed filesystems.
- default_value: autofs procfs subfs devfs none
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: disk_space_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf
- metric: disk.space
- info: disk ${label:mount_point} space utilization
- os: "linux freebsd"
- - name: disk_inode_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf
- metric: disk.inodes
- info: disk ${label:mount_point} inode utilization
- os: "linux freebsd"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: mount point
- description: "These metrics show detailss about mount point usages."
- labels: []
- metrics:
- - name: disk.space
- description: Disk Space Usage for {mounted dir} [{mounted filesystem}]
- unit: "GiB"
- chart_type: stacked
- dimensions:
- - name: avail
- - name: used
- - name: reserved_for_root
- - name: disk.inodes
- description: Disk Files (inodes) Usage for {mounted dir} [{mounted filesystem}]
- unit: "inodes"
- chart_type: stacked
- dimensions:
- - name: avail
- - name: used
- - name: reserved_for_root
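
A minimal sketch of the `getmntinfo` call described above: it lists every mounted filesystem with the space and inode figures that feed the disk.space and disk.inodes charts. The chart's reserved_for_root dimension corresponds to f_bfree - f_bavail.

#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
#include <stdio.h>

int main(void) {
    struct statfs *mnt;

    // MNT_NOWAIT returns possibly slightly stale statistics without
    // blocking on unresponsive filesystems.
    int n = getmntinfo(&mnt, MNT_NOWAIT);
    if (n <= 0) {
        perror("getmntinfo");
        return 1;
    }

    for (int i = 0; i < n; i++) {
        unsigned long long used =
            (unsigned long long)(mnt[i].f_blocks - mnt[i].f_bfree) * mnt[i].f_bsize;
        unsigned long long avail =
            (unsigned long long)mnt[i].f_bavail * mnt[i].f_bsize;
        printf("%s (%s): used=%llu avail=%llu inodes_used=%llu\n",
               mnt[i].f_mntonname, mnt[i].f_fstypename,
               used, avail,
               (unsigned long long)(mnt[i].f_files - mnt[i].f_ffree));
    }
    return 0;
}
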
- - meta:
- plugin_name: freebsd.plugin
- module_name: zfs
- monitored_instance:
- name: zfs
- link: "https://www.freebsd.org/"
- categories:
- - data-collection.freebsd
- icon_filename: "filesystem.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Collect metrics for ZFS filesystem"
- method_description: "The plugin uses `sysctl` function to collect necessary data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freebsd:zfs_arcstats]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: show zero charts
- description: Enable or disable charts whose metrics are all zero.
- default_value: no
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: zfs_memory_throttle
- link: https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf
- metric: zfs.memory_ops
- info: number of times ZFS had to limit the ARC growth in the last 10 minutes
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show detailed information about ZFS filesystem."
- labels: []
- metrics:
- - name: zfs.arc_size
- description: ZFS ARC Size
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: arcsz
- - name: target
- - name: min
- - name: max
- - name: zfs.l2_size
- description: ZFS L2 ARC Size
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: actual
- - name: size
- - name: zfs.reads
- description: ZFS Reads
- unit: "reads/s"
- chart_type: area
- dimensions:
- - name: arc
- - name: demand
- - name: prefetch
- - name: metadata
- - name: l2
- - name: zfs.bytes
- description: ZFS ARC L2 Read/Write Rate
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: zfs.hits
- description: ZFS ARC Hits
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.hits_rate
- description: ZFS ARC Hits Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.dhits
- description: ZFS Demand Hits
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.dhits_rate
- description: ZFS Demand Hits Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.phits
- description: ZFS Prefetch Hits
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.phits_rate
- description: ZFS Prefetch Hits Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.mhits
- description: ZFS Metadata Hits
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.mhits_rate
- description: ZFS Metadata Hits Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.l2hits
- description: ZFS L2 Hits
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.l2hits_rate
- description: ZFS L2 Hits Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.list_hits
- description: ZFS List Hits
- unit: "hits/s"
- chart_type: area
- dimensions:
- - name: mfu
- - name: mfu_ghost
- - name: mru
- - name: mru_ghost
- - name: zfs.arc_size_breakdown
- description: ZFS ARC Size Breakdown
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: recent
- - name: frequent
- - name: zfs.memory_ops
- description: ZFS Memory Operations
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: throttled
- - name: zfs.important_ops
- description: ZFS Important Operations
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: evict_skip
- - name: deleted
- - name: mutex_miss
- - name: hash_collisions
- - name: zfs.actual_hits
- description: ZFS Actual Cache Hits
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.actual_hits_rate
- description: ZFS Actual Cache Hits Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.demand_data_hits
- description: ZFS Data Demand Efficiency
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.demand_data_hits_rate
- description: ZFS Data Demand Efficiency Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.prefetch_data_hits
- description: ZFS Data Prefetch Efficiency
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.prefetch_data_hits_rate
- description: ZFS Data Prefetch Efficiency Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.hash_elements
- description: ZFS ARC Hash Elements
- unit: "elements"
- chart_type: line
- dimensions:
- - name: current
- - name: max
- - name: zfs.hash_chains
- description: ZFS ARC Hash Chains
- unit: "chains"
- chart_type: line
- dimensions:
- - name: current
- - name: max
- - name: zfs.trim_bytes
- description: Successfully TRIMmed bytes
- unit: "bytes"
- chart_type: line
- dimensions:
- - name: TRIMmed
- - name: zfs.trim_requests
- description: TRIM requests
- unit: "requests"
- chart_type: line
- dimensions:
- - name: successful
- - name: failed
- - name: unsupported
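
For reference, the ARC statistics behind the charts above are re-exported on FreeBSD under the kstat.zfs.misc.arcstats sysctl tree (assuming the ZFS module is loaded). A minimal sketch reading a few of them:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

// Read one ZFS ARC kstat as a 64-bit counter.
static int read_arcstat(const char *name, unsigned long long *out) {
    uint64_t val = 0;
    size_t len = sizeof(val);
    char path[256];

    snprintf(path, sizeof(path), "kstat.zfs.misc.arcstats.%s", name);
    if (sysctlbyname(path, &val, &len, NULL, 0) != 0)
        return -1;

    *out = (unsigned long long)val;
    return 0;
}

int main(void) {
    unsigned long long size, hits, misses;

    if (read_arcstat("size", &size) ||
        read_arcstat("hits", &hits) ||
        read_arcstat("misses", &misses)) {
        perror("sysctlbyname");
        return 1;
    }

    // Ratio charts such as zfs.hits are derived by diffing the
    // hits/misses counters between iterations.
    printf("arc size=%llu hits=%llu misses=%llu\n", size, hits, misses);
    return 0;
}
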
diff --git a/collectors/freebsd.plugin/plugin_freebsd.c b/collectors/freebsd.plugin/plugin_freebsd.c
deleted file mode 100644
index 976fe26fb..000000000
--- a/collectors/freebsd.plugin/plugin_freebsd.c
+++ /dev/null
@@ -1,136 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_freebsd.h"
-
-static struct freebsd_module {
- const char *name;
- const char *dim;
-
- int enabled;
-
- int (*func)(int update_every, usec_t dt);
-
- RRDDIM *rd;
-
-} freebsd_modules[] = {
-
- // system metrics
- {.name = "kern.cp_time", .dim = "cp_time", .enabled = 1, .func = do_kern_cp_time},
- {.name = "vm.loadavg", .dim = "loadavg", .enabled = 1, .func = do_vm_loadavg},
- {.name = "system.ram", .dim = "system_ram", .enabled = 1, .func = do_system_ram},
- {.name = "vm.swap_info", .dim = "swap", .enabled = 1, .func = do_vm_swap_info},
- {.name = "vm.stats.vm.v_swappgs", .dim = "swap_io", .enabled = 1, .func = do_vm_stats_sys_v_swappgs},
- {.name = "vm.vmtotal", .dim = "vmtotal", .enabled = 1, .func = do_vm_vmtotal},
- {.name = "vm.stats.vm.v_forks", .dim = "forks", .enabled = 1, .func = do_vm_stats_sys_v_forks},
- {.name = "vm.stats.sys.v_swtch", .dim = "context_swtch", .enabled = 1, .func = do_vm_stats_sys_v_swtch},
- {.name = "hw.intrcnt", .dim = "hw_intr", .enabled = 1, .func = do_hw_intcnt},
- {.name = "vm.stats.sys.v_intr", .dim = "dev_intr", .enabled = 1, .func = do_vm_stats_sys_v_intr},
- {.name = "vm.stats.sys.v_soft", .dim = "soft_intr", .enabled = 1, .func = do_vm_stats_sys_v_soft},
- {.name = "net.isr", .dim = "net_isr", .enabled = 1, .func = do_net_isr},
- {.name = "kern.ipc.sem", .dim = "semaphores", .enabled = 1, .func = do_kern_ipc_sem},
- {.name = "kern.ipc.shm", .dim = "shared_memory", .enabled = 1, .func = do_kern_ipc_shm},
- {.name = "kern.ipc.msq", .dim = "message_queues", .enabled = 1, .func = do_kern_ipc_msq},
- {.name = "uptime", .dim = "uptime", .enabled = 1, .func = do_uptime},
-
- // memory metrics
- {.name = "vm.stats.vm.v_pgfaults", .dim = "pgfaults", .enabled = 1, .func = do_vm_stats_sys_v_pgfaults},
-
- // CPU metrics
- {.name = "kern.cp_times", .dim = "cp_times", .enabled = 1, .func = do_kern_cp_times},
- {.name = "dev.cpu.temperature", .dim = "cpu_temperature", .enabled = 1, .func = do_dev_cpu_temperature},
- {.name = "dev.cpu.0.freq", .dim = "cpu_frequency", .enabled = 1, .func = do_dev_cpu_0_freq},
-
- // disk metrics
- {.name = "kern.devstat", .dim = "kern_devstat", .enabled = 1, .func = do_kern_devstat},
- {.name = "getmntinfo", .dim = "getmntinfo", .enabled = 1, .func = do_getmntinfo},
-
- // network metrics
- {.name = "net.inet.tcp.states", .dim = "tcp_states", .enabled = 1, .func = do_net_inet_tcp_states},
- {.name = "net.inet.tcp.stats", .dim = "tcp_stats", .enabled = 1, .func = do_net_inet_tcp_stats},
- {.name = "net.inet.udp.stats", .dim = "udp_stats", .enabled = 1, .func = do_net_inet_udp_stats},
- {.name = "net.inet.icmp.stats", .dim = "icmp_stats", .enabled = 1, .func = do_net_inet_icmp_stats},
- {.name = "net.inet.ip.stats", .dim = "ip_stats", .enabled = 1, .func = do_net_inet_ip_stats},
- {.name = "net.inet6.ip6.stats", .dim = "ip6_stats", .enabled = 1, .func = do_net_inet6_ip6_stats},
- {.name = "net.inet6.icmp6.stats", .dim = "icmp6_stats", .enabled = 1, .func = do_net_inet6_icmp6_stats},
-
- // network interfaces metrics
- {.name = "getifaddrs", .dim = "getifaddrs", .enabled = 1, .func = do_getifaddrs},
-
- // ZFS metrics
- {.name = "kstat.zfs.misc.arcstats", .dim = "arcstats", .enabled = 1, .func = do_kstat_zfs_misc_arcstats},
- {.name = "kstat.zfs.misc.zio_trim", .dim = "trim", .enabled = 1, .func = do_kstat_zfs_misc_zio_trim},
-
- // ipfw metrics
- {.name = "ipfw", .dim = "ipfw", .enabled = 1, .func = do_ipfw},
-
- // the terminator of this array
- {.name = NULL, .dim = NULL, .enabled = 0, .func = NULL}
-};
-
-#if WORKER_UTILIZATION_MAX_JOB_TYPES < 33
-#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 33
-#endif
-
-static void freebsd_main_cleanup(void *ptr)
-{
- worker_unregister();
-
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- collector_info("cleaning up...");
-
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-void *freebsd_main(void *ptr)
-{
- worker_register("FREEBSD");
-
- netdata_thread_cleanup_push(freebsd_main_cleanup, ptr);
-
- // initialize FreeBSD plugin
- if (freebsd_plugin_init())
- netdata_cleanup_and_exit(1);
-
- // check the enabled status for each module
- int i;
- for (i = 0; freebsd_modules[i].name; i++) {
- struct freebsd_module *pm = &freebsd_modules[i];
-
- pm->enabled = config_get_boolean("plugin:freebsd", pm->name, pm->enabled);
- pm->rd = NULL;
-
- worker_register_job_name(i, freebsd_modules[i].dim);
- }
-
- usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- while (!netdata_exit) {
- worker_is_idle();
-
- usec_t hb_dt = heartbeat_next(&hb, step);
-
- if (unlikely(netdata_exit))
- break;
-
- for (i = 0; freebsd_modules[i].name; i++) {
- struct freebsd_module *pm = &freebsd_modules[i];
- if (unlikely(!pm->enabled))
- continue;
-
- netdata_log_debug(D_PROCNETDEV_LOOP, "FREEBSD calling %s.", pm->name);
-
- worker_is_busy(i);
- pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt);
-
- if (unlikely(netdata_exit))
- break;
- }
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
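
The table and main loop above define a simple module contract: each do_* function receives the update interval and the elapsed microseconds, and its return value is negated into pm->enabled, so returning non-zero permanently disables the module. A hypothetical module following that contract might look like the sketch below; the name do_example_metric and the sysctl it reads are illustrative, and the chart updates (which go through netdata's internal RRD API) are left as a comment.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>

typedef unsigned long long usec_t; // mirrors the type used in the table above

int do_example_metric(int update_every, usec_t dt) {
    (void)update_every;
    (void)dt;

    int32_t value = 0;
    size_t len = sizeof(value);

    // A module typically reads one sysctl subtree per iteration...
    if (sysctlbyname("kern.smp.cpus", &value, &len, NULL, 0) != 0)
        return 1; // non-zero: the main loop flips pm->enabled to 0

    // ...and would then feed `value` into its RRD charts here.
    return 0;
}
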
diff --git a/collectors/freebsd.plugin/plugin_freebsd.h b/collectors/freebsd.plugin/plugin_freebsd.h
deleted file mode 100644
index af7d0822e..000000000
--- a/collectors/freebsd.plugin/plugin_freebsd.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGIN_FREEBSD_H
-#define NETDATA_PLUGIN_FREEBSD_H 1
-
-#include "daemon/common.h"
-
-#include <sys/sysctl.h>
-
-#define KILO_FACTOR 1024
-#define MEGA_FACTOR 1048576 // 1024 * 1024
-#define GIGA_FACTOR 1073741824 // 1024 * 1024 * 1024
-
-#define MAX_INT_DIGITS 10 // maximum number of digits for int
-
-int freebsd_plugin_init();
-
-int do_vm_loadavg(int update_every, usec_t dt);
-int do_vm_vmtotal(int update_every, usec_t dt);
-int do_kern_cp_time(int update_every, usec_t dt);
-int do_kern_cp_times(int update_every, usec_t dt);
-int do_dev_cpu_temperature(int update_every, usec_t dt);
-int do_dev_cpu_0_freq(int update_every, usec_t dt);
-int do_hw_intcnt(int update_every, usec_t dt);
-int do_vm_stats_sys_v_intr(int update_every, usec_t dt);
-int do_vm_stats_sys_v_soft(int update_every, usec_t dt);
-int do_vm_stats_sys_v_swtch(int update_every, usec_t dt);
-int do_vm_stats_sys_v_forks(int update_every, usec_t dt);
-int do_vm_swap_info(int update_every, usec_t dt);
-int do_system_ram(int update_every, usec_t dt);
-int do_vm_stats_sys_v_swappgs(int update_every, usec_t dt);
-int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt);
-int do_kern_ipc_sem(int update_every, usec_t dt);
-int do_kern_ipc_shm(int update_every, usec_t dt);
-int do_kern_ipc_msq(int update_every, usec_t dt);
-int do_uptime(int update_every, usec_t dt);
-int do_net_isr(int update_every, usec_t dt);
-int do_net_inet_tcp_states(int update_every, usec_t dt);
-int do_net_inet_tcp_stats(int update_every, usec_t dt);
-int do_net_inet_udp_stats(int update_every, usec_t dt);
-int do_net_inet_icmp_stats(int update_every, usec_t dt);
-int do_net_inet_ip_stats(int update_every, usec_t dt);
-int do_net_inet6_ip6_stats(int update_every, usec_t dt);
-int do_net_inet6_icmp6_stats(int update_every, usec_t dt);
-int do_getifaddrs(int update_every, usec_t dt);
-int do_getmntinfo(int update_every, usec_t dt);
-int do_kern_devstat(int update_every, usec_t dt);
-int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt);
-int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt);
-int do_ipfw(int update_every, usec_t dt);
-
-// metrics that need to be shared among data collectors
-extern unsigned long long zfs_arcstats_shrinkable_cache_size_bytes;
-
-#endif /* NETDATA_PLUGIN_FREEBSD_H */
diff --git a/collectors/freeipmi.plugin/Makefile.am b/collectors/freeipmi.plugin/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/collectors/freeipmi.plugin/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md
deleted file mode 120000
index f55ebf73d..000000000
--- a/collectors/freeipmi.plugin/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/intelligent_platform_management_interface_ipmi.md \ No newline at end of file
diff --git a/collectors/freeipmi.plugin/freeipmi_plugin.c b/collectors/freeipmi.plugin/freeipmi_plugin.c
deleted file mode 100644
index 6ec9b698b..000000000
--- a/collectors/freeipmi.plugin/freeipmi_plugin.c
+++ /dev/null
@@ -1,2094 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-/*
- * netdata freeipmi.plugin
- * Copyright (C) 2023 Netdata Inc.
- * GPL v3+
- *
- * Based on:
- * ipmimonitoring-sensors.c,v 1.51 2016/11/02 23:46:24 chu11 Exp
- * ipmimonitoring-sel.c,v 1.51 2016/11/02 23:46:24 chu11 Exp
- *
- * Copyright (C) 2007-2015 Lawrence Livermore National Security, LLC.
- * Copyright (C) 2006-2007 The Regents of the University of California.
- * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- * Written by Albert Chu <chu11@llnl.gov>
- * UCRL-CODE-222073
- */
-
-// ----------------------------------------------------------------------------
-// BEGIN NETDATA CODE
-
-// #define NETDATA_TIMING_REPORT 1
-#include "libnetdata/libnetdata.h"
-#include "libnetdata/required_dummies.h"
-
-#define FREEIPMI_GLOBAL_FUNCTION_SENSORS() do { \
- fprintf(stdout, PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"ipmi-sensors\" %d \"%s\"\n", 5, "Displays current sensor state and readings"); \
- } while(0)
-
-// component names, based on our patterns
-#define NETDATA_SENSOR_COMPONENT_MEMORY_MODULE "Memory Module"
-#define NETDATA_SENSOR_COMPONENT_MEMORY "Memory"
-#define NETDATA_SENSOR_COMPONENT_PROCESSOR "Processor"
-#define NETDATA_SENSOR_COMPONENT_IPU "Image Processor"
-#define NETDATA_SENSOR_COMPONENT_STORAGE "Storage"
-#define NETDATA_SENSOR_COMPONENT_MOTHERBOARD "Motherboard"
-#define NETDATA_SENSOR_COMPONENT_NETWORK "Network"
-#define NETDATA_SENSOR_COMPONENT_POWER_SUPPLY "Power Supply"
-#define NETDATA_SENSOR_COMPONENT_SYSTEM "System"
-#define NETDATA_SENSOR_COMPONENT_PERIPHERAL "Peripheral"
-
-// netdata plugin defaults
-#define SENSORS_DICT_KEY_SIZE 2048 // the max size of the key for the dictionary of sensors
-#define SPEED_TEST_ITERATIONS 5 // how many times to repeat data collection to decide latency
-#define IPMI_SENSORS_DASHBOARD_PRIORITY 90000 // the priority of the sensors charts on the dashboard
-#define IPMI_SEL_DASHBOARD_PRIORITY 99000 // the priority of the SEL events chart on the dashboard
-#define IPMI_SENSORS_MIN_UPDATE_EVERY 5 // the minimum data collection frequency for sensors
-#define IPMI_SEL_MIN_UPDATE_EVERY 30 // the minimum data collection frequency for SEL events
-#define IPMI_ENABLE_SEL_BY_DEFAULT true // true/false, to enable/disable SEL by default
-#define IPMI_RESTART_EVERY_SECONDS 14400 // restart the plugin every this many seconds
- // this is to prevent possible bugs/leaks in ipmi libraries
-#define IPMI_RESTART_IF_SENSORS_DONT_ITERATE_EVERY_SECONDS (10 * 60) // stale data collection detection time
-
-// forward definition of functions and structures
-struct netdata_ipmi_state;
-static void netdata_update_ipmi_sensor_reading(
- int record_id
- , int sensor_number
- , int sensor_type
- , int sensor_state
- , int sensor_units
- , int sensor_reading_type
- , char *sensor_name
- , void *sensor_reading
- , int event_reading_type_code
- , int sensor_bitmask_type
- , int sensor_bitmask
- , char **sensor_bitmask_strings
- , struct netdata_ipmi_state *state
-);
-static void netdata_update_ipmi_sel_events_count(struct netdata_ipmi_state *state, uint32_t events);
-
-// END NETDATA CODE
-// ----------------------------------------------------------------------------
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <string.h>
-#include <assert.h>
-#include <errno.h>
-#include <unistd.h>
-#include <sys/time.h>
-
-#include <ipmi_monitoring.h>
-#include <ipmi_monitoring_bitmasks.h>
-#include <ipmi_monitoring_offsets.h>
-
-/* Communication Configuration - Initialize accordingly */
-
-static netdata_mutex_t stdout_mutex = NETDATA_MUTEX_INITIALIZER;
-static bool function_plugin_should_exit = false;
-
-int update_every = IPMI_SENSORS_MIN_UPDATE_EVERY; // this is the minimum update frequency
-int update_every_sel = IPMI_SEL_MIN_UPDATE_EVERY; // this is the minimum update frequency for SEL events
-
-/* Hostname, NULL for In-band communication, non-null for a hostname */
-char *hostname = NULL;
-
-/* In-band Communication Configuration */
-int driver_type = -1; // IPMI_MONITORING_DRIVER_TYPE_KCS, etc. or -1 for default
-int disable_auto_probe = 0; /* probe for in-band device */
-unsigned int driver_address = 0; /* not used if probing */
-unsigned int register_spacing = 0; /* not used if probing */
-char *driver_device = NULL; /* not used if probing */
-
-/* Out-of-band Communication Configuration */
-int protocol_version = -1; // IPMI_MONITORING_PROTOCOL_VERSION_1_5, etc. or -1 for default
-char *username = "";
-char *password = "";
-unsigned char *k_g = NULL;
-unsigned int k_g_len = 0;
-int privilege_level = -1; // IPMI_MONITORING_PRIVILEGE_LEVEL_USER, etc. or -1 for default
-int authentication_type = -1; // IPMI_MONITORING_AUTHENTICATION_TYPE_MD5, etc. or -1 for default
-int cipher_suite_id = -1; /* 0 or -1 for default */
-int session_timeout = 0; /* 0 for default */
-int retransmission_timeout = 0; /* 0 for default */
-
-/* Workarounds - specify workaround flags if necessary */
-unsigned int workaround_flags = 0;
-
-/* Set to an appropriate alternate if desired */
-char *sdr_cache_directory = "/tmp";
-char *sdr_sensors_cache_format = ".netdata-freeipmi-sensors-%H-on-%L.sdr";
-char *sdr_sel_cache_format = ".netdata-freeipmi-sel-%H-on-%L.sdr";
-char *sensor_config_file = NULL;
-char *sel_config_file = NULL;
-
-// controlled via command line options
-unsigned int global_sel_flags = IPMI_MONITORING_SEL_FLAGS_REREAD_SDR_CACHE;
-unsigned int global_sensor_reading_flags = IPMI_MONITORING_SENSOR_READING_FLAGS_DISCRETE_READING|IPMI_MONITORING_SENSOR_READING_FLAGS_REREAD_SDR_CACHE;
-bool remove_reread_sdr_after_first_use = true;
-
-/* Initialization flags
- *
- * Most commonly bitwise OR IPMI_MONITORING_FLAGS_DEBUG and/or
- * IPMI_MONITORING_FLAGS_DEBUG_IPMI_PACKETS for extra debugging
- * information.
- */
-unsigned int ipmimonitoring_init_flags = 0;
-
-// ----------------------------------------------------------------------------
-// functions common to sensors and SEL
-
-static void initialize_ipmi_config (struct ipmi_monitoring_ipmi_config *ipmi_config) {
- fatal_assert(ipmi_config);
-
- ipmi_config->driver_type = driver_type;
- ipmi_config->disable_auto_probe = disable_auto_probe;
- ipmi_config->driver_address = driver_address;
- ipmi_config->register_spacing = register_spacing;
- ipmi_config->driver_device = driver_device;
-
- ipmi_config->protocol_version = protocol_version;
- ipmi_config->username = username;
- ipmi_config->password = password;
- ipmi_config->k_g = k_g;
- ipmi_config->k_g_len = k_g_len;
- ipmi_config->privilege_level = privilege_level;
- ipmi_config->authentication_type = authentication_type;
- ipmi_config->cipher_suite_id = cipher_suite_id;
- ipmi_config->session_timeout_len = session_timeout;
- ipmi_config->retransmission_timeout_len = retransmission_timeout;
-
- ipmi_config->workaround_flags = workaround_flags;
-}
-
-static const char *netdata_ipmi_get_sensor_type_string (int sensor_type, const char **component) {
- switch (sensor_type) {
- case IPMI_MONITORING_SENSOR_TYPE_RESERVED:
- return ("Reserved");
-
- case IPMI_MONITORING_SENSOR_TYPE_TEMPERATURE:
- return ("Temperature");
-
- case IPMI_MONITORING_SENSOR_TYPE_VOLTAGE:
- return ("Voltage");
-
- case IPMI_MONITORING_SENSOR_TYPE_CURRENT:
- return ("Current");
-
- case IPMI_MONITORING_SENSOR_TYPE_FAN:
- return ("Fan");
-
- case IPMI_MONITORING_SENSOR_TYPE_PHYSICAL_SECURITY:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("Physical Security");
-
- case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_SECURITY_VIOLATION_ATTEMPT:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("Platform Security Violation Attempt");
-
- case IPMI_MONITORING_SENSOR_TYPE_PROCESSOR:
- *component = NETDATA_SENSOR_COMPONENT_PROCESSOR;
- return ("Processor");
-
- case IPMI_MONITORING_SENSOR_TYPE_POWER_SUPPLY:
- *component = NETDATA_SENSOR_COMPONENT_POWER_SUPPLY;
- return ("Power Supply");
-
- case IPMI_MONITORING_SENSOR_TYPE_POWER_UNIT:
- *component = NETDATA_SENSOR_COMPONENT_POWER_SUPPLY;
- return ("Power Unit");
-
- case IPMI_MONITORING_SENSOR_TYPE_COOLING_DEVICE:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("Cooling Device");
-
- case IPMI_MONITORING_SENSOR_TYPE_OTHER_UNITS_BASED_SENSOR:
- return ("Other Units Based Sensor");
-
- case IPMI_MONITORING_SENSOR_TYPE_MEMORY:
- *component = NETDATA_SENSOR_COMPONENT_MEMORY;
- return ("Memory");
-
- case IPMI_MONITORING_SENSOR_TYPE_DRIVE_SLOT:
- *component = NETDATA_SENSOR_COMPONENT_STORAGE;
- return ("Drive Slot");
-
- case IPMI_MONITORING_SENSOR_TYPE_POST_MEMORY_RESIZE:
- *component = NETDATA_SENSOR_COMPONENT_MEMORY;
- return ("POST Memory Resize");
-
- case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_FIRMWARE_PROGRESS:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("System Firmware Progress");
-
- case IPMI_MONITORING_SENSOR_TYPE_EVENT_LOGGING_DISABLED:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("Event Logging Disabled");
-
- case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG1:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("Watchdog 1");
-
- case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_EVENT:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("System Event");
-
- case IPMI_MONITORING_SENSOR_TYPE_CRITICAL_INTERRUPT:
- return ("Critical Interrupt");
-
- case IPMI_MONITORING_SENSOR_TYPE_BUTTON_SWITCH:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("Button/Switch");
-
- case IPMI_MONITORING_SENSOR_TYPE_MODULE_BOARD:
- return ("Module/Board");
-
- case IPMI_MONITORING_SENSOR_TYPE_MICROCONTROLLER_COPROCESSOR:
- *component = NETDATA_SENSOR_COMPONENT_PROCESSOR;
- return ("Microcontroller/Coprocessor");
-
- case IPMI_MONITORING_SENSOR_TYPE_ADD_IN_CARD:
- return ("Add In Card");
-
- case IPMI_MONITORING_SENSOR_TYPE_CHASSIS:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("Chassis");
-
- case IPMI_MONITORING_SENSOR_TYPE_CHIP_SET:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("Chip Set");
-
- case IPMI_MONITORING_SENSOR_TYPE_OTHER_FRU:
- return ("Other Fru");
-
- case IPMI_MONITORING_SENSOR_TYPE_CABLE_INTERCONNECT:
- return ("Cable/Interconnect");
-
- case IPMI_MONITORING_SENSOR_TYPE_TERMINATOR:
- return ("Terminator");
-
- case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_BOOT_INITIATED:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("System Boot Initiated");
-
- case IPMI_MONITORING_SENSOR_TYPE_BOOT_ERROR:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("Boot Error");
-
- case IPMI_MONITORING_SENSOR_TYPE_OS_BOOT:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("OS Boot");
-
- case IPMI_MONITORING_SENSOR_TYPE_OS_CRITICAL_STOP:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("OS Critical Stop");
-
- case IPMI_MONITORING_SENSOR_TYPE_SLOT_CONNECTOR:
- return ("Slot/Connector");
-
- case IPMI_MONITORING_SENSOR_TYPE_SYSTEM_ACPI_POWER_STATE:
- return ("System ACPI Power State");
-
- case IPMI_MONITORING_SENSOR_TYPE_WATCHDOG2:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("Watchdog 2");
-
- case IPMI_MONITORING_SENSOR_TYPE_PLATFORM_ALERT:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("Platform Alert");
-
- case IPMI_MONITORING_SENSOR_TYPE_ENTITY_PRESENCE:
- return ("Entity Presence");
-
- case IPMI_MONITORING_SENSOR_TYPE_MONITOR_ASIC_IC:
- return ("Monitor ASIC/IC");
-
- case IPMI_MONITORING_SENSOR_TYPE_LAN:
- *component = NETDATA_SENSOR_COMPONENT_NETWORK;
- return ("LAN");
-
- case IPMI_MONITORING_SENSOR_TYPE_MANAGEMENT_SUBSYSTEM_HEALTH:
- *component = NETDATA_SENSOR_COMPONENT_SYSTEM;
- return ("Management Subsystem Health");
-
- case IPMI_MONITORING_SENSOR_TYPE_BATTERY:
- return ("Battery");
-
- case IPMI_MONITORING_SENSOR_TYPE_SESSION_AUDIT:
- return ("Session Audit");
-
- case IPMI_MONITORING_SENSOR_TYPE_VERSION_CHANGE:
- return ("Version Change");
-
- case IPMI_MONITORING_SENSOR_TYPE_FRU_STATE:
- return ("FRU State");
-
- case IPMI_MONITORING_SENSOR_TYPE_UNKNOWN:
- return ("Unknown");
-
- default:
- if(sensor_type >= IPMI_MONITORING_SENSOR_TYPE_OEM_MIN && sensor_type <= IPMI_MONITORING_SENSOR_TYPE_OEM_MAX)
- return ("OEM");
-
- return ("Unrecognized");
- }
-}
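
// Editorial note (sketch): netdata_ipmi_get_sensor_type_string() above only
// assigns *component for sensor types that map to a specific component, so
// the caller is expected to pre-initialize the pointer with its own default
// before the call, e.g. (illustrative default, not necessarily the plugin's):
//
//     const char *component = NETDATA_SENSOR_COMPONENT_PERIPHERAL;
//     const char *type = netdata_ipmi_get_sensor_type_string(sensor_type, &component);
//
// For types like "Fan" or "Battery" the function returns a type string but
// leaves *component untouched.
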
-
-#define netdata_ipmi_get_value_int(var, func, ctx) do { \
- (var) = func(ctx); \
- if( (var) < 0) { \
- collector_error("%s(): call to " #func " failed: %s", \
- __FUNCTION__, ipmi_monitoring_ctx_errormsg(ctx)); \
- goto cleanup; \
- } \
- timing_step(TIMING_STEP_FREEIPMI_READ_ ## var); \
-} while(0)
-
-#define netdata_ipmi_get_value_ptr(var, func, ctx) do { \
- (var) = func(ctx); \
- if(!(var)) { \
- collector_error("%s(): call to " #func " failed: %s", \
- __FUNCTION__, ipmi_monitoring_ctx_errormsg(ctx)); \
- goto cleanup; \
- } \
- timing_step(TIMING_STEP_FREEIPMI_READ_ ## var); \
-} while(0)
-
-#define netdata_ipmi_get_value_no_check(var, func, ctx) do { \
- (var) = func(ctx); \
- timing_step(TIMING_STEP_FREEIPMI_READ_ ## var); \
-} while(0)
-
-static int netdata_read_ipmi_sensors(struct ipmi_monitoring_ipmi_config *ipmi_config, struct netdata_ipmi_state *state) {
- timing_init();
-
- ipmi_monitoring_ctx_t ctx = NULL;
- unsigned int sensor_reading_flags = global_sensor_reading_flags;
- int i;
- int sensor_count;
- int rv = -1;
-
- if (!(ctx = ipmi_monitoring_ctx_create ())) {
- collector_error("ipmi_monitoring_ctx_create()");
- goto cleanup;
- }
-
- timing_step(TIMING_STEP_FREEIPMI_CTX_CREATE);
-
- if (sdr_cache_directory) {
- if (ipmi_monitoring_ctx_sdr_cache_directory (ctx, sdr_cache_directory) < 0) {
- collector_error("ipmi_monitoring_ctx_sdr_cache_directory(): %s\n", ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
- if (sdr_sensors_cache_format) {
- if (ipmi_monitoring_ctx_sdr_cache_filenames(ctx, sdr_sensors_cache_format) < 0) {
- collector_error("ipmi_monitoring_ctx_sdr_cache_filenames(): %s\n", ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
-
- timing_step(TIMING_STEP_FREEIPMI_DSR_CACHE_DIR);
-
- // Must call otherwise only default interpretations ever used
- // sensor_config_file can be NULL
- if (ipmi_monitoring_ctx_sensor_config_file (ctx, sensor_config_file) < 0) {
- collector_error( "ipmi_monitoring_ctx_sensor_config_file(): %s\n", ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- timing_step(TIMING_STEP_FREEIPMI_SENSOR_CONFIG_FILE);
-
- if ((sensor_count = ipmi_monitoring_sensor_readings_by_record_id (ctx,
- hostname,
- ipmi_config,
- sensor_reading_flags,
- NULL,
- 0,
- NULL,
- NULL)) < 0) {
- collector_error( "ipmi_monitoring_sensor_readings_by_record_id(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- timing_step(TIMING_STEP_FREEIPMI_SENSOR_READINGS_BY_X);
-
- for (i = 0; i < sensor_count; i++, ipmi_monitoring_sensor_iterator_next (ctx)) {
- int record_id, sensor_number, sensor_type, sensor_state, sensor_units,
- sensor_bitmask_type, sensor_bitmask, event_reading_type_code, sensor_reading_type;
-
- char **sensor_bitmask_strings = NULL;
- char *sensor_name = NULL;
- void *sensor_reading;
-
- netdata_ipmi_get_value_int(record_id, ipmi_monitoring_sensor_read_record_id, ctx);
- netdata_ipmi_get_value_int(sensor_number, ipmi_monitoring_sensor_read_sensor_number, ctx);
- netdata_ipmi_get_value_int(sensor_type, ipmi_monitoring_sensor_read_sensor_type, ctx);
- netdata_ipmi_get_value_ptr(sensor_name, ipmi_monitoring_sensor_read_sensor_name, ctx);
- netdata_ipmi_get_value_int(sensor_state, ipmi_monitoring_sensor_read_sensor_state, ctx);
- netdata_ipmi_get_value_int(sensor_units, ipmi_monitoring_sensor_read_sensor_units, ctx);
- netdata_ipmi_get_value_int(sensor_bitmask_type, ipmi_monitoring_sensor_read_sensor_bitmask_type, ctx);
- netdata_ipmi_get_value_int(sensor_bitmask, ipmi_monitoring_sensor_read_sensor_bitmask, ctx);
- // it's ok for this to be NULL, i.e. sensor_bitmask == IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN
- netdata_ipmi_get_value_no_check(sensor_bitmask_strings, ipmi_monitoring_sensor_read_sensor_bitmask_strings, ctx);
- netdata_ipmi_get_value_int(sensor_reading_type, ipmi_monitoring_sensor_read_sensor_reading_type, ctx);
- // whatever we read from the sensor, it is ok
- netdata_ipmi_get_value_no_check(sensor_reading, ipmi_monitoring_sensor_read_sensor_reading, ctx);
- netdata_ipmi_get_value_int(event_reading_type_code, ipmi_monitoring_sensor_read_event_reading_type_code, ctx);
-
- netdata_update_ipmi_sensor_reading(
- record_id, sensor_number, sensor_type, sensor_state, sensor_units, sensor_reading_type, sensor_name,
- sensor_reading, event_reading_type_code, sensor_bitmask_type, sensor_bitmask, sensor_bitmask_strings,
- state
- );
-
-#ifdef NETDATA_COMMENTED
- /* It is possible you may want to monitor specific event
- * conditions that may occur. If that is the case, you may want
- * to check out what specific bitmask type and bitmask events
- * occurred. See ipmi_monitoring_bitmasks.h for a list of
- * bitmasks and types.
- */
-
- if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN)
- printf (", %Xh", sensor_bitmask);
- else
- printf (", N/A");
-
- if (sensor_bitmask_type != IPMI_MONITORING_SENSOR_BITMASK_TYPE_UNKNOWN
- && sensor_bitmask_strings)
- {
- unsigned int i = 0;
-
- printf (",");
-
- while (sensor_bitmask_strings[i])
- {
- printf (" ");
-
- printf ("'%s'",
- sensor_bitmask_strings[i]);
-
- i++;
- }
- }
- else
- printf (", N/A");
-
- printf ("\n");
-#endif // NETDATA_COMMENTED
- }
-
- rv = 0;
-
-cleanup:
- if (ctx)
- ipmi_monitoring_ctx_destroy (ctx);
-
- timing_report();
-
- if(remove_reread_sdr_after_first_use)
- global_sensor_reading_flags &= ~(IPMI_MONITORING_SENSOR_READING_FLAGS_REREAD_SDR_CACHE);
-
- return (rv);
-}
-
-
-static int netdata_get_ipmi_sel_events_count(struct ipmi_monitoring_ipmi_config *ipmi_config, struct netdata_ipmi_state *state) {
- timing_init();
-
- ipmi_monitoring_ctx_t ctx = NULL;
- unsigned int sel_flags = global_sel_flags;
- int sel_count;
- int rv = -1;
-
- if (!(ctx = ipmi_monitoring_ctx_create ())) {
- collector_error("ipmi_monitoring_ctx_create()");
- goto cleanup;
- }
-
- if (sdr_cache_directory) {
- if (ipmi_monitoring_ctx_sdr_cache_directory (ctx, sdr_cache_directory) < 0) {
- collector_error( "ipmi_monitoring_ctx_sdr_cache_directory(): %s", ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
- if (sdr_sel_cache_format) {
- if (ipmi_monitoring_ctx_sdr_cache_filenames(ctx, sdr_sel_cache_format) < 0) {
- collector_error("ipmi_monitoring_ctx_sdr_cache_filenames(): %s\n", ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
- }
-
- // Must call otherwise only default interpretations ever used
- // sel_config_file can be NULL
- if (ipmi_monitoring_ctx_sel_config_file (ctx, sel_config_file) < 0) {
- collector_error( "ipmi_monitoring_ctx_sel_config_file(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- if ((sel_count = ipmi_monitoring_sel_by_record_id (ctx,
- hostname,
- ipmi_config,
- sel_flags,
- NULL,
- 0,
- NULL,
- NULL)) < 0) {
- collector_error( "ipmi_monitoring_sel_by_record_id(): %s",
- ipmi_monitoring_ctx_errormsg (ctx));
- goto cleanup;
- }
-
- netdata_update_ipmi_sel_events_count(state, sel_count);
-
- rv = 0;
-
-cleanup:
- if (ctx)
- ipmi_monitoring_ctx_destroy (ctx);
-
- timing_report();
-
- if(remove_reread_sdr_after_first_use)
- global_sel_flags &= ~(IPMI_MONITORING_SEL_FLAGS_REREAD_SDR_CACHE);
-
- return (rv);
-}
-
-// ----------------------------------------------------------------------------
-// copied from freeipmi codebase commit 8dea6dec4012d0899901e595f2c868a05e1cefed
-// added a netdata_ prefix in front, to avoid overriding the library functions
-
-// FROM: common/miscutil/network.c
-static int netdata_host_is_localhost (const char *host) {
- /* Ordered by my assumption of most popular */
- if (!strcasecmp (host, "localhost")
- || !strcmp (host, "127.0.0.1")
- || !strcasecmp (host, "ipv6-localhost")
- || !strcmp (host, "::1")
- || !strcasecmp (host, "ip6-localhost")
- || !strcmp (host, "0:0:0:0:0:0:0:1"))
- return (1);
-
- return (0);
-}
-
-// FROM: common/parsecommon/parse-common.h
-#define IPMI_PARSE_DEVICE_LAN_STR "lan"
-#define IPMI_PARSE_DEVICE_LAN_2_0_STR "lan_2_0"
-#define IPMI_PARSE_DEVICE_LAN_2_0_STR2 "lan20"
-#define IPMI_PARSE_DEVICE_LAN_2_0_STR3 "lan_20"
-#define IPMI_PARSE_DEVICE_LAN_2_0_STR4 "lan2_0"
-#define IPMI_PARSE_DEVICE_LAN_2_0_STR5 "lanplus"
-#define IPMI_PARSE_DEVICE_KCS_STR "kcs"
-#define IPMI_PARSE_DEVICE_SSIF_STR "ssif"
-#define IPMI_PARSE_DEVICE_OPENIPMI_STR "openipmi"
-#define IPMI_PARSE_DEVICE_OPENIPMI_STR2 "open"
-#define IPMI_PARSE_DEVICE_SUNBMC_STR "sunbmc"
-#define IPMI_PARSE_DEVICE_SUNBMC_STR2 "bmc"
-#define IPMI_PARSE_DEVICE_INTELDCMI_STR "inteldcmi"
-
-// FROM: common/parsecommon/parse-common.c
-// changed the return values to match ipmi_monitoring.h
-static int netdata_parse_outofband_driver_type (const char *str) {
- if (strcasecmp (str, IPMI_PARSE_DEVICE_LAN_STR) == 0)
- return (IPMI_MONITORING_PROTOCOL_VERSION_1_5);
-
- /* support "lanplus" for those that might be used to ipmitool.
- * support typo variants to ease.
- */
- else if (strcasecmp (str, IPMI_PARSE_DEVICE_LAN_2_0_STR) == 0
- || strcasecmp (str, IPMI_PARSE_DEVICE_LAN_2_0_STR2) == 0
- || strcasecmp (str, IPMI_PARSE_DEVICE_LAN_2_0_STR3) == 0
- || strcasecmp (str, IPMI_PARSE_DEVICE_LAN_2_0_STR4) == 0
- || strcasecmp (str, IPMI_PARSE_DEVICE_LAN_2_0_STR5) == 0)
- return (IPMI_MONITORING_PROTOCOL_VERSION_2_0);
-
- return (-1);
-}
-
-// FROM: common/parsecommon/parse-common.c
-// changed the return values to match ipmi_monitoring.h
-static int netdata_parse_inband_driver_type (const char *str) {
- if (strcasecmp (str, IPMI_PARSE_DEVICE_KCS_STR) == 0)
- return (IPMI_MONITORING_DRIVER_TYPE_KCS);
- else if (strcasecmp (str, IPMI_PARSE_DEVICE_SSIF_STR) == 0)
- return (IPMI_MONITORING_DRIVER_TYPE_SSIF);
- /* support "open" for those that might be used to
- * ipmitool.
- */
- else if (strcasecmp (str, IPMI_PARSE_DEVICE_OPENIPMI_STR) == 0
- || strcasecmp (str, IPMI_PARSE_DEVICE_OPENIPMI_STR2) == 0)
- return (IPMI_MONITORING_DRIVER_TYPE_OPENIPMI);
- /* support "bmc" for those that might be used to
- * ipmitool.
- */
- else if (strcasecmp (str, IPMI_PARSE_DEVICE_SUNBMC_STR) == 0
- || strcasecmp (str, IPMI_PARSE_DEVICE_SUNBMC_STR2) == 0)
- return (IPMI_MONITORING_DRIVER_TYPE_SUNBMC);
-
-#ifdef IPMI_MONITORING_DRIVER_TYPE_INTELDCMI
- else if (strcasecmp (str, IPMI_PARSE_DEVICE_INTELDCMI_STR) == 0)
- return (IPMI_MONITORING_DRIVER_TYPE_INTELDCMI);
-#endif // IPMI_MONITORING_DRIVER_TYPE_INTELDCMI
-
- return (-1);
-}
-
-// ----------------------------------------------------------------------------
-// BEGIN NETDATA CODE
-
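-// the collection types are bit flags, so a single collection thread
-// may gather sensor readings, SEL events, or both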
-typedef enum __attribute__((packed)) {
- IPMI_COLLECT_TYPE_SENSORS = (1 << 0),
- IPMI_COLLECT_TYPE_SEL = (1 << 1),
-} IPMI_COLLECTION_TYPE;
-
-struct sensor {
- int sensor_type;
- int sensor_state;
- int sensor_units;
- char *sensor_name;
-
- int sensor_reading_type;
- union {
- uint8_t bool_value;
- uint32_t uint32_value;
- double double_value;
- } sensor_reading;
-
- // netdata provided
- const char *context;
- const char *title;
- const char *units;
- const char *family;
- const char *chart_type;
- const char *dimension;
- int priority;
-
- const char *type;
- const char *component;
-
- int multiplier;
- bool do_metric;
- bool do_state;
- bool metric_chart_sent;
- bool state_chart_sent;
- usec_t last_collected_metric_ut;
- usec_t last_collected_state_ut;
-};
-
-typedef enum __attribute__((packed)) {
- ICS_INIT,
- ICS_INIT_FAILED,
- ICS_RUNNING,
- ICS_FAILED,
-} IPMI_COLLECTOR_STATUS;
-
-struct netdata_ipmi_state {
- bool debug;
-
- struct {
- IPMI_COLLECTOR_STATUS status;
- usec_t last_iteration_ut;
- size_t collected;
- usec_t now_ut;
- usec_t freq_ut;
- int priority;
- DICTIONARY *dict;
- } sensors;
-
- struct {
- IPMI_COLLECTOR_STATUS status;
- usec_t last_iteration_ut;
- size_t events;
- usec_t now_ut;
- usec_t freq_ut;
- int priority;
- } sel;
-
- struct {
- usec_t now_ut;
- } updates;
-};
-
-struct netdata_ipmi_state state = {0};
-
-// ----------------------------------------------------------------------------
-// excluded record ids maintenance (both for sensor data and state)
-
-static int *excluded_record_ids = NULL;
-size_t excluded_record_ids_length = 0;
-
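-// parse a list of sensor record ids to exclude; any non-digit character acts as
-// a separator, so "1,2,3" and "1 2 3" both yield {1, 2, 3}; zero ids are ignored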
-static void excluded_record_ids_parse(const char *s, bool debug) {
- if(!s) return;
-
- while(*s) {
- while(*s && !isdigit(*s)) s++;
-
- if(isdigit(*s)) {
- char *e;
- unsigned long n = strtoul(s, &e, 10);
- s = e;
-
- if(n != 0) {
- excluded_record_ids = reallocz(excluded_record_ids, (excluded_record_ids_length + 1) * sizeof(int));
- excluded_record_ids[excluded_record_ids_length++] = (int)n;
- }
- }
- }
-
- if(debug) {
- fprintf(stderr, "%s: excluded record ids:", program_name);
- size_t i;
- for(i = 0; i < excluded_record_ids_length; i++) {
- fprintf(stderr, " %d", excluded_record_ids[i]);
- }
- fprintf(stderr, "\n");
- }
-}
-
-static int *excluded_status_record_ids = NULL;
-size_t excluded_status_record_ids_length = 0;
-
-static void excluded_status_record_ids_parse(const char *s, bool debug) {
- if(!s) return;
-
- while(*s) {
- while(*s && !isdigit(*s)) s++;
-
- if(isdigit(*s)) {
- char *e;
- unsigned long n = strtoul(s, &e, 10);
- s = e;
-
- if(n != 0) {
- excluded_status_record_ids = reallocz(excluded_status_record_ids, (excluded_status_record_ids_length + 1) * sizeof(int));
- excluded_status_record_ids[excluded_status_record_ids_length++] = (int)n;
- }
- }
- }
-
- if(debug) {
- fprintf(stderr, "%s: excluded status record ids:", program_name);
- size_t i;
- for(i = 0; i < excluded_status_record_ids_length; i++) {
- fprintf(stderr, " %d", excluded_status_record_ids[i]);
- }
- fprintf(stderr, "\n");
- }
-}
-
-
-static int excluded_record_ids_check(int record_id) {
- size_t i;
-
- for(i = 0; i < excluded_record_ids_length; i++) {
- if(excluded_record_ids[i] == record_id)
- return 1;
- }
-
- return 0;
-}
-
-static int excluded_status_record_ids_check(int record_id) {
- size_t i;
-
- for(i = 0; i < excluded_status_record_ids_length; i++) {
- if(excluded_status_record_ids[i] == record_id)
- return 1;
- }
-
- return 0;
-}
-
-// ----------------------------------------------------------------------------
-// data collection functions
-
-struct {
- const char *search;
- SIMPLE_PATTERN *pattern;
- const char *label;
-} sensors_component_patterns[] = {
-
-    // The order is important!
-    // The patterns are evaluated top to bottom;
-    // the first one that matches is used.
-
- {
- .search = "*DIMM*|*_DIM*|*VTT*|*VDDQ*|*ECC*|*MEM*CRC*|*MEM*BD*",
- .label = NETDATA_SENSOR_COMPONENT_MEMORY_MODULE,
- },
- {
- .search = "*CPU*|SOC_*|*VDDCR*|P*_VDD*|*_DTS|*VCORE*|*PROC*",
- .label = NETDATA_SENSOR_COMPONENT_PROCESSOR,
- },
- {
- .search = "IPU*",
- .label = NETDATA_SENSOR_COMPONENT_IPU,
- },
- {
- .search = "M2_*|*SSD*|*HSC*|*HDD*|*NVME*",
- .label = NETDATA_SENSOR_COMPONENT_STORAGE,
- },
- {
- .search = "MB_*|*PCH*|*VBAT*|*I/O*BD*|*IO*BD*",
- .label = NETDATA_SENSOR_COMPONENT_MOTHERBOARD,
- },
- {
- .search = "Watchdog|SEL|SYS_*|*CHASSIS*",
- .label = NETDATA_SENSOR_COMPONENT_SYSTEM,
- },
- {
- .search = "PS*|P_*|*PSU*|*PWR*|*TERMV*|*D2D*",
- .label = NETDATA_SENSOR_COMPONENT_POWER_SUPPLY,
- },
-
- // fallback components
- {
- .search = "VR_P*|*VRMP*",
- .label = NETDATA_SENSOR_COMPONENT_PROCESSOR,
- },
- {
- .search = "*VSB*|*PS*",
- .label = NETDATA_SENSOR_COMPONENT_POWER_SUPPLY,
- },
- {
- .search = "*MEM*|*MEM*RAID*",
- .label = NETDATA_SENSOR_COMPONENT_MEMORY,
- },
- {
- .search = "*RAID*", // there is also "Memory RAID", so keep this after memory
- .label = NETDATA_SENSOR_COMPONENT_STORAGE,
- },
- {
- .search = "*PERIPHERAL*|*USB*",
- .label = NETDATA_SENSOR_COMPONENT_PERIPHERAL,
- },
- {
- .search = "*FAN*|*12V*|*VCC*|*PCI*|*CHIPSET*|*AMP*|*BD*",
- .label = NETDATA_SENSOR_COMPONENT_SYSTEM,
- },
-
- // terminator
- {
- .search = NULL,
- .label = NULL,
- }
-};
-
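-// map a sensor name to a component label, using the first pattern above that matches;
-// e.g. a sensor named "CPU1 Temp" matches "*CPU*" and gets the Processor component label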
-static const char *netdata_sensor_name_to_component(const char *sensor_name) {
- for(int i = 0; sensors_component_patterns[i].search ;i++) {
- if(!sensors_component_patterns[i].pattern)
- sensors_component_patterns[i].pattern = simple_pattern_create(sensors_component_patterns[i].search, "|", SIMPLE_PATTERN_EXACT, false);
-
- if(simple_pattern_matches(sensors_component_patterns[i].pattern, sensor_name))
- return sensors_component_patterns[i].label;
- }
-
- return "Other";
-}
-
-const char *netdata_collect_type_to_string(IPMI_COLLECTION_TYPE type) {
- if((type & (IPMI_COLLECT_TYPE_SENSORS|IPMI_COLLECT_TYPE_SEL)) == (IPMI_COLLECT_TYPE_SENSORS|IPMI_COLLECT_TYPE_SEL))
- return "sensors,sel";
- if(type & IPMI_COLLECT_TYPE_SEL)
- return "sel";
- if(type & IPMI_COLLECT_TYPE_SENSORS)
- return "sensors";
-
- return "unknown";
-}
-
-static void netdata_sensor_set_value(struct sensor *sn, void *sensor_reading, struct netdata_ipmi_state *state __maybe_unused) {
- switch(sn->sensor_reading_type) {
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
- sn->sensor_reading.bool_value = *((uint8_t *)sensor_reading);
- break;
-
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
- sn->sensor_reading.uint32_value = *((uint32_t *)sensor_reading);
- break;
-
- case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
- sn->sensor_reading.double_value = *((double *)sensor_reading);
- break;
-
- default:
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNKNOWN:
- sn->do_metric = false;
- break;
- }
-}
-
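-// called once per sensor per collection cycle: it either refreshes an existing
-// dictionary entry, or allocates and classifies a new one the first time a sensor is seen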
-static void netdata_update_ipmi_sensor_reading(
- int record_id
- , int sensor_number
- , int sensor_type
- , int sensor_state
- , int sensor_units
- , int sensor_reading_type
- , char *sensor_name
- , void *sensor_reading
- , int event_reading_type_code __maybe_unused
- , int sensor_bitmask_type __maybe_unused
- , int sensor_bitmask __maybe_unused
- , char **sensor_bitmask_strings __maybe_unused
- , struct netdata_ipmi_state *state
-) {
- if(unlikely(sensor_state == IPMI_MONITORING_STATE_UNKNOWN &&
- sensor_type == IPMI_MONITORING_SENSOR_TYPE_UNKNOWN &&
- sensor_units == IPMI_MONITORING_SENSOR_UNITS_UNKNOWN &&
- sensor_reading_type == IPMI_MONITORING_SENSOR_READING_TYPE_UNKNOWN &&
- (!sensor_name || !*sensor_name)))
- // we can't do anything about this sensor - everything is unknown
- return;
-
- if(unlikely(!sensor_name || !*sensor_name))
- sensor_name = "UNNAMED";
-
- state->sensors.collected++;
-
- char key[SENSORS_DICT_KEY_SIZE + 1];
- snprintfz(key, SENSORS_DICT_KEY_SIZE, "i%d_n%d_t%d_u%d_%s",
- record_id, sensor_number, sensor_reading_type, sensor_units, sensor_name);
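-    // e.g. record id 12, sensor number 7, reading type 3, units 1, name "CPU1 Temp"
-    // produce the key "i12_n7_t3_u1_CPU1 Temp"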
-
- // find the sensor record
- const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(state->sensors.dict, key);
- if(likely(item)) {
- // recurring collection
-
- if(state->debug)
- fprintf(stderr, "%s: reusing sensor record for sensor '%s', id %d, number %d, type %d, state %d, units %d, reading_type %d\n",
- program_name, sensor_name, record_id, sensor_number, sensor_type, sensor_state, sensor_units, sensor_reading_type);
-
- struct sensor *sn = dictionary_acquired_item_value(item);
-
- if(sensor_reading) {
- netdata_sensor_set_value(sn, sensor_reading, state);
- sn->last_collected_metric_ut = state->sensors.now_ut;
- }
-
- sn->sensor_state = sensor_state;
-
- sn->last_collected_state_ut = state->sensors.now_ut;
-
- dictionary_acquired_item_release(state->sensors.dict, item);
-
- return;
- }
-
- if(state->debug)
- fprintf(stderr, "Allocating new sensor data record for sensor '%s', id %d, number %d, type %d, state %d, units %d, reading_type %d\n",
- sensor_name, record_id, sensor_number, sensor_type, sensor_state, sensor_units, sensor_reading_type);
-
- // check if it is excluded
- bool excluded_metric = excluded_record_ids_check(record_id);
- bool excluded_state = excluded_status_record_ids_check(record_id);
-
- if(excluded_metric) {
- if(state->debug)
- fprintf(stderr, "Sensor '%s' is excluded by excluded_record_ids_check()\n", sensor_name);
- }
-
- if(excluded_state) {
- if(state->debug)
- fprintf(stderr, "Sensor '%s' is excluded for status check, by excluded_status_record_ids_check()\n", sensor_name);
- }
-
- struct sensor t = {
- .sensor_type = sensor_type,
- .sensor_state = sensor_state,
- .sensor_units = sensor_units,
- .sensor_reading_type = sensor_reading_type,
- .sensor_name = strdupz(sensor_name),
- .component = netdata_sensor_name_to_component(sensor_name),
- .do_state = !excluded_state,
- .do_metric = !excluded_metric,
- };
-
- t.type = netdata_ipmi_get_sensor_type_string(t.sensor_type, &t.component);
-
- switch(t.sensor_units) {
- case IPMI_MONITORING_SENSOR_UNITS_CELSIUS:
- t.dimension = "temperature";
- t.context = "ipmi.sensor_temperature_c";
- t.title = "IPMI Sensor Temperature Celsius";
- t.units = "Celsius";
- t.family = "temperatures";
- t.chart_type = "line";
- t.priority = state->sensors.priority + 10;
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_FAHRENHEIT:
- t.dimension = "temperature";
- t.context = "ipmi.sensor_temperature_f";
- t.title = "IPMI Sensor Temperature Fahrenheit";
- t.units = "Fahrenheit";
- t.family = "temperatures";
- t.chart_type = "line";
- t.priority = state->sensors.priority + 20;
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_VOLTS:
- t.dimension = "voltage";
- t.context = "ipmi.sensor_voltage";
- t.title = "IPMI Sensor Voltage";
- t.units = "Volts";
- t.family = "voltages";
- t.chart_type = "line";
- t.priority = state->sensors.priority + 30;
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_AMPS:
- t.dimension = "ampere";
- t.context = "ipmi.sensor_ampere";
- t.title = "IPMI Sensor Current";
- t.units = "Amps";
- t.family = "current";
- t.chart_type = "line";
- t.priority = state->sensors.priority + 40;
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_RPM:
- t.dimension = "rotations";
- t.context = "ipmi.sensor_fan_speed";
- t.title = "IPMI Sensor Fans Speed";
- t.units = "RPM";
- t.family = "fans";
- t.chart_type = "line";
- t.priority = state->sensors.priority + 50;
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_WATTS:
- t.dimension = "power";
- t.context = "ipmi.sensor_power";
- t.title = "IPMI Sensor Power";
- t.units = "Watts";
- t.family = "power";
- t.chart_type = "line";
- t.priority = state->sensors.priority + 60;
- break;
-
- case IPMI_MONITORING_SENSOR_UNITS_PERCENT:
- t.dimension = "percentage";
- t.context = "ipmi.sensor_reading_percent";
- t.title = "IPMI Sensor Reading Percentage";
- t.units = "%%";
- t.family = "other";
- t.chart_type = "line";
- t.priority = state->sensors.priority + 70;
- break;
-
- default:
- t.priority = state->sensors.priority + 80;
- t.do_metric = false;
- break;
- }
-
- switch(sensor_reading_type) {
- case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
- t.multiplier = 1000;
- break;
-
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
- t.multiplier = 1;
- break;
-
- default:
- t.do_metric = false;
- break;
- }
-
- if(sensor_reading) {
- netdata_sensor_set_value(&t, sensor_reading, state);
- t.last_collected_metric_ut = state->sensors.now_ut;
- }
- t.last_collected_state_ut = state->sensors.now_ut;
-
- dictionary_set(state->sensors.dict, key, &t, sizeof(t));
-}
-
-static void netdata_update_ipmi_sel_events_count(struct netdata_ipmi_state *state, uint32_t events) {
- state->sel.events = events;
-}
-
-int netdata_ipmi_collect_data(struct ipmi_monitoring_ipmi_config *ipmi_config, IPMI_COLLECTION_TYPE type, struct netdata_ipmi_state *state) {
- errno = 0;
-
- if(type & IPMI_COLLECT_TYPE_SENSORS) {
- state->sensors.collected = 0;
- state->sensors.now_ut = now_monotonic_usec();
-
- if (netdata_read_ipmi_sensors(ipmi_config, state) < 0) return -1;
- }
-
- if(type & IPMI_COLLECT_TYPE_SEL) {
- state->sel.events = 0;
- state->sel.now_ut = now_monotonic_usec();
- if(netdata_get_ipmi_sel_events_count(ipmi_config, state) < 0) return -2;
- }
-
- return 0;
-}
-
-int netdata_ipmi_detect_speed_secs(struct ipmi_monitoring_ipmi_config *ipmi_config, IPMI_COLLECTION_TYPE type, struct netdata_ipmi_state *state) {
- int i, checks = SPEED_TEST_ITERATIONS, successful = 0;
- usec_t total = 0;
-
- for(i = 0 ; i < checks ; i++) {
- if(unlikely(state->debug))
- fprintf(stderr, "%s: checking %s data collection speed iteration %d of %d\n",
- program_name, netdata_collect_type_to_string(type), i + 1, checks);
-
- // measure the time a data collection needs
- usec_t start = now_realtime_usec();
-
- if(netdata_ipmi_collect_data(ipmi_config, type, state) < 0)
- continue;
-
- usec_t end = now_realtime_usec();
-
- successful++;
-
- if(unlikely(state->debug))
- fprintf(stderr, "%s: %s data collection speed was %"PRIu64" usec\n",
- program_name, netdata_collect_type_to_string(type), end - start);
-
- // add it to our total
- total += end - start;
-
- // wait the same time
- // to avoid flooding the IPMI processor with requests
- sleep_usec(end - start);
- }
-
- if(!successful)
- return 0;
-
-    // assume a 2x safety margin over the measured time:
-    // find the average duration in microseconds, double it,
-    // and round it up to the next whole second
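-    // e.g. 5 successful iterations totaling 2,500,000 usec:
-    // 2,500,000 * 2 / 5 / 1,000,000 + 1 = 2 seconds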
-
- return (int)(( total * 2 / successful / USEC_PER_SEC ) + 1);
-}
-
-// ----------------------------------------------------------------------------
-// data collection threads
-
-struct ipmi_collection_thread {
- struct ipmi_monitoring_ipmi_config ipmi_config;
- int freq_s;
- bool debug;
- IPMI_COLLECTION_TYPE type;
- SPINLOCK spinlock;
- struct netdata_ipmi_state state;
-};
-
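-// per-collector thread: it first detects the minimum safe update frequency, then
-// collects into a private state copy and publishes it to the shared state under the spinlock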
-void *netdata_ipmi_collection_thread(void *ptr) {
- struct ipmi_collection_thread *t = ptr;
-
- if(t->debug) fprintf(stderr, "%s: calling initialize_ipmi_config() for %s\n",
- program_name, netdata_collect_type_to_string(t->type));
-
- initialize_ipmi_config(&t->ipmi_config);
-
- if(t->debug) fprintf(stderr, "%s: detecting IPMI minimum update frequency for %s...\n",
- program_name, netdata_collect_type_to_string(t->type));
-
- int freq_s = netdata_ipmi_detect_speed_secs(&t->ipmi_config, t->type, &t->state);
- if(!freq_s) {
- if(t->type & IPMI_COLLECT_TYPE_SENSORS) {
- t->state.sensors.status = ICS_INIT_FAILED;
- t->state.sensors.last_iteration_ut = 0;
- }
-
- if(t->type & IPMI_COLLECT_TYPE_SEL) {
- t->state.sel.status = ICS_INIT_FAILED;
- t->state.sel.last_iteration_ut = 0;
- }
-
- return ptr;
- }
- else {
- if(t->type & IPMI_COLLECT_TYPE_SENSORS) {
- t->state.sensors.status = ICS_RUNNING;
- }
-
- if(t->type & IPMI_COLLECT_TYPE_SEL) {
- t->state.sel.status = ICS_RUNNING;
- }
- }
-
- t->freq_s = freq_s = MAX(t->freq_s, freq_s);
-
- if(t->debug) {
- fprintf(stderr, "%s: IPMI minimum update frequency of %s was calculated to %d seconds.\n",
- program_name, netdata_collect_type_to_string(t->type), t->freq_s);
-
- fprintf(stderr, "%s: starting data collection of %s\n",
- program_name, netdata_collect_type_to_string(t->type));
- }
-
- size_t iteration = 0, failures = 0;
- usec_t step = t->freq_s * USEC_PER_SEC;
-
- heartbeat_t hb;
- heartbeat_init(&hb);
- while(++iteration) {
- heartbeat_next(&hb, step);
-
- if(t->debug)
- fprintf(stderr, "%s: calling netdata_ipmi_collect_data() for %s\n",
- program_name, netdata_collect_type_to_string(t->type));
-
- struct netdata_ipmi_state tmp_state = t->state;
-
- if(t->type & IPMI_COLLECT_TYPE_SENSORS) {
- tmp_state.sensors.last_iteration_ut = now_monotonic_usec();
- tmp_state.sensors.freq_ut = t->freq_s * USEC_PER_SEC;
- }
-
- if(t->type & IPMI_COLLECT_TYPE_SEL) {
- tmp_state.sel.last_iteration_ut = now_monotonic_usec();
- tmp_state.sel.freq_ut = t->freq_s * USEC_PER_SEC;
- }
-
- if(netdata_ipmi_collect_data(&t->ipmi_config, t->type, &tmp_state) != 0)
- failures++;
- else
- failures = 0;
-
- if(failures > 10) {
- collector_error("%s() failed to collect %s data for %zu consecutive times, having made %zu iterations.",
- __FUNCTION__, netdata_collect_type_to_string(t->type), failures, iteration);
-
- if(t->type & IPMI_COLLECT_TYPE_SENSORS) {
- t->state.sensors.status = ICS_FAILED;
- t->state.sensors.last_iteration_ut = 0;
- }
-
- if(t->type & IPMI_COLLECT_TYPE_SEL) {
- t->state.sel.status = ICS_FAILED;
- t->state.sel.last_iteration_ut = 0;
- }
-
- break;
- }
-
- spinlock_lock(&t->spinlock);
- t->state = tmp_state;
- spinlock_unlock(&t->spinlock);
- }
-
- return ptr;
-}
-
-// ----------------------------------------------------------------------------
-// sending data to netdata
-
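-// a sensor is considered fresh if it was collected within the last two collection intervals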
-static inline bool is_sensor_updated(usec_t last_collected_ut, usec_t now_ut, usec_t freq) {
-    return now_ut - last_collected_ut < freq * 2;
-}
-
-static size_t send_ipmi_sensor_metrics_to_netdata(struct netdata_ipmi_state *state) {
- if(state->sensors.status != ICS_RUNNING) {
- if(unlikely(state->debug))
- fprintf(stderr, "%s: %s() sensors state is not RUNNING\n",
- program_name, __FUNCTION__ );
- return 0;
- }
-
- size_t total_sensors_sent = 0;
- int update_every = (int)(state->sensors.freq_ut / USEC_PER_SEC);
- struct sensor *sn;
-
- netdata_mutex_lock(&stdout_mutex);
- // generate the CHART/DIMENSION lines, if we have to
- dfe_start_reentrant(state->sensors.dict, sn) {
- if(unlikely(!sn->do_metric && !sn->do_state))
- continue;
-
- bool did_metric = false, did_state = false;
-
- if(likely(sn->do_metric)) {
- if(unlikely(!is_sensor_updated(sn->last_collected_metric_ut, state->updates.now_ut, state->sensors.freq_ut))) {
- if(unlikely(state->debug))
- fprintf(stderr, "%s: %s() sensor '%s' metric is not UPDATED (last updated %"PRIu64", now %"PRIu64", freq %"PRIu64"\n",
- program_name, __FUNCTION__, sn->sensor_name, sn->last_collected_metric_ut, state->updates.now_ut, state->sensors.freq_ut);
- }
- else {
- if (unlikely(!sn->metric_chart_sent)) {
- sn->metric_chart_sent = true;
-
- printf("CHART '%s_%s' '' '%s' '%s' '%s' '%s' '%s' %d %d '' '%s' '%s'\n",
- sn->context, sn_dfe.name, sn->title, sn->units, sn->family, sn->context,
- sn->chart_type, sn->priority + 1, update_every, program_name, "sensors");
-
- printf("CLABEL 'sensor' '%s' 1\n", sn->sensor_name);
- printf("CLABEL 'type' '%s' 1\n", sn->type);
- printf("CLABEL 'component' '%s' 1\n", sn->component);
- printf("CLABEL_COMMIT\n");
-
- printf("DIMENSION '%s' '' absolute 1 %d\n", sn->dimension, sn->multiplier);
- }
-
- printf("BEGIN '%s_%s'\n", sn->context, sn_dfe.name);
-
- switch (sn->sensor_reading_type) {
-                    case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
-                        printf("SET '%s' = %u\n", sn->dimension, sn->sensor_reading.uint32_value);
-                        break;
-
-                    case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
-                        printf("SET '%s' = %lld\n", sn->dimension,
-                               (long long int) (sn->sensor_reading.double_value * sn->multiplier));
-                        break;
-
-                    case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
-                        printf("SET '%s' = %u\n", sn->dimension, sn->sensor_reading.bool_value);
-                        break;
-
- default:
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNKNOWN:
- // this should never happen because we also do the same check at netdata_get_sensor()
- sn->do_metric = false;
- break;
- }
-
- printf("END\n");
- did_metric = true;
- }
- }
-
- if(likely(sn->do_state)) {
- if(unlikely(!is_sensor_updated(sn->last_collected_state_ut, state->updates.now_ut, state->sensors.freq_ut))) {
- if (unlikely(state->debug))
- fprintf(stderr, "%s: %s() sensor '%s' state is not UPDATED (last updated %"PRIu64", now %"PRIu64", freq %"PRIu64"\n",
- program_name, __FUNCTION__, sn->sensor_name, sn->last_collected_state_ut, state->updates.now_ut, state->sensors.freq_ut);
- }
- else {
- if (unlikely(!sn->state_chart_sent)) {
- sn->state_chart_sent = true;
-
- printf("CHART 'ipmi.sensor_state_%s' '' 'IPMI Sensor State' 'state' 'states' 'ipmi.sensor_state' 'line' %d %d '' '%s' '%s'\n",
- sn_dfe.name, sn->priority, update_every, program_name, "sensors");
-
- printf("CLABEL 'sensor' '%s' 1\n", sn->sensor_name);
- printf("CLABEL 'type' '%s' 1\n", sn->type);
- printf("CLABEL 'component' '%s' 1\n", sn->component);
- printf("CLABEL_COMMIT\n");
-
- printf("DIMENSION 'nominal' '' absolute 1 1\n");
- printf("DIMENSION 'warning' '' absolute 1 1\n");
- printf("DIMENSION 'critical' '' absolute 1 1\n");
- printf("DIMENSION 'unknown' '' absolute 1 1\n");
- }
-
- printf("BEGIN 'ipmi.sensor_state_%s'\n", sn_dfe.name);
- printf("SET 'nominal' = %lld\n", sn->sensor_state == IPMI_MONITORING_STATE_NOMINAL ? 1LL : 0LL);
- printf("SET 'warning' = %lld\n", sn->sensor_state == IPMI_MONITORING_STATE_WARNING ? 1LL : 0LL);
- printf("SET 'critical' = %lld\n", sn->sensor_state == IPMI_MONITORING_STATE_CRITICAL ? 1LL : 0LL);
- printf("SET 'unknown' = %lld\n", sn->sensor_state == IPMI_MONITORING_STATE_UNKNOWN ? 1LL : 0LL);
- printf("END\n");
- did_state = true;
- }
- }
-
- if(likely(did_metric || did_state))
- total_sensors_sent++;
- }
- dfe_done(sn);
-
- netdata_mutex_unlock(&stdout_mutex);
-
- return total_sensors_sent;
-}
-
-static size_t send_ipmi_sel_metrics_to_netdata(struct netdata_ipmi_state *state) {
- static bool sel_chart_generated = false;
-
- netdata_mutex_lock(&stdout_mutex);
-
- if(likely(state->sel.status == ICS_RUNNING)) {
- if(unlikely(!sel_chart_generated)) {
- sel_chart_generated = true;
- printf("CHART ipmi.events '' 'IPMI Events' 'events' 'events' ipmi.sel area %d %d '' '%s' '%s'\n"
- , state->sel.priority + 2
- , (int)(state->sel.freq_ut / USEC_PER_SEC)
- , program_name
- , "sel"
- );
- printf("DIMENSION events '' absolute 1 1\n");
- }
-
- printf(
- "BEGIN ipmi.events\n"
- "SET events = %zu\n"
- "END\n"
- , state->sel.events
- );
- }
-
- netdata_mutex_unlock(&stdout_mutex);
-
- return state->sel.events;
-}
-
-// ----------------------------------------------------------------------------
-
-static const char *get_sensor_state_string(struct sensor *sn) {
- switch (sn->sensor_state) {
- case IPMI_MONITORING_STATE_NOMINAL:
- return "nominal";
- case IPMI_MONITORING_STATE_WARNING:
- return "warning";
- case IPMI_MONITORING_STATE_CRITICAL:
- return "critical";
- default:
- return "unknown";
- }
-}
-
-static const char *get_sensor_function_priority(struct sensor *sn) {
- switch (sn->sensor_state) {
- case IPMI_MONITORING_STATE_WARNING:
- return "warning";
- case IPMI_MONITORING_STATE_CRITICAL:
- return "critical";
- default:
- return "normal";
- }
-}
-
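-// implements the "ipmi-sensors" function: responds with the current sensor readings
-// and states as a JSON table, for on-demand queries from the Netdata dashboard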
-static void freeipmi_function_sensors(const char *transaction, char *function __maybe_unused, int timeout __maybe_unused, bool *cancelled __maybe_unused) {
- time_t expires = now_realtime_sec() + update_every;
-
- BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS);
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_time_t(wb, "update_every", update_every);
- buffer_json_member_add_string(wb, "help", "View IPMI sensor readings and its state");
- buffer_json_member_add_array(wb, "data");
-
- struct sensor *sn;
- dfe_start_reentrant(state.sensors.dict, sn) {
- if (unlikely(!sn->do_metric && !sn->do_state))
- continue;
-
- double reading = NAN;
- switch (sn->sensor_reading_type) {
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER32:
- reading = (double)sn->sensor_reading.uint32_value;
- break;
- case IPMI_MONITORING_SENSOR_READING_TYPE_DOUBLE:
- reading = (double)(sn->sensor_reading.double_value);
- break;
- case IPMI_MONITORING_SENSOR_READING_TYPE_UNSIGNED_INTEGER8_BOOL:
- reading = (double)sn->sensor_reading.bool_value;
- break;
- }
-
- buffer_json_add_array_item_array(wb);
-
- buffer_json_add_array_item_string(wb, sn->sensor_name);
- buffer_json_add_array_item_string(wb, sn->type);
- buffer_json_add_array_item_string(wb, sn->component);
- buffer_json_add_array_item_double(wb, reading);
- buffer_json_add_array_item_string(wb, sn->units);
- buffer_json_add_array_item_string(wb, get_sensor_state_string(sn));
-
- buffer_json_add_array_item_object(wb);
- buffer_json_member_add_string(wb, "severity", get_sensor_function_priority(sn));
- buffer_json_object_close(wb);
-
- buffer_json_array_close(wb);
- }
- dfe_done(sn);
-
- buffer_json_array_close(wb); // data
- buffer_json_member_add_object(wb, "columns");
- {
- size_t field_id = 0;
-
- buffer_rrdf_table_add_field(wb, field_id++, "Sensor", "Sensor Name",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_FULL_WIDTH,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Type", "Sensor Type",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Component", "Sensor Component",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Reading", "Sensor Current Reading",
- RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, NULL, 0, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Units", "Sensor Reading Units",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "State", "Sensor State",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
- buffer_rrdf_table_add_field(
- wb, field_id++,
- "rowOptions", "rowOptions",
- RRDF_FIELD_TYPE_NONE,
- RRDR_FIELD_VISUAL_ROW_OPTIONS,
- RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_FIXED,
- NULL,
- RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_DUMMY,
- NULL);
- }
-
- buffer_json_object_close(wb); // columns
- buffer_json_member_add_string(wb, "default_sort_column", "Type");
-
- buffer_json_member_add_object(wb, "charts");
- {
- buffer_json_member_add_object(wb, "Sensors");
- {
- buffer_json_member_add_string(wb, "name", "Sensors");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Sensor");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // charts
-
- buffer_json_member_add_array(wb, "default_charts");
- {
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "Sensors");
- buffer_json_add_array_item_string(wb, "Component");
- buffer_json_array_close(wb);
-
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "Sensors");
- buffer_json_add_array_item_string(wb, "State");
- buffer_json_array_close(wb);
- }
- buffer_json_array_close(wb);
-
- buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
- buffer_json_finalize(wb);
-
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", expires, wb);
-
- buffer_free(wb);
-}
-
-// ----------------------------------------------------------------------------
-// main, command line arguments parsing
-
-static void plugin_exit(int code) {
- fflush(stdout);
- function_plugin_should_exit = true;
- exit(code);
-}
-
-int main (int argc, char **argv) {
- clocks_init();
- nd_log_initialize_for_external_plugins("freeipmi.plugin");
- netdata_threads_init_for_external_plugins(0); // set the default threads stack size here
-
- bool netdata_do_sel = IPMI_ENABLE_SEL_BY_DEFAULT;
-
- bool debug = false;
-
- // ------------------------------------------------------------------------
- // parse command line parameters
-
- int i, freq_s = 0;
- for(i = 1; i < argc ; i++) {
- if(isdigit(*argv[i]) && !freq_s) {
- int n = str2i(argv[i]);
- if(n > 0 && n < 86400) {
- freq_s = n;
- continue;
- }
- }
- else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
- printf("%s %s\n", program_name, VERSION);
- exit(0);
- }
- else if(strcmp("debug", argv[i]) == 0) {
- debug = true;
- continue;
- }
- else if(strcmp("sel", argv[i]) == 0) {
- netdata_do_sel = true;
- continue;
- }
- else if(strcmp("no-sel", argv[i]) == 0) {
- netdata_do_sel = false;
- continue;
- }
- else if(strcmp("reread-sdr-cache", argv[i]) == 0) {
- global_sel_flags |= IPMI_MONITORING_SEL_FLAGS_REREAD_SDR_CACHE;
- global_sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_REREAD_SDR_CACHE;
- remove_reread_sdr_after_first_use = false;
- if (debug) fprintf(stderr, "%s: reread-sdr-cache enabled for both sensors and SEL\n", program_name);
- }
- else if(strcmp("interpret-oem-data", argv[i]) == 0) {
- global_sel_flags |= IPMI_MONITORING_SEL_FLAGS_INTERPRET_OEM_DATA;
- global_sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_INTERPRET_OEM_DATA;
- if (debug) fprintf(stderr, "%s: interpret-oem-data enabled for both sensors and SEL\n", program_name);
- }
- else if(strcmp("assume-system-event-record", argv[i]) == 0) {
- global_sel_flags |= IPMI_MONITORING_SEL_FLAGS_ASSUME_SYSTEM_EVENT_RECORD;
- if (debug) fprintf(stderr, "%s: assume-system-event-record enabled\n", program_name);
- }
- else if(strcmp("ignore-non-interpretable-sensors", argv[i]) == 0) {
- global_sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_NON_INTERPRETABLE_SENSORS;
- if (debug) fprintf(stderr, "%s: ignore-non-interpretable-sensors enabled\n", program_name);
- }
- else if(strcmp("bridge-sensors", argv[i]) == 0) {
- global_sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_BRIDGE_SENSORS;
- if (debug) fprintf(stderr, "%s: bridge-sensors enabled\n", program_name);
- }
- else if(strcmp("shared-sensors", argv[i]) == 0) {
- global_sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_SHARED_SENSORS;
- if (debug) fprintf(stderr, "%s: shared-sensors enabled\n", program_name);
- }
- else if(strcmp("no-discrete-reading", argv[i]) == 0) {
- global_sensor_reading_flags &= ~(IPMI_MONITORING_SENSOR_READING_FLAGS_DISCRETE_READING);
- if (debug) fprintf(stderr, "%s: discrete-reading disabled\n", program_name);
- }
- else if(strcmp("ignore-scanning-disabled", argv[i]) == 0) {
- global_sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_IGNORE_SCANNING_DISABLED;
- if (debug) fprintf(stderr, "%s: ignore-scanning-disabled enabled\n", program_name);
- }
- else if(strcmp("assume-bmc-owner", argv[i]) == 0) {
- global_sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ASSUME_BMC_OWNER;
- if (debug) fprintf(stderr, "%s: assume-bmc-owner enabled\n", program_name);
- }
-#if defined(IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES) && defined(IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES)
- else if(strcmp("entity-sensor-names", argv[i]) == 0) {
- global_sel_flags |= IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES;
- global_sensor_reading_flags |= IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES;
- if (debug) fprintf(stderr, "%s: entity-sensor-names enabled for both sensors and SEL\n", program_name);
- }
-#endif
- else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
- fprintf(stderr,
- "\n"
- " netdata %s %s\n"
- " Copyright (C) 2023 Netdata Inc.\n"
- " Released under GNU General Public License v3 or later.\n"
- " All rights reserved.\n"
- "\n"
- " This program is a data collector plugin for netdata.\n"
- "\n"
- " Available command line options:\n"
- "\n"
- " SECONDS data collection frequency\n"
- " minimum: %d\n"
- "\n"
- " debug enable verbose output\n"
- " default: disabled\n"
- "\n"
- " sel\n"
- " no-sel enable/disable SEL collection\n"
- " default: %s\n"
- "\n"
- " reread-sdr-cache re-read SDR cache on every iteration\n"
- " default: disabled\n"
- "\n"
- " interpret-oem-data attempt to parse OEM data\n"
- " default: disabled\n"
- "\n"
- " assume-system-event-record \n"
- " tread illegal SEL events records as normal\n"
- " default: disabled\n"
- "\n"
- " ignore-non-interpretable-sensors \n"
- " do not read sensors that cannot be interpreted\n"
- " default: disabled\n"
- "\n"
- " bridge-sensors bridge sensors not owned by the BMC\n"
- " default: disabled\n"
- "\n"
- " shared-sensors enable shared sensors, if found\n"
- " default: disabled\n"
- "\n"
- " no-discrete-reading do not read sensors that their event/reading type code is invalid\n"
- " default: enabled\n"
- "\n"
- " ignore-scanning-disabled \n"
- " Ignore the scanning bit and read sensors no matter what\n"
- " default: disabled\n"
- "\n"
- " assume-bmc-owner assume the BMC is the sensor owner no matter what\n"
- " (usually bridging is required too)\n"
- " default: disabled\n"
- "\n"
-#if defined(IPMI_MONITORING_SEL_FLAGS_ENTITY_SENSOR_NAMES) && defined(IPMI_MONITORING_SENSOR_READING_FLAGS_ENTITY_SENSOR_NAMES)
- " entity-sensor-names sensor names prefixed with entity id and instance\n"
- " default: disabled\n"
- "\n"
-#endif
- " hostname HOST\n"
- " username USER\n"
- " password PASS connect to remote IPMI host\n"
- " default: local IPMI processor\n"
- "\n"
- " no-auth-code-check\n"
- " noauthcodecheck don't check the authentication codes returned\n"
- "\n"
- " driver-type IPMIDRIVER\n"
- " Specify the driver type to use instead of doing an auto selection. \n"
- " The currently available outofband drivers are LAN and LAN_2_0,\n"
- " which perform IPMI 1.5 and IPMI 2.0 respectively. \n"
- " The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC.\n"
- "\n"
- " sdr-cache-dir PATH directory for SDR cache files\n"
- " default: %s\n"
- "\n"
- " sensor-config-file FILE filename to read sensor configuration\n"
- " default: %s\n"
- "\n"
- " sel-config-file FILE filename to read sel configuration\n"
- " default: %s\n"
- "\n"
- " ignore N1,N2,N3,... sensor IDs to ignore\n"
- " default: none\n"
- "\n"
- " ignore-status N1,N2,N3,... sensor IDs to ignore status (nominal/warning/critical)\n"
- " default: none\n"
- "\n"
- " -v\n"
- " -V\n"
- " version print version and exit\n"
- "\n"
- " Linux kernel module for IPMI is CPU hungry.\n"
- " On Linux run this to lower kipmiN CPU utilization:\n"
- " # echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us\n"
- "\n"
- " or create: /etc/modprobe.d/ipmi.conf with these contents:\n"
- " options ipmi_si kipmid_max_busy_us=10\n"
- "\n"
- " For more information:\n"
- " https://github.com/netdata/netdata/tree/master/collectors/freeipmi.plugin\n"
- "\n"
- , program_name, VERSION
- , update_every
- , netdata_do_sel?"enabled":"disabled"
- , sdr_cache_directory?sdr_cache_directory:"system default"
- , sensor_config_file?sensor_config_file:"system default"
- , sel_config_file?sel_config_file:"system default"
- );
- exit(1);
- }
- else if(i < argc && strcmp("hostname", argv[i]) == 0) {
- hostname = strdupz(argv[++i]);
- char *s = argv[i];
-            // mask it, so it is hidden from the process tree
- while(*s) *s++ = 'x';
- if(debug) fprintf(stderr, "%s: hostname set to '%s'\n", program_name, hostname);
- continue;
- }
- else if(i < argc && strcmp("username", argv[i]) == 0) {
- username = strdupz(argv[++i]);
- char *s = argv[i];
-            // mask it, so it is hidden from the process tree
- while(*s) *s++ = 'x';
- if(debug) fprintf(stderr, "%s: username set to '%s'\n", program_name, username);
- continue;
- }
- else if(i < argc && strcmp("password", argv[i]) == 0) {
- password = strdupz(argv[++i]);
- char *s = argv[i];
-            // mask it, so it is hidden from the process tree
- while(*s) *s++ = 'x';
- if(debug) fprintf(stderr, "%s: password set to '%s'\n", program_name, password);
- continue;
- }
- else if(strcmp("driver-type", argv[i]) == 0) {
- if (hostname) {
- protocol_version = netdata_parse_outofband_driver_type(argv[++i]);
- if(debug) fprintf(stderr, "%s: outband protocol version set to '%d'\n",
- program_name, protocol_version);
- }
- else {
- driver_type = netdata_parse_inband_driver_type(argv[++i]);
- if(debug) fprintf(stderr, "%s: inband driver type set to '%d'\n",
- program_name, driver_type);
- }
- continue;
- } else if (i < argc && (strcmp("noauthcodecheck", argv[i]) == 0 || strcmp("no-auth-code-check", argv[i]) == 0)) {
- if (!hostname || netdata_host_is_localhost(hostname)) {
- if (debug)
- fprintf(stderr, "%s: noauthcodecheck workaround flag is ignored for inband configuration\n",
- program_name);
-
- }
- else if (protocol_version < 0 || protocol_version == IPMI_MONITORING_PROTOCOL_VERSION_1_5) {
- workaround_flags |= IPMI_MONITORING_WORKAROUND_FLAGS_PROTOCOL_VERSION_1_5_NO_AUTH_CODE_CHECK;
-
- if (debug)
- fprintf(stderr, "%s: noauthcodecheck workaround flag enabled\n", program_name);
- }
- else {
- if (debug)
- fprintf(stderr, "%s: noauthcodecheck workaround flag is ignored for protocol version 2.0\n",
- program_name);
- }
- continue;
- }
- else if(i < argc && strcmp("sdr-cache-dir", argv[i]) == 0) {
- sdr_cache_directory = argv[++i];
-
- if(debug)
- fprintf(stderr, "%s: SDR cache directory set to '%s'\n", program_name, sdr_cache_directory);
-
- continue;
- }
- else if(i < argc && strcmp("sensor-config-file", argv[i]) == 0) {
- sensor_config_file = argv[++i];
- if(debug) fprintf(stderr, "%s: sensor config file set to '%s'\n", program_name, sensor_config_file);
- continue;
- }
- else if(i < argc && strcmp("sel-config-file", argv[i]) == 0) {
- sel_config_file = argv[++i];
- if(debug) fprintf(stderr, "%s: sel config file set to '%s'\n", program_name, sel_config_file);
- continue;
- }
- else if(i < argc && strcmp("ignore", argv[i]) == 0) {
- excluded_record_ids_parse(argv[++i], debug);
- continue;
- }
- else if(i < argc && strcmp("ignore-status", argv[i]) == 0) {
- excluded_status_record_ids_parse(argv[++i], debug);
- continue;
- }
-
- collector_error("%s(): ignoring parameter '%s'", __FUNCTION__, argv[i]);
- }
-
- errno = 0;
-
- if(freq_s && freq_s < update_every)
- collector_info("%s(): update frequency %d seconds is too small for IPMI. Using %d.",
- __FUNCTION__, freq_s, update_every);
-
- update_every = freq_s = MAX(freq_s, update_every);
- update_every_sel = MAX(update_every, update_every_sel);
-
- // ------------------------------------------------------------------------
- // initialize IPMI
-
- if(debug) {
- fprintf(stderr, "%s: calling ipmi_monitoring_init()\n", program_name);
- ipmimonitoring_init_flags |= IPMI_MONITORING_FLAGS_DEBUG|IPMI_MONITORING_FLAGS_DEBUG_IPMI_PACKETS;
- }
-
- int rc;
- if(ipmi_monitoring_init(ipmimonitoring_init_flags, &rc) < 0)
- fatal("ipmi_monitoring_init: %s", ipmi_monitoring_ctx_strerror(rc));
-
- // ------------------------------------------------------------------------
- // create the data collection threads
-
- struct ipmi_collection_thread sensors_data = {
- .type = IPMI_COLLECT_TYPE_SENSORS,
- .freq_s = update_every,
- .spinlock = NETDATA_SPINLOCK_INITIALIZER,
- .debug = debug,
- .state = {
- .debug = debug,
- .sensors = {
- .status = ICS_INIT,
- .last_iteration_ut = now_monotonic_usec(),
- .freq_ut = update_every * USEC_PER_SEC,
- .priority = IPMI_SENSORS_DASHBOARD_PRIORITY,
- .dict = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE|DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct sensor)),
- },
- },
- }, sel_data = {
- .type = IPMI_COLLECT_TYPE_SEL,
- .freq_s = update_every_sel,
- .spinlock = NETDATA_SPINLOCK_INITIALIZER,
- .debug = debug,
- .state = {
- .debug = debug,
- .sel = {
- .status = ICS_INIT,
- .last_iteration_ut = now_monotonic_usec(),
- .freq_ut = update_every_sel * USEC_PER_SEC,
- .priority = IPMI_SEL_DASHBOARD_PRIORITY,
- },
- },
- };
-
- netdata_thread_t sensors_thread = 0, sel_thread = 0;
-
- netdata_thread_create(&sensors_thread, "IPMI[sensors]", NETDATA_THREAD_OPTION_DONT_LOG, netdata_ipmi_collection_thread, &sensors_data);
-
- if(netdata_do_sel)
- netdata_thread_create(&sel_thread, "IPMI[sel]", NETDATA_THREAD_OPTION_DONT_LOG, netdata_ipmi_collection_thread, &sel_data);
-
- // ------------------------------------------------------------------------
- // the main loop
-
- if(debug) fprintf(stderr, "%s: starting data collection\n", program_name);
-
- time_t started_t = now_monotonic_sec();
-
- size_t iteration = 0;
- usec_t step = 100 * USEC_PER_MS;
- bool global_chart_created = false;
- bool tty = isatty(fileno(stderr)) == 1;
-
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- for(iteration = 0; 1 ; iteration++) {
- usec_t dt = heartbeat_next(&hb, step);
-
- if (!tty) {
- netdata_mutex_lock(&stdout_mutex);
- fprintf(stdout, "\n"); // keepalive to avoid parser read timeout (2 minutes) during ipmi_detect_speed_secs()
- fflush(stdout);
- netdata_mutex_unlock(&stdout_mutex);
- }
-
- spinlock_lock(&sensors_data.spinlock);
- state.sensors = sensors_data.state.sensors;
- spinlock_unlock(&sensors_data.spinlock);
-
- spinlock_lock(&sel_data.spinlock);
- state.sel = sel_data.state.sel;
- spinlock_unlock(&sel_data.spinlock);
-
- switch(state.sensors.status) {
- case ICS_RUNNING:
- step = update_every * USEC_PER_SEC;
- if(state.sensors.last_iteration_ut < now_monotonic_usec() - IPMI_RESTART_IF_SENSORS_DONT_ITERATE_EVERY_SECONDS * USEC_PER_SEC) {
- collector_error("%s(): sensors have not be collected for %zu seconds. Exiting to restart.",
- __FUNCTION__, (size_t)((now_monotonic_usec() - state.sensors.last_iteration_ut) / USEC_PER_SEC));
-
- fprintf(stdout, "EXIT\n");
- plugin_exit(0);
- }
- break;
-
- case ICS_INIT:
- continue;
-
- case ICS_INIT_FAILED:
- collector_error("%s(): sensors failed to initialize. Calling DISABLE.", __FUNCTION__);
- fprintf(stdout, "DISABLE\n");
- plugin_exit(0);
-
- case ICS_FAILED:
- collector_error("%s(): sensors fails repeatedly to collect metrics. Exiting to restart.", __FUNCTION__);
- fprintf(stdout, "EXIT\n");
- plugin_exit(0);
- }
-
- if(netdata_do_sel) {
-            switch (state.sel.status) {
- case ICS_RUNNING:
- case ICS_INIT:
- break;
-
- case ICS_INIT_FAILED:
- case ICS_FAILED:
- collector_error("%s(): SEL fails to collect events. Disabling SEL collection.", __FUNCTION__);
- netdata_do_sel = false;
- break;
- }
- }
-
- if(unlikely(debug))
- fprintf(stderr, "%s: calling send_ipmi_sensor_metrics_to_netdata()\n", program_name);
-
- static bool add_func_sensors = true;
- if (add_func_sensors) {
- add_func_sensors = false;
- struct functions_evloop_globals *wg =
- functions_evloop_init(1, "FREEIPMI", &stdout_mutex, &function_plugin_should_exit);
- functions_evloop_add_function(
- wg, "ipmi-sensors", freeimi_function_sensors, PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT);
- FREEIPMI_GLOBAL_FUNCTION_SENSORS();
- }
-
- state.updates.now_ut = now_monotonic_usec();
- send_ipmi_sensor_metrics_to_netdata(&state);
-
- if(netdata_do_sel)
- send_ipmi_sel_metrics_to_netdata(&state);
-
- if(unlikely(debug))
- fprintf(stderr, "%s: iteration %zu, dt %"PRIu64" usec, sensors ever collected %zu, sensors last collected %zu \n"
- , program_name
- , iteration
- , dt
- , dictionary_entries(state.sensors.dict)
- , state.sensors.collected
- );
-
- netdata_mutex_lock(&stdout_mutex);
-
- if (!global_chart_created) {
- global_chart_created = true;
-
- fprintf(stdout,
- "CHART netdata.freeipmi_availability_status '' 'Plugin availability status' 'status' "
- "plugins netdata.plugin_availability_status line 146000 %d '' '%s' '%s'\n"
- "DIMENSION available '' absolute 1 1\n",
- update_every, program_name, "");
- }
-
- fprintf(stdout,
- "BEGIN netdata.freeipmi_availability_status\n"
- "SET available = 1\n"
- "END\n");
-
- // restart check (14400 seconds)
- if (now_monotonic_sec() - started_t > IPMI_RESTART_EVERY_SECONDS) {
- collector_info("%s(): reached my lifetime expectancy. Exiting to restart.", __FUNCTION__);
- fprintf(stdout, "EXIT\n");
- plugin_exit(0);
- }
-
- fflush(stdout);
-
- netdata_mutex_unlock(&stdout_mutex);
- }
-}
diff --git a/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md b/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md
deleted file mode 100644
index c0293fc37..000000000
--- a/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md
+++ /dev/null
@@ -1,275 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/freeipmi.plugin/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/freeipmi.plugin/metadata.yaml"
-sidebar_label: "Intelligent Platform Management Interface (IPMI)"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Hardware Devices and Sensors"
-most_popular: True
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Intelligent Platform Management Interface (IPMI)
-
-
-<img src="https://netdata.cloud/img/netdata.png" width="150"/>
-
-
-Plugin: freeipmi.plugin
-Module: freeipmi
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-"Monitor enterprise server sensor readings, event log entries, and hardware statuses to ensure reliable server operations."
-
-
-The plugin uses the open source IPMImonitoring library to communicate with the sensors.
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs setuid.
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The Linux kernel module for IPMI can create significant overhead.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-The plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.
-
-
-### Per Intelligent Platform Management Interface (IPMI) instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipmi.sel | events | events |
-
-### Per sensor
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| sensor | The sensor name |
-| type | One of 45 recognized sensor types (Battery, Voltage...) |
-| component | One of 25 recognized components (Processor, Peripheral). |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipmi.sensor_state | nominal, critical, warning, unknown | state |
-| ipmi.sensor_temperature_c | temperature | Celsius |
-| ipmi.sensor_temperature_f | temperature | Fahrenheit |
-| ipmi.sensor_voltage | voltage | Volts |
-| ipmi.sensor_ampere | ampere | Amps |
-| ipmi.sensor_fan_speed | rotations | RPM |
-| ipmi.sensor_power | power | Watts |
-| ipmi.sensor_reading_percent | percentage | % |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ ipmi_sensor_state ](https://github.com/netdata/netdata/blob/master/health/health.d/ipmi.conf) | ipmi.sensor_state | IPMI sensor ${label:sensor} (${label:component}) state |
-
-
-## Setup
-
-### Prerequisites
-
-#### Install freeipmi.plugin
-
-When using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. It is not installed automatically due to the large number of dependencies it requires.
-
-When using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin.
-
-When using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata.
-
-
-#### Preliminary actions
-
-If you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root
-to initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system.
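-
-For example (a sketch; this assumes FreeIPMI's command line tools are installed on the system):
-
-```bash
-sudo ipmimonitoring
-```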
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:freeipmi]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-The configuration is set using command line options:
-
-```ini
-# netdata.conf
-[plugin:freeipmi]
- command options = opt1 opt2 ... optN
-```
-
-To display a help message listing the available command line options:
-
-```bash
-./usr/libexec/netdata/plugins.d/freeipmi.plugin --help
-```
-
-
-<details><summary>Command options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| SECONDS | Data collection frequency. | | no |
-| debug | Enable verbose output. | disabled | no |
-| no-sel | Disable System Event Log (SEL) collection. | disabled | no |
-| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no |
-| interpret-oem-data | Attempt to parse OEM data. | disabled | no |
-| assume-system-event-record | Treat illegal SEL event records as normal. | disabled | no |
-| ignore-non-interpretable-sensors | Do not read sensors that cannot be interpreted. | disabled | no |
-| bridge-sensors | Bridge sensors not owned by the BMC. | disabled | no |
-| shared-sensors | Enable shared sensors if found. | disabled | no |
-| no-discrete-reading | Do not read sensors whose event/reading type code is invalid. | enabled | no |
-| ignore-scanning-disabled | Ignore the scanning bit and read sensors no matter what. | disabled | no |
-| assume-bmc-owner | Assume the BMC is the sensor owner no matter what (usually bridging is required too). | disabled | no |
-| hostname HOST | Remote IPMI hostname or IP address. | local | no |
-| username USER | Username that will be used when connecting to the remote host. | | no |
-| password PASS | Password that will be used when connecting to the remote host. | | no |
-| noauthcodecheck / no-auth-code-check | Don't check the authentication codes returned. | | no |
-| driver-type IPMIDRIVER | Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC. | | no |
-| sdr-cache-dir PATH | SDR cache files directory. | /tmp | no |
-| sensor-config-file FILE | Sensors configuration filename. | system default | no |
-| sel-config-file FILE | SEL configuration filename. | system default | no |
-| ignore N1,N2,N3,... | Sensor IDs to ignore. | | no |
-| ignore-status N1,N2,N3,... | Sensor IDs to ignore status (nominal/warning/critical). | | no |
-| -v | Print version and exit. | | no |
-| --help | Print usage message and exit. | | no |
-
-</details>
-
-#### Examples
-
-##### Decrease data collection frequency
-
-A basic example that decreases the data collection frequency. The minimum `update every` is 5 seconds (enforced internally by the plugin), since IPMI is slow and CPU hungry; collecting once every 5 seconds is acceptable.
-
-```yaml
-[plugin:freeipmi]
- update every = 10
-
-```
-##### Disable SEL collection
-
-Append to `command options =` the options you need.
-
-<details><summary>Config</summary>
-
-```yaml
-[plugin:freeipmi]
- command options = no-sel
-
-```
-</details>
-
-##### Ignore specific sensors
-
-Specific sensor IDs can be excluded from the FreeIPMI tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`.
-
-**However, this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).
-
-To find the IDs to ignore, run the command `ipmimonitoring`. The first column contains the IDs you want:
-
-| ID | Name | Type | State | Reading | Units | Event |
-|:---|:-----|:-----|:------|:--------|:------|:------|
-| 1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK' |
-| 2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK' |
-| 3 | Avg Power | Current | Nominal | 100.00 | W | 'OK' |
-| 4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK' |
-| 5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK' |
-| 6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK' |
-| 7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK' |
-| 8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK' |
-| 9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK' |
-| 10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK' |
-| 11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK' |
-| 12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK' |
-| 13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK' |
-| 14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present' |
-| 15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present' |
-| ... | | | | | | |
-
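-To pull out just the IDs and names (a convenience one-liner, assuming the pipe-separated `ipmimonitoring` output shown above), you could run:
-
-```bash
-# print the ID and Name columns, trimming surrounding whitespace
-ipmimonitoring | awk -F'|' 'NR > 1 { gsub(/^ +| +$/, "", $1); gsub(/^ +| +$/, "", $2); print $1, $2 }'
-```
-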
-`freeipmi.plugin` supports an `ignore` option that accepts a comma-separated list of sensor IDs to ignore. To configure it, set in `netdata.conf`:
-
-
-<details><summary>Config</summary>
-
-```yaml
-[plugin:freeipmi]
- command options = ignore 1,2,3,4,...
-
-```
-</details>
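-
-##### Monitor a remote IPMI host
-
-A hypothetical sketch combining the remote-connection options listed above; the hostname, username and password are placeholders, not defaults:
-
-<details><summary>Config</summary>
-
-```yaml
-[plugin:freeipmi]
- command options = hostname 192.0.2.10 username admin password secret
-
-```
-</details>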
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-You can run `freeipmi.plugin` with the debug option enabled to troubleshoot issues with it. The output should give you clues as to why the collector isn't working. Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/` (if that's not the case on your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`), switch to the `netdata` user, and run the plugin in debug mode, as shown below.
-
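-The steps, as shell commands:
-
-```bash
-# from the Netdata plugins directory, run the plugin as the netdata user
-cd /usr/libexec/netdata/plugins.d/
-sudo -u netdata -s
-./freeipmi.plugin 5 debug
-```
-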
-### kipmi0 CPU usage
-
-There have been reports that the `kipmi0` kernel thread shows increased CPU usage when IPMI is queried. You can lower the system's CPU consumption by limiting how aggressively the kernel polls IPMI, as shown below.
-
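-To apply the limit at runtime:
-
-```sh
-echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us
-```
-
-To make the setting permanent, create the file `/etc/modprobe.d/ipmi.conf` with this content:
-
-```sh
-# prevent kipmi from consuming 100% CPU
-options ipmi_si kipmid_max_busy_us=10
-```
-
-This instructs the kernel IPMI module to pause between IPMI checks. Querying IPMI will be a lot slower (e.g. several seconds for IPMI to respond), but `kipmi` will not use any noticeable CPU. You can also use a higher number (the number of microseconds to poll IPMI for a response before waiting for a tick).
-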
-
diff --git a/collectors/freeipmi.plugin/metadata.yaml b/collectors/freeipmi.plugin/metadata.yaml
deleted file mode 100644
index f8c75c2cb..000000000
--- a/collectors/freeipmi.plugin/metadata.yaml
+++ /dev/null
@@ -1,347 +0,0 @@
-plugin_name: freeipmi.plugin
-modules:
- - meta:
- plugin_name: freeipmi.plugin
- module_name: freeipmi
- monitored_instance:
- name: Intelligent Platform Management Interface (IPMI)
- link: "https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface"
- categories:
- - data-collection.hardware-devices-and-sensors
- icon_filename: "netdata.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - sensors
- - ipmi
- - freeipmi
- - ipmimonitoring
- most_popular: true
- overview:
- data_collection:
- metrics_description: |
-          Monitor enterprise server sensor readings, event log entries, and hardware statuses to ensure reliable server operations.
- method_description: |
-          The plugin uses the open source library `libipmimonitoring` to communicate with sensors.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid."
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
-          description: "The Linux kernel IPMI module can add significant overhead."
- setup:
- prerequisites:
- list:
- - title: Install freeipmi.plugin
- description: |
- When using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. It is not installed automatically due to the large number of dependencies it requires.
-
- When using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin.
-
- When using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata.
- - title: Preliminary actions
- description: |
- If you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root
- to initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system.
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:freeipmi]"
- options:
- description: |
- The configuration is set using command line options:
-
- ```
- # netdata.conf
- [plugin:freeipmi]
- command options = opt1 opt2 ... optN
- ```
-
- To display a help message listing the available command line options:
-
- ```bash
-            /usr/libexec/netdata/plugins.d/freeipmi.plugin --help
- ```
- folding:
- title: "Command options"
- enabled: true
- list:
- - name: SECONDS
- description: Data collection frequency.
- default_value: ""
- required: false
- - name: debug
- description: Enable verbose output.
- default_value: disabled
- required: false
- - name: no-sel
- description: Disable System Event Log (SEL) collection.
- default_value: disabled
- required: false
- - name: reread-sdr-cache
- description: Re-read SDR cache on every iteration.
- default_value: disabled
- required: false
- - name: interpret-oem-data
- description: Attempt to parse OEM data.
- default_value: disabled
- required: false
- - name: assume-system-event-record
-              description: Treat illegal SEL event records as normal.
- default_value: disabled
- required: false
- - name: ignore-non-interpretable-sensors
- description: Do not read sensors that cannot be interpreted.
- default_value: disabled
- required: false
- - name: bridge-sensors
- description: Bridge sensors not owned by the BMC.
- default_value: disabled
- required: false
- - name: shared-sensors
- description: Enable shared sensors if found.
- default_value: disabled
- required: false
- - name: no-discrete-reading
- description: Do not read sensors if their event/reading type code is invalid.
- default_value: enabled
- required: false
- - name: ignore-scanning-disabled
- description: Ignore the scanning bit and read sensors no matter what.
- default_value: disabled
- required: false
- - name: assume-bmc-owner
- description: Assume the BMC is the sensor owner no matter what (usually bridging is required too).
- default_value: disabled
- required: false
- - name: hostname HOST
- description: Remote IPMI hostname or IP address.
- default_value: local
- required: false
- - name: username USER
- description: Username that will be used when connecting to the remote host.
- default_value: ""
- required: false
- - name: password PASS
- description: Password that will be used when connecting to the remote host.
- default_value: ""
- required: false
- - name: noauthcodecheck / no-auth-code-check
- description: Don't check the authentication codes returned.
- default_value: ""
- required: false
- - name: driver-type IPMIDRIVER
- description: Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC.
- default_value: ""
- required: false
- - name: sdr-cache-dir PATH
- description: SDR cache files directory.
- default_value: /tmp
- required: false
- - name: sensor-config-file FILE
- description: Sensors configuration filename.
- default_value: system default
- required: false
- - name: sel-config-file FILE
- description: SEL configuration filename.
- default_value: system default
- required: false
- - name: ignore N1,N2,N3,...
- description: Sensor IDs to ignore.
- default_value: ""
- required: false
- - name: ignore-status N1,N2,N3,...
- description: Sensor IDs to ignore status (nominal/warning/critical).
- default_value: ""
- required: false
- - name: -v
- description: Print version and exit.
- default_value: ""
- required: false
- - name: --help
- description: Print usage message and exit.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Decrease data collection frequency
-            description: A basic example that decreases the data collection frequency. The minimum `update every` is 5 seconds (enforced internally by the plugin), since IPMI is slow and CPU hungry; collecting once every 5 seconds is acceptable.
- config: |
- [plugin:freeipmi]
- update every = 10
- folding:
- enabled: false
- - name: Disable SEL collection
- description: Append to `command options =` the options you need.
- config: |
- [plugin:freeipmi]
- command options = no-sel
- - name: Ignore specific sensors
- description: |
- Specific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`.
-
-              **However, this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).
-
-              To find the IDs to ignore, run the command `ipmimonitoring`. The first column contains the IDs you want:
-
- ID | Name | Type | State | Reading | Units | Event
- 1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'
- 2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'
- 3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'
- 4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'
- 5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'
- 6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'
- 7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'
- 8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'
- 9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'
- 10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'
- 11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'
- 12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'
- 13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'
- 14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'
- 15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'
- ...
-
-              `freeipmi.plugin` supports an `ignore` option that accepts a comma-separated list of sensor IDs to ignore. To configure it, set in `netdata.conf`:
- config: |
- [plugin:freeipmi]
- command options = ignore 1,2,3,4,...
- troubleshooting:
- problems:
- list:
- - name: Debug Mode
- description: |
- You can run `freeipmi.plugin` with the debug option enabled, to troubleshoot issues with it. The output should give you clues as to why the collector isn't working.
-
- - Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
- - Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
- - Run the `freeipmi.plugin` in debug mode:
-
- ```bash
- ./freeipmi.plugin 5 debug
- ```
-          - name: kipmi0 CPU usage
- description: |
-              There have been reports that the `kipmi0` kernel thread shows increased CPU usage when IPMI is queried. To lower the system's CPU consumption, you can issue this command:
-
- ```sh
- echo 10 > /sys/module/ipmi_si/parameters/kipmid_max_busy_us
- ```
-
- You can also permanently set the above setting by creating the file `/etc/modprobe.d/ipmi.conf` with this content:
-
- ```sh
- # prevent kipmi from consuming 100% CPU
- options ipmi_si kipmid_max_busy_us=10
- ```
-
-              This instructs the kernel IPMI module to pause between IPMI checks. Querying IPMI will be a lot slower (e.g. several seconds for IPMI to respond), but `kipmi` will not use any noticeable CPU.
-
- You can also use a higher number (this is the number of microseconds to poll IPMI for a response, before waiting for a tick).
- alerts:
- - name: ipmi_sensor_state
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ipmi.conf
- metric: ipmi.sensor_state
- info: IPMI sensor ${label:sensor} (${label:component}) state
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: |
- The plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.
- availability: []
- scopes:
- - name: global
- description: These metrics refer to the entire monitored application.
- labels: []
- metrics:
- - name: ipmi.sel
- description: IPMI Events
- unit: "events"
- chart_type: area
- dimensions:
- - name: events
- - name: sensor
- description: ""
- labels:
- - name: sensor
- description: The sensor name
- - name: type
- description: One of 45 recognized sensor types (Battery, Voltage...)
- - name: component
- description: One of 25 recognized components (Processor, Peripheral).
- metrics:
- - name: ipmi.sensor_state
- description: IPMI Sensors State
- unit: "state"
- chart_type: line
- dimensions:
- - name: nominal
- - name: critical
- - name: warning
- - name: unknown
- - name: ipmi.sensor_temperature_c
- description: IPMI Sensor Temperature Celsius
- unit: "Celsius"
- chart_type: line
- dimensions:
- - name: temperature
- - name: ipmi.sensor_temperature_f
- description: IPMI Sensor Temperature Fahrenheit
- unit: "Fahrenheit"
- chart_type: line
- dimensions:
- - name: temperature
- - name: ipmi.sensor_voltage
- description: IPMI Sensor Voltage
- unit: "Volts"
- chart_type: line
- dimensions:
- - name: voltage
- - name: ipmi.sensor_ampere
- description: IPMI Sensor Current
- unit: "Amps"
- chart_type: line
- dimensions:
- - name: ampere
- - name: ipmi.sensor_fan_speed
- description: IPMI Sensor Fans Speed
- unit: "RPM"
- chart_type: line
- dimensions:
- - name: rotations
- - name: ipmi.sensor_power
- description: IPMI Sensor Power
- unit: "Watts"
- chart_type: line
- dimensions:
- - name: power
- - name: ipmi.sensor_reading_percent
- description: IPMI Sensor Reading Percentage
- unit: "%"
- chart_type: line
- dimensions:
- - name: percentage
diff --git a/collectors/idlejitter.plugin/Makefile.am b/collectors/idlejitter.plugin/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/collectors/idlejitter.plugin/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/idlejitter.plugin/README.md b/collectors/idlejitter.plugin/README.md
deleted file mode 120000
index 1ce460b62..000000000
--- a/collectors/idlejitter.plugin/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/idle_os_jitter.md \ No newline at end of file
diff --git a/collectors/idlejitter.plugin/integrations/idle_os_jitter.md b/collectors/idlejitter.plugin/integrations/idle_os_jitter.md
deleted file mode 100644
index 44463f6f5..000000000
--- a/collectors/idlejitter.plugin/integrations/idle_os_jitter.md
+++ /dev/null
@@ -1,118 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/idlejitter.plugin/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/idlejitter.plugin/metadata.yaml"
-sidebar_label: "Idle OS Jitter"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Synthetic Checks"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Idle OS Jitter
-
-
-<img src="https://netdata.cloud/img/syslog.png" width="150"/>
-
-
-Plugin: idlejitter.plugin
-Module: idlejitter.plugin
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor delays in timing for user processes caused by scheduling limitations, to optimize the system to run latency-sensitive applications with minimal jitter, improving consistency and quality of service.
-
-
-A thread is spawned that requests to sleep for a fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep is the idle jitter. This is done dozens of times per second to ensure we have a representative sample.
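-
-The same measurement can be illustrated with a rough shell sketch (illustrative only, assuming GNU `date` with `%N` support; the collector itself does this in C with high-resolution monotonic clocks):
-
-```bash
-# request a 20ms sleep and report how many extra microseconds actually elapsed
-while true; do
-  before=$(date +%s%6N)   # current time in microseconds
-  sleep 0.020             # ask for a 20ms sleep
-  after=$(date +%s%6N)
-  echo "idle jitter: $(( after - before - 20000 )) microseconds"
-done
-```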
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration will run by default on all supported systems.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Idle OS Jitter instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.idlejitter | min, max, average | microseconds lost/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-This integration only supports a single configuration option, and most users will not need to change it.
-
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in milliseconds. | 20 | no |
-
-#### Examples
-A minimal sketch of how the option above could be set; the collector reads the `[plugin:idlejitter]` section of `netdata.conf`:
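-
-```ini
-[plugin:idlejitter]
- loop time in ms = 20
-```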
-
-
diff --git a/collectors/idlejitter.plugin/metadata.yaml b/collectors/idlejitter.plugin/metadata.yaml
deleted file mode 100644
index 0ad946994..000000000
--- a/collectors/idlejitter.plugin/metadata.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-plugin_name: idlejitter.plugin
-modules:
- - meta:
- plugin_name: idlejitter.plugin
- module_name: idlejitter.plugin
- monitored_instance:
- name: Idle OS Jitter
- link: ''
- categories:
- - data-collection.synthetic-checks
- icon_filename: 'syslog.png'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - latency
- - jitter
- most_popular: false
- overview:
- data_collection:
- metrics_description: >
- Monitor delays in timing for user processes caused by scheduling limitations to optimize the system to run
- latency sensitive applications with minimal jitter, improving consistency and quality of service.
- method_description: >
-          A thread is spawned that requests to sleep for a fixed amount of time. When the system wakes it up, it
-          measures how many microseconds have passed. The difference between the requested and the actual duration of
-          the sleep is the idle jitter. This is done dozens of times per second to ensure we have a representative sample.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: 'This integration will run by default on all supported systems.'
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: 'netdata.conf'
- section: 'plugin:idlejitter'
- description: ''
- options:
- description: >
- This integration only supports a single configuration option, and most users will not need to change it.
- folding:
- title: ''
- enabled: false
- list:
- - name: loop time in ms
- description: >
-                Specifies the target time for the data collection thread to sleep, measured in milliseconds.
- default_value: 20
- required: false
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.idlejitter
- description: CPU Idle Jitter
- unit: "microseconds lost/s"
- chart_type: line
- dimensions:
- - name: min
- - name: max
- - name: average
diff --git a/collectors/idlejitter.plugin/plugin_idlejitter.c b/collectors/idlejitter.plugin/plugin_idlejitter.c
deleted file mode 100644
index d90548869..000000000
--- a/collectors/idlejitter.plugin/plugin_idlejitter.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "daemon/common.h"
-
-#define CPU_IDLEJITTER_SLEEP_TIME_MS 20
-
-static void cpuidlejitter_main_cleanup(void *ptr) {
- worker_unregister();
-
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- collector_info("cleaning up...");
-
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-void *cpuidlejitter_main(void *ptr) {
- worker_register("IDLEJITTER");
- worker_register_job_name(0, "measurements");
-
- netdata_thread_cleanup_push(cpuidlejitter_main_cleanup, ptr);
-
- usec_t sleep_ut = config_get_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS;
- if(sleep_ut <= 0) {
- config_set_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS);
- sleep_ut = CPU_IDLEJITTER_SLEEP_TIME_MS * USEC_PER_MS;
- }
-
- RRDSET *st = rrdset_create_localhost(
- "system"
- , "idlejitter"
- , NULL
- , "idlejitter"
- , NULL
- , "CPU Idle Jitter"
- , "microseconds lost/s"
- , "idlejitter.plugin"
- , NULL
- , NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER
- , localhost->rrd_update_every
- , RRDSET_TYPE_AREA
- );
- RRDDIM *rd_min = rrddim_add(st, "min", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- RRDDIM *rd_max = rrddim_add(st, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- RRDDIM *rd_avg = rrddim_add(st, "average", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- usec_t update_every_ut = localhost->rrd_update_every * USEC_PER_SEC;
- struct timeval before, after;
-
- while (service_running(SERVICE_COLLECTORS)) {
- int iterations = 0;
- usec_t error_total = 0,
- error_min = 0,
- error_max = 0,
- elapsed = 0;
-
- while (elapsed < update_every_ut) {
- now_monotonic_high_precision_timeval(&before);
- worker_is_idle();
- sleep_usec(sleep_ut);
- worker_is_busy(0);
- now_monotonic_high_precision_timeval(&after);
-
- usec_t dt = dt_usec(&after, &before);
- elapsed += dt;
-
- usec_t error = dt - sleep_ut;
- error_total += error;
-
- if(unlikely(!iterations))
- error_min = error;
- else if(error < error_min)
- error_min = error;
-
- if(error > error_max)
- error_max = error;
-
- iterations++;
- }
-
- if(iterations) {
- rrddim_set_by_pointer(st, rd_min, error_min);
- rrddim_set_by_pointer(st, rd_max, error_max);
- rrddim_set_by_pointer(st, rd_avg, error_total / iterations);
- rrdset_done(st);
- }
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
diff --git a/collectors/ioping.plugin/Makefile.am b/collectors/ioping.plugin/Makefile.am
deleted file mode 100644
index a9cd7c4f1..000000000
--- a/collectors/ioping.plugin/Makefile.am
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-CLEANFILES = \
- ioping.plugin \
- $(NULL)
-
-include $(top_srcdir)/build/subst.inc
-SUFFIXES = .in
-
-dist_plugins_SCRIPTS = \
- ioping.plugin \
- $(NULL)
-
-dist_noinst_DATA = \
- ioping.plugin.in \
- README.md \
- $(NULL)
-
-dist_libconfig_DATA = \
- ioping.conf \
- $(NULL)
diff --git a/collectors/ioping.plugin/README.md b/collectors/ioping.plugin/README.md
deleted file mode 120000
index cb660f13b..000000000
--- a/collectors/ioping.plugin/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/ioping.md \ No newline at end of file
diff --git a/collectors/ioping.plugin/integrations/ioping.md b/collectors/ioping.plugin/integrations/ioping.md
deleted file mode 100644
index 39a07ed62..000000000
--- a/collectors/ioping.plugin/integrations/ioping.md
+++ /dev/null
@@ -1,133 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ioping.plugin/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ioping.plugin/metadata.yaml"
-sidebar_label: "IOPing"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Synthetic Checks"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# IOPing
-
-
-<img src="https://netdata.cloud/img/syslog.png" width="150"/>
-
-
-Plugin: ioping.plugin
-Module: ioping.plugin
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor IOPing metrics for efficient disk I/O latency tracking. Keep track of read/write speeds, latency, and error rates for optimized disk operations.
-
-The plugin uses the `ioping` command.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per disk
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ioping.latency | latency | microseconds |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ ioping_disk_latency ](https://github.com/netdata/netdata/blob/master/health/health.d/ioping.conf) | ioping.latency | average I/O latency over the last 10 seconds |
-
-
-## Setup
-
-### Prerequisites
-
-#### Install ioping
-
-You can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`).
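-
-To verify that the installed binary works, you can run it by hand (a quick sanity check; `-c 3` limits it to three requests, and the `/tmp` target is just a placeholder):
-
-```bash
-/usr/libexec/netdata/plugins.d/ioping -c 3 /tmp
-```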
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `ioping.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config ioping.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Data collection frequency. | 1s | no |
-| destination | The directory/file/device to ioping. | | yes |
-| request_size | The request size in bytes to ioping the destination (symbolic modifiers are supported). | 4k | no |
-| ioping_opts | Options passed to `ioping` commands. | -T 1000000 | no |
-
-</details>
-
-#### Examples
-
-##### Basic Configuration
-
-This example has the minimum configuration necessary to have the plugin running.
-
-<details><summary>Config</summary>
-
-```yaml
-destination="/dev/sda"
-
-```
-</details>
-
-
diff --git a/collectors/ioping.plugin/ioping.conf b/collectors/ioping.plugin/ioping.conf
deleted file mode 100644
index 86f0de7f4..000000000
--- a/collectors/ioping.plugin/ioping.conf
+++ /dev/null
@@ -1,40 +0,0 @@
-# no need for shebang - this file is sourced from ioping.plugin
-
-# ioping.plugin requires a recent version of ioping.
-#
-# You can get it on your system, by running:
-#
-# /usr/libexec/netdata/plugins.d/ioping.plugin install
-
-# -----------------------------------------------------------------------------
-# configuration options
-
-# The ioping binary to use. We need one that can output netdata friendly info
-# (supporting: -N). If you have multiple versions, put here the full filename
-# of the right one
-
-#ioping="/usr/libexec/netdata/plugins.d/ioping"
-
-
-# The directory/file/device to ioping
-
-destination=""
-
-
-# The update frequency of the chart in seconds (symbolic modifiers are supported)
-# the default is inherited from netdata
-
-#update_every="1s"
-
-
-# The request size in bytes to ioping the destination (symbolic modifiers are supported)
-# by default 4k chunks are used
-
-#request_size="4k"
-
-
-# Other ioping options
-# the defaults:
-# -T 1000000 = maximum valid request time (us)
-
-#ioping_opts="-T 1000000"
diff --git a/collectors/ioping.plugin/ioping.plugin.in b/collectors/ioping.plugin/ioping.plugin.in
deleted file mode 100755
index 171e384db..000000000
--- a/collectors/ioping.plugin/ioping.plugin.in
+++ /dev/null
@@ -1,272 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-#
-# This plugin requires a recent version of ioping.
-# You can compile it from source, by running me with option: install
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin:@sbindir_POST@"
-export LC_ALL=C
-
-usage="$(basename "$0") [install] [-h] [-e]
-
-where:
- install install ioping binary
-    -e, --env    path to environment file (defaults to '/etc/netdata/.environment')
- -h show this help text"
-
-INSTALL=0
-ENVIRONMENT_FILE="/etc/netdata/.environment"
-
-while :; do
- case "$1" in
- -h | --help)
- echo "$usage" >&2
- exit 1
- ;;
- install)
- INSTALL=1
- shift
- ;;
- -e | --env)
- ENVIRONMENT_FILE="$2"
- shift 2
- ;;
- -*)
- echo "$usage" >&2
- exit 1
- ;;
- *) break ;;
- esac
-done
-
-if [ "$INSTALL" == "1" ]
- then
- [ "${UID}" != 0 ] && echo >&2 "Please run me as root. This will install a single binary file: /usr/libexec/netdata/plugins.d/ioping." && exit 1
-
- source "${ENVIRONMENT_FILE}" || exit 1
-
- run() {
- printf >&2 " > "
- printf >&2 "%q " "${@}"
- printf >&2 "\n"
- "${@}" || exit 1
- }
-
- download() {
- local git="$(which git 2>/dev/null || command -v git 2>/dev/null)"
- [ ! -z "${git}" ] && run git clone "${1}" "${2}" && return 0
-
- echo >&2 "Cannot find 'git' in this system." && exit 1
- }
-
- tmp=$(mktemp -d /tmp/netdata-ioping-XXXXXX)
- [ ! -d "${NETDATA_PREFIX}/usr/libexec/netdata" ] && run mkdir -p "${NETDATA_PREFIX}/usr/libexec/netdata"
-
- run cd "${tmp}"
-
- if [ -d ioping-netdata ]
- then
- run rm -rf ioping-netdata || exit 1
- fi
-
- download 'https://github.com/netdata/ioping.git' 'ioping-netdata'
- [ $? -ne 0 ] && exit 1
- run cd ioping-netdata || exit 1
-
- INSTALL_PATH="${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/ioping"
-
- run make clean
- run make
- run mv ioping "${INSTALL_PATH}"
- run chown root:"${NETDATA_GROUP}" "${INSTALL_PATH}"
- run chmod 4750 "${INSTALL_PATH}"
- echo >&2
- echo >&2 "All done, you have a compatible ioping now at ${INSTALL_PATH}."
- echo >&2
-
- exit 0
-fi
-
-# -----------------------------------------------------------------------------
-# logging
-
-PROGRAM_NAME="$(basename "${0}")"
-
-# these should be the same with syslog() priorities
-NDLP_EMERG=0 # system is unusable
-NDLP_ALERT=1 # action must be taken immediately
-NDLP_CRIT=2 # critical conditions
-NDLP_ERR=3 # error conditions
-NDLP_WARN=4 # warning conditions
-NDLP_NOTICE=5 # normal but significant condition
-NDLP_INFO=6 # informational
-NDLP_DEBUG=7 # debug-level messages
-
-# the max (numerically) log level we will log
-LOG_LEVEL=$NDLP_INFO
-
-set_log_min_priority() {
- case "${NETDATA_LOG_LEVEL,,}" in
- "emerg" | "emergency")
- LOG_LEVEL=$NDLP_EMERG
- ;;
-
- "alert")
- LOG_LEVEL=$NDLP_ALERT
- ;;
-
- "crit" | "critical")
- LOG_LEVEL=$NDLP_CRIT
- ;;
-
- "err" | "error")
- LOG_LEVEL=$NDLP_ERR
- ;;
-
- "warn" | "warning")
- LOG_LEVEL=$NDLP_WARN
- ;;
-
- "notice")
- LOG_LEVEL=$NDLP_NOTICE
- ;;
-
- "info")
- LOG_LEVEL=$NDLP_INFO
- ;;
-
- "debug")
- LOG_LEVEL=$NDLP_DEBUG
- ;;
- esac
-}
-
-set_log_min_priority
-
-log() {
- local level="${1}"
- shift 1
-
- [[ -n "$level" && -n "$LOG_LEVEL" && "$level" -gt "$LOG_LEVEL" ]] && return
-
- systemd-cat-native --log-as-netdata --newline="--NEWLINE--" <<EOFLOG
-INVOCATION_ID=${NETDATA_INVOCATION_ID}
-SYSLOG_IDENTIFIER=${PROGRAM_NAME}
-PRIORITY=${level}
-THREAD_TAG=ioping.plugin
-ND_LOG_SOURCE=collector
-MESSAGE=${MODULE_NAME}: ${*//\\n/--NEWLINE--}
-
-EOFLOG
- # AN EMPTY LINE IS NEEDED ABOVE
-}
-
-info() {
- log "$NDLP_INFO" "${@}"
-}
-
-warning() {
- log "$NDLP_WARN" "${@}"
-}
-
-error() {
- log "$NDLP_ERR" "${@}"
-}
-
-disable() {
- log "${@}"
- echo "DISABLE"
- exit 1
-}
-
-fatal() {
- disable "$NDLP_ALERT" "${@}"
-}
-
-debug() {
- log "$NDLP_DEBUG" "${@}"
-}
-
-# -----------------------------------------------------------------------------
-
-# store in ${plugin} the name we run under
-# this allows us to copy/link ioping.plugin under a different name
-# to have multiple ioping plugins running with different settings
-plugin="${PROGRAM_NAME/.plugin/}"
-
-
-# -----------------------------------------------------------------------------
-
-# the frequency to send info to netdata
-# passed by netdata as the first parameter
-update_every="${1-1}"
-
-# the netdata configuration directory
-# passed by netdata as an environment variable
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
-[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
-
-# the netdata directory for internal binaries
-[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="@pluginsdir_POST@"
-
-# -----------------------------------------------------------------------------
-# configuration options
-# can be overwritten at /etc/netdata/ioping.conf
-
-# the ioping binary to use
-# we need one that can output netdata friendly info (supporting: -N)
-# if you have multiple versions, put here the full filename of the right one
-ioping="${NETDATA_PLUGINS_DIR}/ioping"
-
-# the destination to ioping
-destination=""
-
-# the request size in bytes to ping the disk
-request_size="4k"
-
-# ioping options
-ioping_opts="-T 1000000"
-
-# -----------------------------------------------------------------------------
-# load the configuration files
-
-for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/${plugin}.conf" "${NETDATA_USER_CONFIG_DIR}/${plugin}.conf"; do
- if [ -f "${CONFIG}" ]; then
- debug "Loading config file '${CONFIG}'..."
- source "${CONFIG}"
-    [ $? -ne 0 ] && warning "Failed to load config file '${CONFIG}'."
- elif [[ $CONFIG =~ ^$NETDATA_USER_CONFIG_DIR ]]; then
- debug "Cannot find file '${CONFIG}'."
- fi
-done
-
-if [ -z "${destination}" ]
-then
- disable $NDLP_DEBUG "destination is not configured - nothing to do."
-fi
-
-if [ ! -f "${ioping}" ]
-then
- disable $NDLP_ERR "ioping command is not found. Please set its full path in '${NETDATA_USER_CONFIG_DIR}/${plugin}.conf'"
-fi
-
-if [ ! -x "${ioping}" ]
-then
- disable $NDLP_ERR "ioping command '${ioping}' is not executable - cannot proceed."
-fi
-
-# the ioping options we will use
-options=( -N -i ${update_every} -s ${request_size} ${ioping_opts} ${destination} )
-
-# execute ioping
-debug "starting ioping: ${ioping} ${options[*]}"
-
-exec "${ioping}" "${options[@]}"
-
-# if we cannot execute ioping, stop
-error "command '${ioping} ${options[*]}' failed to be executed (returned code $?)."
diff --git a/collectors/ioping.plugin/metadata.yaml b/collectors/ioping.plugin/metadata.yaml
deleted file mode 100644
index e3ec96162..000000000
--- a/collectors/ioping.plugin/metadata.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-plugin_name: ioping.plugin
-modules:
- - meta:
- plugin_name: ioping.plugin
- module_name: ioping.plugin
- monitored_instance:
- name: IOPing
- link: "https://github.com/koct9i/ioping"
- categories:
- - data-collection.synthetic-checks
- icon_filename: "syslog.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor IOPing metrics for efficient disk I/O latency tracking. Keep track of read/write speeds, latency, and error rates for optimized disk operations."
-        method_description: "The plugin uses the `ioping` command."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Install ioping
- description: |
- You can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`).
- configuration:
- file:
- name: "ioping.conf"
- description: "File with options to specify hardware to monitor and arguments for ioping command."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Data collection frequency.
- default_value: 1s
- required: false
- - name: destination
- description: The directory/file/device to ioping.
- default_value: ""
- required: true
- - name: request_size
-              description: The request size in bytes to ioping the destination (symbolic modifiers are supported).
- default_value: 4k
- required: false
- - name: ioping_opts
- description: Options passed to `ioping` commands.
- default_value: -T 1000000
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic Configuration
- description: This example has the minimum configuration necessary to have the plugin running.
- config: |
- destination="/dev/sda"
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: ioping_disk_latency
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ioping.conf
- metric: ioping.latency
- info: average I/O latency over the last 10 seconds
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: disk
- description: ""
- labels: []
- metrics:
- - name: ioping.latency
- description: Read Latency
- unit: "microseconds"
- chart_type: line
- dimensions:
- - name: latency
diff --git a/collectors/log2journal/Makefile.am b/collectors/log2journal/Makefile.am
deleted file mode 100644
index b13d2160b..000000000
--- a/collectors/log2journal/Makefile.am
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- tests.sh \
- README.md \
- tests.d/* \
- $(NULL)
-
-log2journalconfigdir=$(libconfigdir)/log2journal.d
-dist_log2journalconfig_DATA = \
- log2journal.d/nginx-combined.yaml \
- log2journal.d/nginx-json.yaml \
- log2journal.d/default.yaml \
- $(NULL)
diff --git a/collectors/log2journal/README.md b/collectors/log2journal/README.md
deleted file mode 100644
index 16ccc033c..000000000
--- a/collectors/log2journal/README.md
+++ /dev/null
@@ -1,912 +0,0 @@
-
-# log2journal
-
-`log2journal` and `systemd-cat-native` can be used to convert a structured log file, such as the ones generated by web servers, into `systemd-journal` entries.
-
-By combining these tools you can create advanced log processing pipelines that send any kind of structured text logs to systemd-journald. This is a simple but powerful and efficient way to handle log processing.
-
-The process involves the usual piping of shell commands, to get and process the log files in real time.
-
-The result looks like this: nginx logs in systemd-journal:
-
-![image](https://github.com/netdata/netdata/assets/2662304/16b471ff-c5a1-4fcc-bcd5-83551e089f6c)
-
-
-The overall process looks like this:
-
-```bash
-tail -F /var/log/nginx/*.log |\ # outputs log lines
- log2journal 'PATTERN' |\ # outputs Journal Export Format
- systemd-cat-native # send to local/remote journald
-```
-
-These are the steps:
-
-1. `tail -F /var/log/nginx/*.log`<br/>This command tails all `*.log` files in `/var/log/nginx/`. We use `-F` instead of `-f` to ensure that files will still be tailed after log rotation.
-2. `log2journal` is a Netdata program. It reads log entries and extracts fields according to the PCRE2 pattern it accepts. It can also apply some basic operations on the fields, like injecting new fields, duplicating existing ones, or rewriting their values. The output of `log2journal` is in Systemd Journal Export Format, and it looks like this:
- ```bash
- KEY1=VALUE1 # << start of the first log line
- KEY2=VALUE2
- # << log lines separator
- KEY1=VALUE1 # << start of the second log line
- KEY2=VALUE2
- ```
-3. `systemd-cat-native` is a Netdata program. It can send the logs to a local `systemd-journald` (journal namespaces supported), or to a remote `systemd-journal-remote`.
-
-
-## Processing pipeline
-
-The sequence of processing in Netdata's `log2journal` is designed to methodically transform and prepare log data for export in the systemd Journal Export Format. This transformation occurs through a pipeline of stages, each with a specific role in processing the log entries. Here's a description of each stage in the sequence:
-
-1. **Input**<br/>
- The tool reads one log line at a time from the input source. It supports different input formats such as JSON, logfmt, and free-form logs defined by PCRE2 patterns.
-
-2. **Extract Fields and Values**<br/>
- Based on the input format (JSON, logfmt, or custom pattern), it extracts fields and their values from each log line. In the case of JSON and logfmt, it automatically extracts all fields. For custom patterns, it uses PCRE2 regular expressions, and fields are extracted based on sub-expressions defined in the pattern.
-
-3. **Transliteration**<br/>
- Extracted fields are transliterated to the limited character set accepted by systemd-journal: capitals A-Z, digits 0-9, underscores.
-
-4. **Apply Optional Prefix**<br/>
- If a prefix is specified, it is added to all keys. This happens before any other processing so that all subsequent matches and manipulations take the prefix into account.
-
-5. **Rename Fields**<br/>
- Renames fields as specified in the configuration. This is used to change the names of the fields to match desired or required naming conventions.
-
-6. **Inject New Fields**<br/>
- New fields are injected into the log data. This can include constants or values derived from other fields, using variable substitution.
-
-7. **Rewrite Field Values**<br/>
- Applies rewriting rules to alter the values of the fields. This can involve complex transformations, including regular expressions and variable substitutions. The rewrite rules can also inject new fields into the data.
-
-8. **Filter Fields**<br/>
- Fields are filtered based on include and exclude patterns. This stage selects which fields are to be sent to the journal, allowing for selective logging.
-
-9. **Output**<br/>
- Finally, the processed log data is output in the Journal Export Format. This format is compatible with systemd's journaling system and can be sent to local or remote systemd journal systems, by piping the output of `log2journal` to `systemd-cat-native`.
-
-This pipeline ensures a flexible and comprehensive approach to log processing, allowing for a wide range of modifications and customizations to fit various logging requirements. Each stage builds upon the previous one, enabling complex log transformations and enrichments before the data is exported to the systemd journal.
-
-## Real-life example
-
-We have an nginx server logging in this standard combined log format:
-
-```bash
- log_format combined '$remote_addr - $remote_user [$time_local] '
- '"$request" $status $body_bytes_sent '
- '"$http_referer" "$http_user_agent"';
-```
-
-### Extracting fields with a pattern
-
-First, let's find the right pattern for `log2journal`. We ask ChatGPT:
-
-```
-My nginx log uses this log format:
-
-log_format access '$remote_addr - $remote_user [$time_local] '
- '"$request" $status $body_bytes_sent '
- '"$http_referer" "$http_user_agent"';
-
-I want to use `log2journal` to convert this log for systemd-journal.
-`log2journal` accepts a PCRE2 regular expression, using the named groups
-in the pattern as the journal fields to extract from the logs.
-
-Please give me the PCRE2 pattern to extract all the fields from my nginx
-log files.
-```
-
-ChatGPT replies with this:
-
-```regexp
- (?x) # Enable PCRE2 extended mode
- ^
- (?<remote_addr>[^ ]+) \s - \s
- (?<remote_user>[^ ]+) \s
- \[
- (?<time_local>[^\]]+)
- \]
- \s+ "
- (?<request>
- (?<request_method>[A-Z]+) \s+
- (?<request_uri>[^ ]+) \s+
- (?<server_protocol>[^"]+)
- )
- " \s+
- (?<status>\d+) \s+
- (?<body_bytes_sent>\d+) \s+
- "(?<http_referer>[^"]*)" \s+
- "(?<http_user_agent>[^"]*)"
-```
-
-Let's see what the above says:
-
-1. `(?x)`: enable PCRE2 extended mode. In this mode spaces and newlines in the pattern are ignored. To match a space you have to use `\s`. This mode allows us to split the pattern into multiple lines and add comments to it.
-2. `^`: match the beginning of the line
-3. `(?<remote_addr>[^ ]+)`: match anything up to the first space (`[^ ]+`), and name it `remote_addr`.
-4. `\s`: match a space
-5. `-`: match a hyphen
-6. and so on...
-
-We edit `nginx.yaml` and add it, like this:
-
-```yaml
-pattern: |
- (?x) # Enable PCRE2 extended mode
- ^
- (?<remote_addr>[^ ]+) \s - \s
- (?<remote_user>[^ ]+) \s
- \[
- (?<time_local>[^\]]+)
- \]
- \s+ "
- (?<request>
- (?<request_method>[A-Z]+) \s+
- (?<request_uri>[^ ]+) \s+
- (?<server_protocol>[^"]+)
- )
- " \s+
- (?<status>\d+) \s+
- (?<body_bytes_sent>\d+) \s+
- "(?<http_referer>[^"]*)" \s+
- "(?<http_user_agent>[^"]*)"
-```
-
-Let's test it with a sample line (instead of `tail`):
-
-```bash
-# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml
-BODY_BYTES_SENT=4172
-HTTP_REFERER=-
-HTTP_USER_AGENT=Go-http-client/1.1
-REMOTE_ADDR=1.2.3.4
-REMOTE_USER=-
-REQUEST=GET /index.html HTTP/1.1
-REQUEST_METHOD=GET
-REQUEST_URI=/index.html
-SERVER_PROTOCOL=HTTP/1.1
-STATUS=200
-TIME_LOCAL=19/Nov/2023:00:24:43 +0000
-
-```
-
-As you can see, it extracted all the fields and converted their names to capitals, as systemd-journal expects.
-
-### Prefixing field names
-
-To make sure the fields are unique for nginx and do not interfere with other applications, we should prefix them with `NGINX_`:
-
-```yaml
-pattern: |
- (?x) # Enable PCRE2 extended mode
- ^
- (?<remote_addr>[^ ]+) \s - \s
- (?<remote_user>[^ ]+) \s
- \[
- (?<time_local>[^\]]+)
- \]
- \s+ "
- (?<request>
- (?<request_method>[A-Z]+) \s+
- (?<request_uri>[^ ]+) \s+
- (?<server_protocol>[^"]+)
- )
- " \s+
- (?<status>\d+) \s+
- (?<body_bytes_sent>\d+) \s+
- "(?<http_referer>[^"]*)" \s+
- "(?<http_user_agent>[^"]*)"
-
-prefix: 'NGINX_' # <<< we added this
-```
-
-And let's try it:
-
-```bash
-# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml
-NGINX_BODY_BYTES_SENT=4172
-NGINX_HTTP_REFERER=-
-NGINX_HTTP_USER_AGENT=Go-http-client/1.1
-NGINX_REMOTE_ADDR=1.2.3.4
-NGINX_REMOTE_USER=-
-NGINX_REQUEST=GET /index.html HTTP/1.1
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_URI=/index.html
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_TIME_LOCAL=19/Nov/2023:00:24:43 +0000
-
-```
-
-### Renaming fields
-
-Now, all fields start with `NGINX_` but we want `NGINX_REQUEST` to be the `MESSAGE` of the log line, as we will see it by default in `journalctl` and the Netdata dashboard. Let's rename it:
-
-```yaml
-pattern: |
- (?x) # Enable PCRE2 extended mode
- ^
- (?<remote_addr>[^ ]+) \s - \s
- (?<remote_user>[^ ]+) \s
- \[
- (?<time_local>[^\]]+)
- \]
- \s+ "
- (?<request>
- (?<request_method>[A-Z]+) \s+
- (?<request_uri>[^ ]+) \s+
- (?<server_protocol>[^"]+)
- )
- " \s+
- (?<status>\d+) \s+
- (?<body_bytes_sent>\d+) \s+
- "(?<http_referer>[^"]*)" \s+
- "(?<http_user_agent>[^"]*)"
-
-prefix: 'NGINX_'
-
-rename: # <<< we added this
- - new_key: MESSAGE # <<< we added this
- old_key: NGINX_REQUEST # <<< we added this
-```
-
-Let's test it:
-
-```bash
-# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml
-MESSAGE=GET /index.html HTTP/1.1 # <<< renamed !
-NGINX_BODY_BYTES_SENT=4172
-NGINX_HTTP_REFERER=-
-NGINX_HTTP_USER_AGENT=Go-http-client/1.1
-NGINX_REMOTE_ADDR=1.2.3.4
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_URI=/index.html
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_TIME_LOCAL=19/Nov/2023:00:24:43 +0000
-
-```
-
-### Injecting new fields
-
-To have a complete message in journals we need 3 fields: `MESSAGE`, `PRIORITY` and `SYSLOG_IDENTIFIER`. We have already added `MESSAGE` by renaming `NGINX_REQUEST`. We can also inject a `SYSLOG_IDENTIFIER` and `PRIORITY`.
-
-Ideally, we would want the 5xx errors to be red in our `journalctl` output and the dashboard. To achieve that we need to set the `PRIORITY` field to the right log level. Log priorities are numeric and follow the `syslog` priorities. Checking `/usr/include/sys/syslog.h` we can see these:
-
-```c
-#define LOG_EMERG 0 /* system is unusable */
-#define LOG_ALERT 1 /* action must be taken immediately */
-#define LOG_CRIT 2 /* critical conditions */
-#define LOG_ERR 3 /* error conditions */
-#define LOG_WARNING 4 /* warning conditions */
-#define LOG_NOTICE 5 /* normal but significant condition */
-#define LOG_INFO 6 /* informational */
-#define LOG_DEBUG 7 /* debug-level messages */
-```
-
-Avoid setting priority to 0 (`LOG_EMERG`), because these messages will appear on your terminal (the journal uses `wall` to let you know of such events). A good priority for errors is 3 (red), or 4 (yellow).
-
-To set the PRIORITY field in the output, we can use `NGINX_STATUS`. We will do this in 2 steps: a) inject the priority field as a copy of `NGINX_STATUS`, and then b) use a pattern on its value to rewrite it to the priority level we want.
-
-First, let's inject `SYSLOG_IDENTIFIER` and `PRIORITY`:
-
-```yaml
-pattern: |
- (?x) # Enable PCRE2 extended mode
- ^
- (?<remote_addr>[^ ]+) \s - \s
- (?<remote_user>[^ ]+) \s
- \[
- (?<time_local>[^\]]+)
- \]
- \s+ "
- (?<request>
- (?<request_method>[A-Z]+) \s+
- (?<request_uri>[^ ]+) \s+
- (?<server_protocol>[^"]+)
- )
- " \s+
- (?<status>\d+) \s+
- (?<body_bytes_sent>\d+) \s+
- "(?<http_referer>[^"]*)" \s+
- "(?<http_user_agent>[^"]*)"
-
-prefix: 'NGINX_'
-
-rename:
- - new_key: MESSAGE
- old_key: NGINX_REQUEST
-
-inject: # <<< we added this
- - key: PRIORITY # <<< we added this
- value: '${NGINX_STATUS}' # <<< we added this
-
- - key: SYSLOG_IDENTIFIER # <<< we added this
- value: 'nginx-log' # <<< we added this
-```
-
-Let's see what this does:
-
-```bash
-# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml
-MESSAGE=GET /index.html HTTP/1.1
-NGINX_BODY_BYTES_SENT=4172
-NGINX_HTTP_REFERER=-
-NGINX_HTTP_USER_AGENT=Go-http-client/1.1
-NGINX_REMOTE_ADDR=1.2.3.4
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_URI=/index.html
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_TIME_LOCAL=19/Nov/2023:00:24:43 +0000
-PRIORITY=200 # <<< PRIORITY added
-SYSLOG_IDENTIFIER=nginx-log # <<< SYSLOG_IDENTIFIER added
-
-```
-
-### Rewriting field values
-
-Now we need to rewrite `PRIORITY` to the right syslog level based on its value (`NGINX_STATUS`). We will assign the priority 6 (info) when the status is 1xx, 2xx, 3xx, priority 5 (notice) when status is 4xx, priority 3 (error) when status is 5xx and anything else will go to priority 4 (warning). Let's do it:
-
-```yaml
-pattern: |
- (?x) # Enable PCRE2 extended mode
- ^
- (?<remote_addr>[^ ]+) \s - \s
- (?<remote_user>[^ ]+) \s
- \[
- (?<time_local>[^\]]+)
- \]
- \s+ "
- (?<request>
- (?<request_method>[A-Z]+) \s+
- (?<request_uri>[^ ]+) \s+
- (?<server_protocol>[^"]+)
- )
- " \s+
- (?<status>\d+) \s+
- (?<body_bytes_sent>\d+) \s+
- "(?<http_referer>[^"]*)" \s+
- "(?<http_user_agent>[^"]*)"
-
-prefix: 'NGINX_'
-
-rename:
- - new_key: MESSAGE
- old_key: NGINX_REQUEST
-
-inject:
- - key: PRIORITY
- value: '${NGINX_STATUS}'
-
-rewrite: # <<< we added this
- - key: PRIORITY # <<< we added this
- match: '^[123]' # <<< we added this
- value: 6 # <<< we added this
-
- - key: PRIORITY # <<< we added this
- match: '^4' # <<< we added this
- value: 5 # <<< we added this
-
- - key: PRIORITY # <<< we added this
- match: '^5' # <<< we added this
- value: 3 # <<< we added this
-
- - key: PRIORITY # <<< we added this
- match: '.*' # <<< we added this
- value: 4 # <<< we added this
-```
-
-Rewrite rules are processed in order, and by default the first rule that matches a field stops any further processing for that field. This is why the last rule, which matches everything, does not always change the priority to 4.
-
-Let's test it:
-
-```bash
-# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml
-MESSAGE=GET /index.html HTTP/1.1
-NGINX_BODY_BYTES_SENT=4172
-NGINX_HTTP_REFERER=-
-NGINX_HTTP_USER_AGENT=Go-http-client/1.1
-NGINX_REMOTE_ADDR=1.2.3.4
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_URI=/index.html
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_TIME_LOCAL=19/Nov/2023:00:24:43 +0000
-PRIORITY=6 # <<< PRIORITY rewritten here
-SYSLOG_IDENTIFIER=nginx-log
-
-```
-
-Rewrite rules are powerful. You can have named groups in them, like in the main pattern, to extract sub-fields, which you can then use in variable substitution. You can use rewrite rules to anonymize URLs, e.g. to remove customer IDs or transaction details from them.
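-
-A hypothetical sketch of such an anonymization rule (the field name and URL layout are placeholders, following the `rewrite` syntax shown above): the named group `path` captures the static part of the URI, and variable substitution rebuilds the value without the customer ID:
-
-```yaml
-rewrite:
-  - key: NGINX_REQUEST_URI
-    match: '^(?<path>/customers/)\d+'
-    value: '${path}REDACTED'
-```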
-
-### Sending logs to systemd-journal
-
-Now the message is ready to be sent to a systemd-journal. For this we use `systemd-cat-native`. This command can send such messages to a journal running on the localhost, a local journal namespace, or a `systemd-journal-remote` running on another server. By just appending `| systemd-cat-native` to the command, the message will be sent to the local journal.
-
-
-```bash
-# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml | systemd-cat-native
-# no output
-
-# let's find the message
-# journalctl -r -o verbose SYSLOG_IDENTIFIER=nginx-log
-Wed 2023-12-06 13:23:07.083299 EET [s=5290f0133f25407aaa1e2c451c0e4756;i=57194;b=0dfa96ecc2094cecaa8ec0efcb93b865;m=b133308867;t=60bd59346a289;x=5c1bdacf2b9c4bbd]
- PRIORITY=6
- _UID=0
- _GID=0
- _CAP_EFFECTIVE=1ffffffffff
- _SELINUX_CONTEXT=unconfined
- _BOOT_ID=0dfa96ecc2094cecaa8ec0efcb93b865
- _MACHINE_ID=355c8eca894d462bbe4c9422caf7a8bb
- _HOSTNAME=lab-logtest-src
- _RUNTIME_SCOPE=system
- _TRANSPORT=journal
- MESSAGE=GET /index.html HTTP/1.1
- NGINX_BODY_BYTES_SENT=4172
- NGINX_HTTP_REFERER=-
- NGINX_HTTP_USER_AGENT=Go-http-client/1.1
- NGINX_REMOTE_ADDR=1.2.3.4
- NGINX_REMOTE_USER=-
- NGINX_REQUEST_METHOD=GET
- NGINX_REQUEST_URI=/index.html
- NGINX_SERVER_PROTOCOL=HTTP/1.1
- NGINX_STATUS=200
- NGINX_TIME_LOCAL=19/Nov/2023:00:24:43 +0000
- SYSLOG_IDENTIFIER=nginx-log
- _PID=114343
- _COMM=systemd-cat-nat
- _AUDIT_SESSION=253
- _AUDIT_LOGINUID=1000
- _SYSTEMD_CGROUP=/user.slice/user-1000.slice/session-253.scope
- _SYSTEMD_SESSION=253
- _SYSTEMD_OWNER_UID=1000
- _SYSTEMD_UNIT=session-253.scope
- _SYSTEMD_SLICE=user-1000.slice
- _SYSTEMD_USER_SLICE=-.slice
- _SYSTEMD_INVOCATION_ID=c59e33ead8c24880b027e317b89f9f76
- _SOURCE_REALTIME_TIMESTAMP=1701861787083299
-
-```
-
-So, the log line, with all its fields parsed, ended up in systemd-journal. Now we can send all the nginx logs to systemd-journal like this:
-
-```bash
-tail -F /var/log/nginx/access.log |\
- log2journal -f nginx.yaml |\
- systemd-cat-native
-```
-
-## Best practices
-
-**Create a systemd service unit**: Add the above commands to a systemd unit file. Running the pipeline as a systemd service lets you start/stop it and check its status. Furthermore, you can use the `LogNamespace=` directive of systemd service units to isolate your nginx logs from the logs of the rest of the system. Here is how to do it:
-
-Create the file `/etc/systemd/system/nginx-logs.service` (change `/path/to/nginx.yaml` to the right path):
-
-```
-[Unit]
-Description=NGINX Log to Systemd Journal
-After=network.target
-
-[Service]
-ExecStart=/bin/sh -c 'tail -F /var/log/nginx/access.log | log2journal -f /path/to/nginx.yaml | systemd-cat-native'
-LogNamespace=nginx-logs
-Restart=always
-RestartSec=3
-
-[Install]
-WantedBy=multi-user.target
-```
-
-Reload systemd to grab this file:
-
-```bash
-sudo systemctl daemon-reload
-```
-
-Enable and start the service:
-
-```bash
-sudo systemctl enable nginx-logs.service
-sudo systemctl start nginx-logs.service
-```
-
-To see the logs of the namespace, use:
-
-```bash
-journalctl -f --namespace=nginx-logs
-```
-
-Netdata will automatically pick up the new namespace and present it in the list of sources on the dashboard.
-
-You can also instruct `systemd-cat-native` to log to a remote system, sending the logs to a `systemd-journal-remote` instance running on another server. Check [the manual of systemd-cat-native](https://github.com/netdata/netdata/blob/master/libnetdata/log/systemd-cat-native.md).
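-
-For example, assuming the `--url`, `--key`, `--cert` and `--trust` options described in that manual (they mirror the `systemd-journal-upload` conventions), shipping the logs to a remote `systemd-journal-remote` instance could look like this:
-
-```bash
-tail -F /var/log/nginx/access.log |\
-    log2journal -f nginx.yaml |\
-    systemd-cat-native --url https://logs.example.com:19532 \
-        --key /path/to/client.key --cert /path/to/client.crt --trust /path/to/ca.crt
-```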
-
-
-## Performance
-
-`log2journal` and `systemd-cat-native` have been designed to process hundreds of thousands of log lines per second. They both utilize high-performance indexing hashtables to speed up lookups, and queues that dynamically adapt to the volume of log lines offered, providing a smooth and fast experience under all conditions.
-
-In our tests, the combined CPU utilization of `log2journal` and `systemd-cat-native` versus `promtail` with a similar configuration is 1 to 5. So, `log2journal` and `systemd-cat-native` combined are 5 times faster than `promtail`.
-
-### PCRE2 patterns
-
-The key characteristic that can influence the performance of a log-processing pipeline using these tools is the quality of the PCRE2 patterns used. Poorly written PCRE2 patterns can make processing significantly slower and more CPU-intensive.
-
-The pattern `.*` in particular has the biggest impact on CPU consumption, especially when multiple `.*` appear in the same pattern.
-
-Usually we use `.*` to indicate that we need to match everything up to a character, e.g. `.* ` to match up to a space. By replacing it with `[^ ]+` (meaning: match one or more characters that are not a space), the regular expression engine can be a lot more efficient, reducing the overall CPU utilization significantly.
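-
-As an illustration, here are two hypothetical patterns extracting the first fields of an nginx log line; both produce the same fields, but the second avoids the backtracking caused by the greedy `.*` groups:
-
-```
-# slower: greedy wildcards force the regex engine to backtrack
-(?<remote_addr>.*) - (?<remote_user>.*) \[(?<time_local>.*)\]
-
-# faster: negated character classes match each field in a single pass
-(?<remote_addr>[^ ]+) - (?<remote_user>[^ ]+) \[(?<time_local>[^\]]+)\]
-```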
-
-### Performance of systemd journals
-
-The ingestion pipeline of logs, from `tail` to `systemd-journald` or `systemd-journal-remote`, is very efficient in all aspects. CPU utilization is better than any other system we tested, and RAM usage is independent of the number of fields indexed, making systemd-journal one of the most efficient log management engines for ingesting high volumes of structured logs.
-
-High field cardinality does not have a noticeable impact on systemd-journal. The number of fields indexed and the number of unique values per field have a linear and predictable effect on the resource utilization of `systemd-journald` and `systemd-journal-remote`. This is unlike other log management solutions, like Loki, whose RAM requirements grow exponentially as the cardinality increases, making it impractical for them to index the amount of information systemd journals can index.
-
-However, the number of fields added to journals influences the overall disk footprint. Fewer fields mean more log entries per journal file, a smaller overall disk footprint, and faster queries.
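-
-For example, using the `filter` section documented below, a configuration can drop fields that are rarely queried. The field names here assume the `NGINX_` prefix used in the earlier example:
-
-```yaml
-filter:
-  # keep the journal lean: never index the referrer and the user agent
-  exclude: '^(NGINX_HTTP_REFERER|NGINX_HTTP_USER_AGENT)$'
-```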
-
-systemd-journal files are primarily designed for security and reliability. This comes at the cost of disk footprint. The internal structure of journal files is such that, in case of corruption, minimal data loss will occur. To achieve this unique characteristic, certain data within the files needs to be aligned at predefined boundaries, so that if corruption occurs, the non-corrupted parts of the journal file can be recovered.
-
-Despite the fact that systemd-journald employs several techniques to optimize disk footprint, like deduplication of log entries, shared indexes for fields and their values, compression of long log entries, etc., the disk footprint of journal files is generally about 10x larger compared to other monitoring solutions, like Loki.
-
-This can be improved by storing journal files in a compressed filesystem. In our tests, a compressed filesystem can save up to 75% of the space required by journal files. The journal files will still be bigger than the overall disk footprint of other solutions, but the flexibility (index any number of fields), reliability (minimal potential data loss) and security (tampering protection and sealing) features of systemd-journal justify the difference.
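-
-As an illustration, assuming (hypothetically) that the journals live on a dedicated btrfs volume at `/dev/sdb1`, an `/etc/fstab` entry like the following enables transparent zstd compression for them:
-
-```
-# /etc/fstab - journal storage with transparent zstd compression
-/dev/sdb1  /var/log/journal  btrfs  compress=zstd:3,noatime  0  2
-```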
-
-When using versions of systemd prior to 254 to centralize logs on a remote system, `systemd-journal-remote` creates very small files (32MB). This results in increased duplication of information across the files, increasing the overall disk footprint. systemd version 254+ added options to `systemd-journal-remote` to control the maximum size per file, which can significantly reduce this duplication.
-
-Another limitation of the `systemd-journald` ecosystem is the uncompressed transmission of logs across systems. `systemd-journal-remote`, up to version 254 that we tested, accepts encrypted but uncompressed data. This means that when centralizing logs to a logs server, the bandwidth required will be higher compared to other log management solutions.
-
-## Security Considerations
-
-`log2journal` and `systemd-cat-native` are used to convert log files to structured logs in the systemd-journald ecosystem.
-
-Systemd-journal is a log management solution designed primarily for security and reliability. When configured properly, it can reliably and securely store your logs, ensuring they will be available and unchanged for as long as you need them.
-
-When sending logs to a remote system, `systemd-cat-native` can be configured the same way `systemd-journal-upload` is configured, using HTTPS and private keys to encrypt and secure their transmission over the network.
-
-When dealing with sensitive logs, organizations usually follow one of two strategies:
-
-1. Anonymize the logs before storing them, so that the stored logs do not have any sensitive information.
-2. Store the logs in full, including sensitive information, and carefully control who and how has access to them.
-
-Netdata can help in both cases.
-
-If you want to anonymize the logs before storing them, use rewriting rules at the `log2journal` phase to remove sensitive information from them. This process usually means matching the sensitive part and replacing it with `XXX`, `CUSTOMER_ID`, or `CREDIT_CARD_NUMBER`, so that the log entries stored in journal files will not include any such sensitive information.
-
-If, on the other hand, your organization prefers to maintain the full logs and control who has access to them and how, use Netdata Cloud to assign roles to your team members and control which roles can access the journal logs in your environment.
-
-## `log2journal` options
-
-```
-
-Netdata log2journal v1.43.0-341-gdac4df856
-
-Convert logs to systemd Journal Export Format.
-
- - JSON logs: extracts all JSON fields.
- - logfmt logs: extracts all logfmt fields.
-  - free-form logs: uses PCRE2 patterns to extract fields.
-
-Usage: ./log2journal [OPTIONS] PATTERN|json
-
-Options:
-
- --file /path/to/file.yaml or -f /path/to/file.yaml
- Read yaml configuration file for instructions.
-
- --config CONFIG_NAME or -c CONFIG_NAME
- Run with the internal YAML configuration named CONFIG_NAME.
- Available internal YAML configs:
-
- nginx-combined nginx-json default
-
---------------------------------------------------------------------------------
- INPUT PROCESSING
-
- PATTERN
- PATTERN should be a valid PCRE2 regular expression.
-     RE2 regular expressions (like the ones usually used in Go applications)
- are usually valid PCRE2 patterns too.
- Sub-expressions without named groups are evaluated, but their matches are
- not added to the output.
-
- - JSON mode
- JSON mode is enabled when the pattern is set to: json
- Field names are extracted from the JSON logs and are converted to the
- format expected by Journal Export Format (all caps, only _ is allowed).
-
- - logfmt mode
- logfmt mode is enabled when the pattern is set to: logfmt
- Field names are extracted from the logfmt logs and are converted to the
- format expected by Journal Export Format (all caps, only _ is allowed).
-
-       All keys extracted from the input are transliterated to match Journal
- semantics (capital A-Z, digits 0-9, underscore).
-
- In a YAML file:
- ```yaml
- pattern: 'PCRE2 pattern | json | logfmt'
- ```
-
---------------------------------------------------------------------------------
- GLOBALS
-
- --prefix PREFIX
- Prefix all fields with PREFIX. The PREFIX is added before any other
- processing, so that the extracted keys have to be matched with the PREFIX in
- them. PREFIX is NOT transliterated and it is assumed to be systemd-journal
- friendly.
-
- In a YAML file:
- ```yaml
- prefix: 'PREFIX_' # prepend all keys with this prefix.
- ```
-
- --filename-key KEY
- Add a field with KEY as the key and the current filename as value.
- Automatically detects filenames when piped after 'tail -F',
- and tail matches multiple filenames.
- To inject the filename when tailing a single file, use --inject.
-
- In a YAML file:
- ```yaml
- filename:
- key: KEY
- ```
-
---------------------------------------------------------------------------------
- RENAMING OF KEYS
-
- --rename NEW=OLD
- Rename fields. OLD has been transliterated and PREFIX has been added.
- NEW is assumed to be systemd journal friendly.
-
- Up to 512 renaming rules are allowed.
-
- In a YAML file:
- ```yaml
- rename:
- - new_key: KEY1
- old_key: KEY2 # transliterated with PREFIX added
- - new_key: KEY3
- old_key: KEY4 # transliterated with PREFIX added
- # add as many as required
- ```
-
---------------------------------------------------------------------------------
- INJECTING NEW KEYS
-
- --inject KEY=VALUE
- Inject constant fields to the output (both matched and unmatched logs).
- --inject entries are added to unmatched lines too, when their key is
-       not used in --inject-unmatched (--inject-unmatched overrides --inject).
- VALUE can use variable like ${OTHER_KEY} to be replaced with the values
- of other keys available.
-
- Up to 512 fields can be injected.
-
- In a YAML file:
- ```yaml
- inject:
- - key: KEY1
- value: 'VALUE1'
- - key: KEY2
- value: '${KEY3}${KEY4}' # gets the values of KEY3 and KEY4
- # add as many as required
- ```
-
---------------------------------------------------------------------------------
- REWRITING KEY VALUES
-
- --rewrite KEY=/MATCH/REPLACE[/OPTIONS]
- Apply a rewrite rule to the values of a specific key.
- The first character after KEY= is the separator, which should also
- be used between the MATCH, REPLACE and OPTIONS.
-
- OPTIONS can be a comma separated list of `non-empty`, `dont-stop` and
- `inject`.
-
- When `non-empty` is given, MATCH is expected to be a variable
-       substitution using `${KEY1}${KEY2}`. Once the substitution is completed,
-       the rule matches the KEY only if the result is not empty.
- When `non-empty` is not set, the MATCH string is expected to be a PCRE2
- regular expression to be checked against the KEY value. This PCRE2
- pattern may include named groups to extract parts of the KEY's value.
-
- REPLACE supports variable substitution like `${variable}` against MATCH
- named groups (when MATCH is a PCRE2 pattern) and `${KEY}` against the
- keys defined so far.
-
- Example:
- --rewrite DATE=/^(?<year>\d{4})-(?<month>\d{2})-(?<day>\d{2})$/
- ${day}/${month}/${year}
- The above will rewrite dates in the format YYYY-MM-DD to DD/MM/YYYY.
-
- Only one rewrite rule is applied per key; the sequence of rewrites for a
-       given key stops once a rule matches it. This allows providing a sequence
- of independent rewriting rules for the same key, matching the different
-       values the key may get, and also providing a catch-all rewrite rule at the
- end, for setting the key value if no other rule matched it. The rewrite
- rule can allow processing more rewrite rules when OPTIONS includes
- the keyword 'dont-stop'.
-
- Up to 512 rewriting rules are allowed.
-
- In a YAML file:
- ```yaml
- rewrite:
-       # the order of these rules is important - processed top to bottom
- - key: KEY1
- match: 'PCRE2 PATTERN WITH NAMED GROUPS'
- value: 'all match fields and input keys as ${VARIABLE}'
- inject: BOOLEAN # yes = inject the field, don't just rewrite it
- stop: BOOLEAN # no = continue processing, don't stop if matched
- - key: KEY2
- non_empty: '${KEY3}${KEY4}' # match only if this evaluates to non empty
- value: 'all input keys as ${VARIABLE}'
- inject: BOOLEAN # yes = inject the field, don't just rewrite it
- stop: BOOLEAN # no = continue processing, don't stop if matched
- # add as many rewrites as required
- ```
-
- By default rewrite rules are applied only on fields already defined.
- This allows shipping YAML files that include more rewrites than are
- required for a specific input file.
- Rewrite rules however allow injecting new fields when OPTIONS include
- the keyword `inject` or in YAML `inject: yes` is given.
-
- MATCH on the command line can be empty to define an unconditional rule.
- Similarly, `match` and `non_empty` can be omitted in the YAML file.
---------------------------------------------------------------------------------
- UNMATCHED LINES
-
- --unmatched-key KEY
- Include unmatched log entries in the output with KEY as the field name.
- Use this to include unmatched entries to the output stream.
- Usually it should be set to --unmatched-key=MESSAGE so that the
- unmatched entry will appear as the log message in the journals.
- Use --inject-unmatched to inject additional fields to unmatched lines.
-
- In a YAML file:
- ```yaml
- unmatched:
- key: MESSAGE # inject the error log as MESSAGE
- ```
-
- --inject-unmatched LINE
- Inject lines into the output for each unmatched log entry.
- Usually, --inject-unmatched=PRIORITY=3 is needed to mark the unmatched
- lines as errors, so that they can easily be spotted in the journals.
-
- Up to 512 such lines can be injected.
-
- In a YAML file:
- ```yaml
- unmatched:
- key: MESSAGE # inject the error log as MESSAGE
-       inject:
- - key: KEY1
- value: 'VALUE1'
- # add as many constants as required
- ```
-
---------------------------------------------------------------------------------
- FILTERING
-
- --include PATTERN
- Include only keys matching the PCRE2 PATTERN.
-       Useful when parsing JSON or logfmt logs, to include only the keys given.
- The keys are matched after the PREFIX has been added to them.
-
- --exclude PATTERN
- Exclude the keys matching the PCRE2 PATTERN.
-       Useful when parsing JSON or logfmt logs, to exclude some of the keys given.
- The keys are matched after the PREFIX has been added to them.
-
- When both include and exclude patterns are set and both match a key,
-       exclude wins and the key will not be added; like a pipeline, we first
- include it and then exclude it.
-
- In a YAML file:
- ```yaml
- filter:
- include: 'PCRE2 PATTERN MATCHING KEY NAMES TO INCLUDE'
- exclude: 'PCRE2 PATTERN MATCHING KEY NAMES TO EXCLUDE'
- ```
-
---------------------------------------------------------------------------------
- OTHER
-
- -h, or --help
- Display this help and exit.
-
- --show-config
- Show the configuration in YAML format before starting the job.
- This is also an easy way to convert command line parameters to yaml.
-
-The program accepts all parameters as both --option=value and --option value.
-
-The maximum log line length accepted is 1048576 characters.
-
-PIPELINE AND SEQUENCE OF PROCESSING
-
-This is a simple diagram of the pipeline taking place:
-
- +---------------------------------------------------+
- | INPUT |
- | read one log line at a time |
- +---------------------------------------------------+
- v v v v v v
- +---------------------------------------------------+
- | EXTRACT FIELDS AND VALUES |
- | JSON, logfmt, or pattern based |
- | (apply optional PREFIX - all keys use capitals) |
- +---------------------------------------------------+
- v v v v v v
- +---------------------------------------------------+
- | RENAME FIELDS |
- | change the names of the fields |
- +---------------------------------------------------+
- v v v v v v
- +---------------------------------------------------+
- | INJECT NEW FIELDS |
- | constants, or other field values as variables |
- +---------------------------------------------------+
- v v v v v v
- +---------------------------------------------------+
- | REWRITE FIELD VALUES |
- | pipeline multiple rewriting rules to alter |
- | the values of the fields |
- +---------------------------------------------------+
- v v v v v v
- +---------------------------------------------------+
- | FILTER FIELDS |
- | use include and exclude patterns on the field |
- | names, to select which fields are sent to journal |
- +---------------------------------------------------+
- v v v v v v
- +---------------------------------------------------+
- | OUTPUT |
- | generate Journal Export Format |
- +---------------------------------------------------+
-
---------------------------------------------------------------------------------
-JOURNAL FIELDS RULES (enforced by systemd-journald)
-
- - field names can be up to 64 characters
- - the only allowed field characters are A-Z, 0-9 and underscore
- - the first character of fields cannot be a digit
- - protected journal fields start with underscore:
- * they are accepted by systemd-journal-remote
- * they are NOT accepted by a local systemd-journald
-
- For best results, always include these fields:
-
- MESSAGE=TEXT
- The MESSAGE is the body of the log entry.
- This field is what we usually see in our logs.
-
- PRIORITY=NUMBER
- PRIORITY sets the severity of the log entry.
- 0=emerg, 1=alert, 2=crit, 3=err, 4=warn, 5=notice, 6=info, 7=debug
- - Emergency events (0) are usually broadcast to all terminals.
- - Emergency, alert, critical, and error (0-3) are usually colored red.
- - Warning (4) entries are usually colored yellow.
- - Notice (5) entries are usually bold or have a brighter white color.
- - Info (6) entries are the default.
- - Debug (7) entries are usually grayed or dimmed.
-
- SYSLOG_IDENTIFIER=NAME
-       SYSLOG_IDENTIFIER sets the name of the application.
- Use something descriptive, like: SYSLOG_IDENTIFIER=nginx-logs
-
-You can find the most common fields at 'man systemd.journal-fields'.
-
-```
-
-`log2journal` supports YAML configuration files, like the ones found [in this directory](https://github.com/netdata/netdata/tree/master/collectors/log2journal/log2journal.d).
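-
-For example, instead of writing a YAML file of your own, you can start from one of the internal configurations listed in the help output above:
-
-```bash
-# use the built-in configuration for the nginx combined log format
-tail -F /var/log/nginx/access.log | log2journal -c nginx-combined | systemd-cat-native
-```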
-
-## `systemd-cat-native` options
-
-Read [the manual of systemd-cat-native](https://github.com/netdata/netdata/blob/master/libnetdata/log/systemd-cat-native.md).
diff --git a/collectors/log2journal/log2journal-help.c b/collectors/log2journal/log2journal-help.c
deleted file mode 100644
index 21be948e8..000000000
--- a/collectors/log2journal/log2journal-help.c
+++ /dev/null
@@ -1,377 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "log2journal.h"
-
-static void config_dir_print_available(void) {
- const char *path = LOG2JOURNAL_CONFIG_PATH;
- DIR *dir;
- struct dirent *entry;
-
- dir = opendir(path);
-
- if (dir == NULL) {
- log2stderr(" >>> Cannot open directory:\n %s", path);
- return;
- }
-
- size_t column_width = 80;
- size_t current_columns = 7; // Start with 7 spaces for the first line
-
- while ((entry = readdir(dir))) {
- if (entry->d_type == DT_REG) { // Check if it's a regular file
- const char *file_name = entry->d_name;
- size_t len = strlen(file_name);
- if (len >= 5 && strcmp(file_name + len - 5, ".yaml") == 0) {
- // Remove the ".yaml" extension
- len -= 5;
- if (current_columns == 7) {
- printf(" "); // Print 7 spaces at the beginning of a new line
- }
- if (current_columns + len + 1 > column_width) {
- // Start a new line if the current line is full
- printf("\n "); // Print newline and 7 spaces
- current_columns = 7;
- }
- printf("%.*s ", (int)len, file_name); // Print the filename without extension
- current_columns += len + 1; // Add filename length and a space
- }
- }
- }
-
- closedir(dir);
- printf("\n"); // Add a newline at the end
-}
-
-void log_job_command_line_help(const char *name) {
- printf("\n");
- printf("Netdata log2journal " PACKAGE_VERSION "\n");
- printf("\n");
- printf("Convert logs to systemd Journal Export Format.\n");
- printf("\n");
- printf(" - JSON logs: extracts all JSON fields.\n");
- printf(" - logfmt logs: extracts all logfmt fields.\n");
-    printf("  - free-form logs: uses PCRE2 patterns to extract fields.\n");
- printf("\n");
- printf("Usage: %s [OPTIONS] PATTERN|json\n", name);
- printf("\n");
- printf("Options:\n");
- printf("\n");
-#ifdef HAVE_LIBYAML
- printf(" --file /path/to/file.yaml or -f /path/to/file.yaml\n");
- printf(" Read yaml configuration file for instructions.\n");
- printf("\n");
- printf(" --config CONFIG_NAME or -c CONFIG_NAME\n");
- printf(" Run with the internal YAML configuration named CONFIG_NAME.\n");
- printf(" Available internal YAML configs:\n");
- printf("\n");
- config_dir_print_available();
- printf("\n");
-#else
- printf(" IMPORTANT:\n");
- printf(" YAML configuration parsing is not compiled in this binary.\n");
- printf("\n");
-#endif
- printf("--------------------------------------------------------------------------------\n");
- printf(" INPUT PROCESSING\n");
- printf("\n");
- printf(" PATTERN\n");
- printf(" PATTERN should be a valid PCRE2 regular expression.\n");
-    printf("     RE2 regular expressions (like the ones usually used in Go applications)\n");
- printf(" are usually valid PCRE2 patterns too.\n");
- printf(" Sub-expressions without named groups are evaluated, but their matches are\n");
- printf(" not added to the output.\n");
- printf("\n");
- printf(" - JSON mode\n");
- printf(" JSON mode is enabled when the pattern is set to: json\n");
- printf(" Field names are extracted from the JSON logs and are converted to the\n");
- printf(" format expected by Journal Export Format (all caps, only _ is allowed).\n");
- printf("\n");
- printf(" - logfmt mode\n");
- printf(" logfmt mode is enabled when the pattern is set to: logfmt\n");
- printf(" Field names are extracted from the logfmt logs and are converted to the\n");
- printf(" format expected by Journal Export Format (all caps, only _ is allowed).\n");
- printf("\n");
-    printf("       All keys extracted from the input are transliterated to match Journal\n");
- printf(" semantics (capital A-Z, digits 0-9, underscore).\n");
- printf("\n");
- printf(" In a YAML file:\n");
- printf(" ```yaml\n");
- printf(" pattern: 'PCRE2 pattern | json | logfmt'\n");
- printf(" ```\n");
- printf("\n");
- printf("--------------------------------------------------------------------------------\n");
- printf(" GLOBALS\n");
- printf("\n");
- printf(" --prefix PREFIX\n");
- printf(" Prefix all fields with PREFIX. The PREFIX is added before any other\n");
- printf(" processing, so that the extracted keys have to be matched with the PREFIX in\n");
- printf(" them. PREFIX is NOT transliterated and it is assumed to be systemd-journal\n");
- printf(" friendly.\n");
- printf("\n");
- printf(" In a YAML file:\n");
- printf(" ```yaml\n");
- printf(" prefix: 'PREFIX_' # prepend all keys with this prefix.\n");
- printf(" ```\n");
- printf("\n");
- printf(" --filename-key KEY\n");
- printf(" Add a field with KEY as the key and the current filename as value.\n");
- printf(" Automatically detects filenames when piped after 'tail -F',\n");
- printf(" and tail matches multiple filenames.\n");
- printf(" To inject the filename when tailing a single file, use --inject.\n");
- printf("\n");
- printf(" In a YAML file:\n");
- printf(" ```yaml\n");
- printf(" filename:\n");
- printf(" key: KEY\n");
- printf(" ```\n");
- printf("\n");
- printf("--------------------------------------------------------------------------------\n");
- printf(" RENAMING OF KEYS\n");
- printf("\n");
- printf(" --rename NEW=OLD\n");
- printf(" Rename fields. OLD has been transliterated and PREFIX has been added.\n");
- printf(" NEW is assumed to be systemd journal friendly.\n");
- printf("\n");
- printf(" Up to %d renaming rules are allowed.\n", MAX_RENAMES);
- printf("\n");
- printf(" In a YAML file:\n");
- printf(" ```yaml\n");
- printf(" rename:\n");
- printf(" - new_key: KEY1\n");
- printf(" old_key: KEY2 # transliterated with PREFIX added\n");
- printf(" - new_key: KEY3\n");
- printf(" old_key: KEY4 # transliterated with PREFIX added\n");
- printf(" # add as many as required\n");
- printf(" ```\n");
- printf("\n");
- printf("--------------------------------------------------------------------------------\n");
- printf(" INJECTING NEW KEYS\n");
- printf("\n");
- printf(" --inject KEY=VALUE\n");
- printf(" Inject constant fields to the output (both matched and unmatched logs).\n");
- printf(" --inject entries are added to unmatched lines too, when their key is\n");
-    printf("       not used in --inject-unmatched (--inject-unmatched overrides --inject).\n");
- printf(" VALUE can use variable like ${OTHER_KEY} to be replaced with the values\n");
- printf(" of other keys available.\n");
- printf("\n");
- printf(" Up to %d fields can be injected.\n", MAX_INJECTIONS);
- printf("\n");
- printf(" In a YAML file:\n");
- printf(" ```yaml\n");
- printf(" inject:\n");
- printf(" - key: KEY1\n");
- printf(" value: 'VALUE1'\n");
- printf(" - key: KEY2\n");
- printf(" value: '${KEY3}${KEY4}' # gets the values of KEY3 and KEY4\n");
- printf(" # add as many as required\n");
- printf(" ```\n");
- printf("\n");
- printf("--------------------------------------------------------------------------------\n");
- printf(" REWRITING KEY VALUES\n");
- printf("\n");
- printf(" --rewrite KEY=/MATCH/REPLACE[/OPTIONS]\n");
- printf(" Apply a rewrite rule to the values of a specific key.\n");
- printf(" The first character after KEY= is the separator, which should also\n");
- printf(" be used between the MATCH, REPLACE and OPTIONS.\n");
- printf("\n");
- printf(" OPTIONS can be a comma separated list of `non-empty`, `dont-stop` and\n");
- printf(" `inject`.\n");
- printf("\n");
- printf(" When `non-empty` is given, MATCH is expected to be a variable\n");
-    printf("       substitution using `${KEY1}${KEY2}`. Once the substitution is completed,\n");
-    printf("       the rule matches the KEY only if the result is not empty.\n");
- printf(" When `non-empty` is not set, the MATCH string is expected to be a PCRE2\n");
- printf(" regular expression to be checked against the KEY value. This PCRE2\n");
- printf(" pattern may include named groups to extract parts of the KEY's value.\n");
- printf("\n");
- printf(" REPLACE supports variable substitution like `${variable}` against MATCH\n");
- printf(" named groups (when MATCH is a PCRE2 pattern) and `${KEY}` against the\n");
- printf(" keys defined so far.\n");
- printf("\n");
- printf(" Example:\n");
- printf(" --rewrite DATE=/^(?<year>\\d{4})-(?<month>\\d{2})-(?<day>\\d{2})$/\n");
- printf(" ${day}/${month}/${year}\n");
- printf(" The above will rewrite dates in the format YYYY-MM-DD to DD/MM/YYYY.\n");
- printf("\n");
- printf(" Only one rewrite rule is applied per key; the sequence of rewrites for a\n");
-    printf("       given key stops once a rule matches it. This allows providing a sequence\n");
- printf(" of independent rewriting rules for the same key, matching the different\n");
-    printf("       values the key may get, and also providing a catch-all rewrite rule at the\n");
- printf(" end, for setting the key value if no other rule matched it. The rewrite\n");
- printf(" rule can allow processing more rewrite rules when OPTIONS includes\n");
- printf(" the keyword 'dont-stop'.\n");
- printf("\n");
- printf(" Up to %d rewriting rules are allowed.\n", MAX_REWRITES);
- printf("\n");
- printf(" In a YAML file:\n");
- printf(" ```yaml\n");
- printf(" rewrite:\n");
-    printf("       # the order of these rules is important - processed top to bottom\n");
- printf(" - key: KEY1\n");
- printf(" match: 'PCRE2 PATTERN WITH NAMED GROUPS'\n");
- printf(" value: 'all match fields and input keys as ${VARIABLE}'\n");
- printf(" inject: BOOLEAN # yes = inject the field, don't just rewrite it\n");
- printf(" stop: BOOLEAN # no = continue processing, don't stop if matched\n");
- printf(" - key: KEY2\n");
- printf(" non_empty: '${KEY3}${KEY4}' # match only if this evaluates to non empty\n");
- printf(" value: 'all input keys as ${VARIABLE}'\n");
- printf(" inject: BOOLEAN # yes = inject the field, don't just rewrite it\n");
- printf(" stop: BOOLEAN # no = continue processing, don't stop if matched\n");
- printf(" # add as many rewrites as required\n");
- printf(" ```\n");
- printf("\n");
- printf(" By default rewrite rules are applied only on fields already defined.\n");
- printf(" This allows shipping YAML files that include more rewrites than are\n");
- printf(" required for a specific input file.\n");
- printf(" Rewrite rules however allow injecting new fields when OPTIONS include\n");
- printf(" the keyword `inject` or in YAML `inject: yes` is given.\n");
- printf("\n");
- printf(" MATCH on the command line can be empty to define an unconditional rule.\n");
- printf(" Similarly, `match` and `non_empty` can be omitted in the YAML file.");
- printf("\n");
- printf("--------------------------------------------------------------------------------\n");
- printf(" UNMATCHED LINES\n");
- printf("\n");
- printf(" --unmatched-key KEY\n");
- printf(" Include unmatched log entries in the output with KEY as the field name.\n");
- printf(" Use this to include unmatched entries to the output stream.\n");
- printf(" Usually it should be set to --unmatched-key=MESSAGE so that the\n");
- printf(" unmatched entry will appear as the log message in the journals.\n");
- printf(" Use --inject-unmatched to inject additional fields to unmatched lines.\n");
- printf("\n");
- printf(" In a YAML file:\n");
- printf(" ```yaml\n");
- printf(" unmatched:\n");
- printf(" key: MESSAGE # inject the error log as MESSAGE\n");
- printf(" ```\n");
- printf("\n");
- printf(" --inject-unmatched LINE\n");
- printf(" Inject lines into the output for each unmatched log entry.\n");
- printf(" Usually, --inject-unmatched=PRIORITY=3 is needed to mark the unmatched\n");
- printf(" lines as errors, so that they can easily be spotted in the journals.\n");
- printf("\n");
- printf(" Up to %d such lines can be injected.\n", MAX_INJECTIONS);
- printf("\n");
- printf(" In a YAML file:\n");
- printf(" ```yaml\n");
- printf(" unmatched:\n");
- printf(" key: MESSAGE # inject the error log as MESSAGE\n");
-    printf("       inject:\n");
- printf(" - key: KEY1\n");
- printf(" value: 'VALUE1'\n");
- printf(" # add as many constants as required\n");
- printf(" ```\n");
- printf("\n");
- printf("--------------------------------------------------------------------------------\n");
- printf(" FILTERING\n");
- printf("\n");
- printf(" --include PATTERN\n");
- printf(" Include only keys matching the PCRE2 PATTERN.\n");
-    printf("       Useful when parsing JSON or logfmt logs, to include only the keys given.\n");
- printf(" The keys are matched after the PREFIX has been added to them.\n");
- printf("\n");
- printf(" --exclude PATTERN\n");
- printf(" Exclude the keys matching the PCRE2 PATTERN.\n");
-    printf("       Useful when parsing JSON or logfmt logs, to exclude some of the keys given.\n");
- printf(" The keys are matched after the PREFIX has been added to them.\n");
- printf("\n");
- printf(" When both include and exclude patterns are set and both match a key,\n");
-    printf("       exclude wins and the key will not be added; like a pipeline, we first\n");
- printf(" include it and then exclude it.\n");
- printf("\n");
- printf(" In a YAML file:\n");
- printf(" ```yaml\n");
- printf(" filter:\n");
- printf(" include: 'PCRE2 PATTERN MATCHING KEY NAMES TO INCLUDE'\n");
- printf(" exclude: 'PCRE2 PATTERN MATCHING KEY NAMES TO EXCLUDE'\n");
- printf(" ```\n");
- printf("\n");
- printf("--------------------------------------------------------------------------------\n");
- printf(" OTHER\n");
- printf("\n");
- printf(" -h, or --help\n");
- printf(" Display this help and exit.\n");
- printf("\n");
- printf(" --show-config\n");
- printf(" Show the configuration in YAML format before starting the job.\n");
- printf(" This is also an easy way to convert command line parameters to yaml.\n");
- printf("\n");
- printf("The program accepts all parameters as both --option=value and --option value.\n");
- printf("\n");
- printf("The maximum log line length accepted is %d characters.\n", MAX_LINE_LENGTH);
- printf("\n");
- printf("PIPELINE AND SEQUENCE OF PROCESSING\n");
- printf("\n");
- printf("This is a simple diagram of the pipeline taking place:\n");
- printf(" \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" | INPUT | \n");
- printf(" | read one log line at a time | \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" v v v v v v \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" | EXTRACT FIELDS AND VALUES | \n");
- printf(" | JSON, logfmt, or pattern based | \n");
- printf(" | (apply optional PREFIX - all keys use capitals) | \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" v v v v v v \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" | RENAME FIELDS | \n");
- printf(" | change the names of the fields | \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" v v v v v v \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" | INJECT NEW FIELDS | \n");
- printf(" | constants, or other field values as variables | \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" v v v v v v \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" | REWRITE FIELD VALUES | \n");
- printf(" | pipeline multiple rewriting rules to alter | \n");
- printf(" | the values of the fields | \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" v v v v v v \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" | FILTER FIELDS | \n");
- printf(" | use include and exclude patterns on the field | \n");
- printf(" | names, to select which fields are sent to journal | \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" v v v v v v \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" | OUTPUT | \n");
- printf(" | generate Journal Export Format | \n");
- printf(" +---------------------------------------------------+ \n");
- printf(" \n");
- printf("--------------------------------------------------------------------------------\n");
- printf("JOURNAL FIELDS RULES (enforced by systemd-journald)\n");
- printf("\n");
- printf(" - field names can be up to 64 characters\n");
- printf(" - the only allowed field characters are A-Z, 0-9 and underscore\n");
- printf(" - the first character of fields cannot be a digit\n");
- printf(" - protected journal fields start with underscore:\n");
- printf(" * they are accepted by systemd-journal-remote\n");
- printf(" * they are NOT accepted by a local systemd-journald\n");
- printf("\n");
- printf(" For best results, always include these fields:\n");
- printf("\n");
- printf(" MESSAGE=TEXT\n");
- printf(" The MESSAGE is the body of the log entry.\n");
- printf(" This field is what we usually see in our logs.\n");
- printf("\n");
- printf(" PRIORITY=NUMBER\n");
- printf(" PRIORITY sets the severity of the log entry.\n");
- printf(" 0=emerg, 1=alert, 2=crit, 3=err, 4=warn, 5=notice, 6=info, 7=debug\n");
- printf(" - Emergency events (0) are usually broadcast to all terminals.\n");
- printf(" - Emergency, alert, critical, and error (0-3) are usually colored red.\n");
- printf(" - Warning (4) entries are usually colored yellow.\n");
- printf(" - Notice (5) entries are usually bold or have a brighter white color.\n");
- printf(" - Info (6) entries are the default.\n");
- printf(" - Debug (7) entries are usually grayed or dimmed.\n");
- printf("\n");
- printf(" SYSLOG_IDENTIFIER=NAME\n");
-    printf("       SYSLOG_IDENTIFIER sets the name of the application.\n");
- printf(" Use something descriptive, like: SYSLOG_IDENTIFIER=nginx-logs\n");
- printf("\n");
- printf("You can find the most common fields at 'man systemd.journal-fields'.\n");
- printf("\n");
-}
diff --git a/collectors/log2journal/log2journal-inject.c b/collectors/log2journal/log2journal-inject.c
deleted file mode 100644
index 45158066b..000000000
--- a/collectors/log2journal/log2journal-inject.c
+++ /dev/null
@@ -1,49 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "log2journal.h"
-
-void injection_cleanup(INJECTION *inj) {
- hashed_key_cleanup(&inj->key);
- replace_pattern_cleanup(&inj->value);
-}
-
-static inline bool log_job_injection_replace(INJECTION *inj, const char *key, size_t key_len, const char *value, size_t value_len) {
- if(key_len > JOURNAL_MAX_KEY_LEN)
- log2stderr("WARNING: injection key '%.*s' is too long for journal. Will be truncated.", (int)key_len, key);
-
- if(value_len > JOURNAL_MAX_VALUE_LEN)
- log2stderr("WARNING: injection value of key '%.*s' is too long for journal. Will be truncated.", (int)key_len, key);
-
- hashed_key_len_set(&inj->key, key, key_len);
- char *v = strndupz(value, value_len);
- bool ret = replace_pattern_set(&inj->value, v);
- freez(v);
-
- return ret;
-}
-
-bool log_job_injection_add(LOG_JOB *jb, const char *key, size_t key_len, const char *value, size_t value_len, bool unmatched) {
- if (unmatched) {
- if (jb->unmatched.injections.used >= MAX_INJECTIONS) {
- log2stderr("Error: too many unmatched injections. You can inject up to %d lines.", MAX_INJECTIONS);
- return false;
- }
- }
- else {
- if (jb->injections.used >= MAX_INJECTIONS) {
- log2stderr("Error: too many injections. You can inject up to %d lines.", MAX_INJECTIONS);
- return false;
- }
- }
-
- bool ret;
- if (unmatched) {
- ret = log_job_injection_replace(&jb->unmatched.injections.keys[jb->unmatched.injections.used++],
- key, key_len, value, value_len);
- } else {
- ret = log_job_injection_replace(&jb->injections.keys[jb->injections.used++],
- key, key_len, value, value_len);
- }
-
- return ret;
-}
diff --git a/collectors/log2journal/log2journal-json.c b/collectors/log2journal/log2journal-json.c
deleted file mode 100644
index 2ca294e4d..000000000
--- a/collectors/log2journal/log2journal-json.c
+++ /dev/null
@@ -1,630 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "log2journal.h"
-
-#define JSON_ERROR_LINE_MAX 1024
-#define JSON_KEY_MAX 1024
-#define JSON_DEPTH_MAX 100
-
-struct log_json_state {
- LOG_JOB *jb;
-
- const char *line;
- uint32_t pos;
- uint32_t depth;
- char *stack[JSON_DEPTH_MAX];
-
- char key[JSON_KEY_MAX];
- char msg[JSON_ERROR_LINE_MAX];
-};
-
-static inline bool json_parse_object(LOG_JSON_STATE *js);
-static inline bool json_parse_array(LOG_JSON_STATE *js);
-
-#define json_current_pos(js) &(js)->line[(js)->pos]
-#define json_consume_char(js) ++(js)->pos
-
-static inline void json_process_key_value(LOG_JSON_STATE *js, const char *value, size_t len) {
- log_job_send_extracted_key_value(js->jb, js->key, value, len);
-}
-
-static inline void json_skip_spaces(LOG_JSON_STATE *js) {
- const char *s = json_current_pos(js);
- const char *start = s;
-
- while(isspace(*s)) s++;
-
- js->pos += s - start;
-}
-
-static inline bool json_expect_char_after_white_space(LOG_JSON_STATE *js, const char *expected) {
- json_skip_spaces(js);
-
- const char *s = json_current_pos(js);
- for(const char *e = expected; *e ;e++) {
- if (*s == *e)
- return true;
- }
-
- snprintf(js->msg, sizeof(js->msg),
-             "JSON PARSER: character '%c' is not one of the expected characters (%s), at pos %u",
- *s ? *s : '?', expected, js->pos);
-
- return false;
-}
-
-static inline bool json_parse_null(LOG_JSON_STATE *js) {
- const char *s = json_current_pos(js);
- if (strncmp(s, "null", 4) == 0) {
- json_process_key_value(js, "null", 4);
- js->pos += 4;
- return true;
- }
- else {
- snprintf(js->msg, sizeof(js->msg),
-                 "JSON PARSER: expected 'null', found '%.4s' at position %u", s, js->pos);
- return false;
- }
-}
-
-static inline bool json_parse_true(LOG_JSON_STATE *js) {
- const char *s = json_current_pos(js);
- if (strncmp(s, "true", 4) == 0) {
- json_process_key_value(js, "true", 4);
- js->pos += 4;
- return true;
- }
- else {
- snprintf(js->msg, sizeof(js->msg),
-                 "JSON PARSER: expected 'true', found '%.4s' at position %u", s, js->pos);
- return false;
- }
-}
-
-static inline bool json_parse_false(LOG_JSON_STATE *js) {
- const char *s = json_current_pos(js);
- if (strncmp(s, "false", 5) == 0) {
- json_process_key_value(js, "false", 5);
- js->pos += 5;
- return true;
- }
- else {
- snprintf(js->msg, sizeof(js->msg),
-                 "JSON PARSER: expected 'false', found '%.4s' at position %u", s, js->pos);
- return false;
- }
-}
-
-static inline bool json_parse_number(LOG_JSON_STATE *js) {
- static __thread char value[8192];
-
- value[0] = '\0';
- char *d = value;
- const char *s = json_current_pos(js);
- size_t remaining = sizeof(value) - 1; // Reserve space for null terminator
-
- // Optional minus sign
- if (*s == '-') {
- *d++ = *s++;
- remaining--;
- }
-
- // Digits before decimal point
- while (*s >= '0' && *s <= '9') {
- if (remaining < 2) {
-            snprintf(js->msg, sizeof(js->msg), "JSON PARSER: truncated number value at pos %u", js->pos);
- return false;
- }
- *d++ = *s++;
- remaining--;
- }
-
- // Decimal point and fractional part
- if (*s == '.') {
- *d++ = *s++;
- remaining--;
-
- while (*s >= '0' && *s <= '9') {
- if (remaining < 2) {
-                snprintf(js->msg, sizeof(js->msg), "JSON PARSER: truncated fractional part at pos %u", js->pos);
- return false;
- }
- *d++ = *s++;
- remaining--;
- }
- }
-
- // Exponent part
- if (*s == 'e' || *s == 'E') {
- *d++ = *s++;
- remaining--;
-
- // Optional sign in exponent
- if (*s == '+' || *s == '-') {
- *d++ = *s++;
- remaining--;
- }
-
- while (*s >= '0' && *s <= '9') {
- if (remaining < 2) {
-                snprintf(js->msg, sizeof(js->msg), "JSON PARSER: truncated exponent at pos %u", js->pos);
- return false;
- }
- *d++ = *s++;
- remaining--;
- }
- }
-
- *d = '\0';
- js->pos += d - value;
-
- if (d > value) {
- json_process_key_value(js, value, d - value);
- return true;
- } else {
-        snprintf(js->msg, sizeof(js->msg), "JSON PARSER: invalid number format at pos %u", js->pos);
- return false;
- }
-}
-
-static inline bool encode_utf8(unsigned codepoint, char **d, size_t *remaining) {
- if (codepoint <= 0x7F) {
- // 1-byte sequence
- if (*remaining < 2) return false; // +1 for the null
- *(*d)++ = (char)codepoint;
- (*remaining)--;
- }
- else if (codepoint <= 0x7FF) {
- // 2-byte sequence
- if (*remaining < 3) return false; // +1 for the null
- *(*d)++ = (char)(0xC0 | ((codepoint >> 6) & 0x1F));
- *(*d)++ = (char)(0x80 | (codepoint & 0x3F));
- (*remaining) -= 2;
- }
- else if (codepoint <= 0xFFFF) {
- // 3-byte sequence
- if (*remaining < 4) return false; // +1 for the null
- *(*d)++ = (char)(0xE0 | ((codepoint >> 12) & 0x0F));
- *(*d)++ = (char)(0x80 | ((codepoint >> 6) & 0x3F));
- *(*d)++ = (char)(0x80 | (codepoint & 0x3F));
- (*remaining) -= 3;
- }
- else if (codepoint <= 0x10FFFF) {
- // 4-byte sequence
- if (*remaining < 5) return false; // +1 for the null
- *(*d)++ = (char)(0xF0 | ((codepoint >> 18) & 0x07));
- *(*d)++ = (char)(0x80 | ((codepoint >> 12) & 0x3F));
- *(*d)++ = (char)(0x80 | ((codepoint >> 6) & 0x3F));
- *(*d)++ = (char)(0x80 | (codepoint & 0x3F));
- (*remaining) -= 4;
- }
- else
- // Invalid code point
- return false;
-
- return true;
-}
-
-size_t parse_surrogate(const char *s, char *d, size_t *remaining) {
- if (s[0] != '\\' || (s[1] != 'u' && s[1] != 'U')) {
- return 0; // Not a valid Unicode escape sequence
- }
-
- char hex[9] = {0}; // Buffer for the hexadecimal value
- unsigned codepoint;
-
- if (s[1] == 'u') {
- // Handle \uXXXX
- if (!isxdigit(s[2]) || !isxdigit(s[3]) || !isxdigit(s[4]) || !isxdigit(s[5])) {
- return 0; // Not a valid \uXXXX sequence
- }
-
- hex[0] = s[2];
- hex[1] = s[3];
- hex[2] = s[4];
- hex[3] = s[5];
- codepoint = (unsigned)strtoul(hex, NULL, 16);
-
- if (codepoint >= 0xD800 && codepoint <= 0xDBFF) {
- // Possible start of surrogate pair
- if (s[6] == '\\' && s[7] == 'u' && isxdigit(s[8]) && isxdigit(s[9]) &&
- isxdigit(s[10]) && isxdigit(s[11])) {
- // Valid low surrogate
- unsigned low_surrogate = strtoul(&s[8], NULL, 16);
- if (low_surrogate < 0xDC00 || low_surrogate > 0xDFFF) {
- return 0; // Invalid low surrogate
- }
- codepoint = 0x10000 + ((codepoint - 0xD800) << 10) + (low_surrogate - 0xDC00);
- return encode_utf8(codepoint, &d, remaining) ? 12 : 0; // \uXXXX\uXXXX
- }
- }
-
- // Single \uXXXX
- return encode_utf8(codepoint, &d, remaining) ? 6 : 0;
- }
- else {
- // Handle \UXXXXXXXX
- for (int i = 2; i < 10; i++) {
- if (!isxdigit(s[i])) {
- return 0; // Not a valid \UXXXXXXXX sequence
- }
- hex[i - 2] = s[i];
- }
- codepoint = (unsigned)strtoul(hex, NULL, 16);
- return encode_utf8(codepoint, &d, remaining) ? 10 : 0; // \UXXXXXXXX
- }
-}
-
-static inline void copy_newline(LOG_JSON_STATE *js __maybe_unused, char **d, size_t *remaining) {
- if(*remaining > 3) {
- *(*d)++ = '\\';
- *(*d)++ = 'n';
- (*remaining) -= 2;
- }
-}
-
-static inline void copy_tab(LOG_JSON_STATE *js __maybe_unused, char **d, size_t *remaining) {
- if(*remaining > 3) {
- *(*d)++ = '\\';
- *(*d)++ = 't';
- (*remaining) -= 2;
- }
-}
-
-static inline bool json_parse_string(LOG_JSON_STATE *js) {
- static __thread char value[JOURNAL_MAX_VALUE_LEN];
-
- if(!json_expect_char_after_white_space(js, "\""))
- return false;
-
- json_consume_char(js);
-
- value[0] = '\0';
- char *d = value;
- const char *s = json_current_pos(js);
- size_t remaining = sizeof(value);
-
- while (*s && *s != '"') {
- char c;
-
- if (*s == '\\') {
- s++;
-
- switch (*s) {
- case 'n':
- copy_newline(js, &d, &remaining);
- s++;
- continue;
-
- case 't':
- copy_tab(js, &d, &remaining);
- s++;
- continue;
-
- case 'f':
- case 'b':
- case 'r':
- c = ' ';
- s++;
- break;
-
- case 'u': {
- size_t old_remaining = remaining;
- size_t consumed = parse_surrogate(s - 1, d, &remaining);
- if (consumed > 0) {
- s += consumed - 1; // -1 because we already incremented s after '\\'
- d += old_remaining - remaining;
- continue;
- }
- else {
- *d++ = '\\';
- remaining--;
- c = *s++;
- }
- }
- break;
-
- default:
- c = *s++;
- break;
- }
- }
- else
- c = *s++;
-
- if(remaining < 2) {
- snprintf(js->msg, sizeof(js->msg),
-                     "JSON PARSER: truncated string value at pos %u", js->pos);
- return false;
- }
- else {
- *d++ = c;
- remaining--;
- }
- }
- *d = '\0';
- js->pos += s - json_current_pos(js);
-
- if(!json_expect_char_after_white_space(js, "\""))
- return false;
-
- json_consume_char(js);
-
- if(d > value)
- json_process_key_value(js, value, d - value);
-
- return true;
-}
-
-static inline bool json_parse_key_and_push(LOG_JSON_STATE *js) {
- if (!json_expect_char_after_white_space(js, "\""))
- return false;
-
- if(js->depth >= JSON_DEPTH_MAX - 1) {
- snprintf(js->msg, sizeof(js->msg),
-                 "JSON PARSER: object too deep, at pos %u", js->pos);
- return false;
- }
-
- json_consume_char(js);
-
- char *d = js->stack[js->depth];
- if(js->depth)
- *d++ = '_';
-
- size_t remaining = sizeof(js->key) - (d - js->key);
-
- const char *s = json_current_pos(js);
- char last_c = '\0';
- while(*s && *s != '\"') {
- char c;
-
- if (*s == '\\') {
- s++;
- c = (char)((*s == 'u') ? '_' : journal_key_characters_map[(unsigned char)*s]);
- s += (*s == 'u') ? 5 : 1;
- }
- else
- c = journal_key_characters_map[(unsigned char)*s++];
-
- if(c == '_' && last_c == '_')
- continue;
- else {
- if(remaining < 2) {
- snprintf(js->msg, sizeof(js->msg),
-                         "JSON PARSER: key buffer full - keys are too long, at pos %u", js->pos);
- return false;
- }
- *d++ = c;
- remaining--;
- }
-
- last_c = c;
- }
- *d = '\0';
- js->pos += s - json_current_pos(js);
-
- if (!json_expect_char_after_white_space(js, "\""))
- return false;
-
- json_consume_char(js);
-
- js->stack[++js->depth] = d;
-
- return true;
-}
-
-static inline bool json_key_pop(LOG_JSON_STATE *js) {
- if(js->depth <= 0) {
- snprintf(js->msg, sizeof(js->msg),
-                 "JSON PARSER: cannot pop a key at depth %u, at pos %u", js->depth, js->pos);
- return false;
- }
-
- char *k = js->stack[js->depth--];
- *k = '\0';
- return true;
-}
-
-static inline bool json_parse_value(LOG_JSON_STATE *js) {
- if(!json_expect_char_after_white_space(js, "-.0123456789tfn\"{["))
- return false;
-
- const char *s = json_current_pos(js);
- switch(*s) {
- case '-':
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- return json_parse_number(js);
-
- case 't':
- return json_parse_true(js);
-
- case 'f':
- return json_parse_false(js);
-
- case 'n':
- return json_parse_null(js);
-
- case '"':
- return json_parse_string(js);
-
- case '{':
- return json_parse_object(js);
-
- case '[':
- return json_parse_array(js);
- }
-
- snprintf(js->msg, sizeof(js->msg),
-             "JSON PARSER: unexpected character at pos %u", js->pos);
- return false;
-}
-
-static inline bool json_key_index_and_push(LOG_JSON_STATE *js, size_t index) {
-    // mirror the depth check done in json_parse_key_and_push(), so that
-    // deeply nested arrays cannot overflow the stack[] array
-    if(js->depth >= JSON_DEPTH_MAX - 1) {
-        snprintf(js->msg, sizeof(js->msg),
-                 "JSON PARSER: array too deep, at pos %u", js->pos);
-        return false;
-    }
-
-    char *d = js->stack[js->depth];
-    if(js->depth > 0) {
-        *d++ = '_';
-    }
-
- // Convert index to string manually
- char temp[32];
- char *t = temp + sizeof(temp) - 1; // Start at the end of the buffer
- *t = '\0';
-
- do {
- *--t = (char)((index % 10) + '0');
- index /= 10;
- } while (index > 0);
-
- size_t remaining = sizeof(js->key) - (d - js->key);
-
- // Append the index to the key
- while (*t) {
- if(remaining < 2) {
- snprintf(js->msg, sizeof(js->msg),
-                     "JSON PARSER: key buffer full - keys are too long, at pos %u", js->pos);
- return false;
- }
-
- *d++ = *t++;
- remaining--;
- }
-
- *d = '\0'; // Null-terminate the key
- js->stack[++js->depth] = d;
-
- return true;
-}
-
-static inline bool json_parse_array(LOG_JSON_STATE *js) {
- if(!json_expect_char_after_white_space(js, "["))
- return false;
-
- json_consume_char(js);
-
- size_t index = 0;
- do {
- if(!json_key_index_and_push(js, index))
- return false;
-
- if(!json_parse_value(js))
- return false;
-
- json_key_pop(js);
-
- if(!json_expect_char_after_white_space(js, ",]"))
- return false;
-
- const char *s = json_current_pos(js);
- json_consume_char(js);
- if(*s == ',') {
- index++;
- continue;
- }
-        else // ]
- break;
-
- } while(true);
-
- return true;
-}
-
-static inline bool json_parse_object(LOG_JSON_STATE *js) {
- if(!json_expect_char_after_white_space(js, "{"))
- return false;
-
- json_consume_char(js);
-
- do {
- if (!json_expect_char_after_white_space(js, "\""))
- return false;
-
- if(!json_parse_key_and_push(js))
- return false;
-
- if(!json_expect_char_after_white_space(js, ":"))
- return false;
-
- json_consume_char(js);
-
- if(!json_parse_value(js))
- return false;
-
- json_key_pop(js);
-
- if(!json_expect_char_after_white_space(js, ",}"))
- return false;
-
- const char *s = json_current_pos(js);
- json_consume_char(js);
- if(*s == ',')
- continue;
- else // }
- break;
-
- } while(true);
-
- return true;
-}
-
-LOG_JSON_STATE *json_parser_create(LOG_JOB *jb) {
- LOG_JSON_STATE *js = mallocz(sizeof(LOG_JSON_STATE));
- memset(js, 0, sizeof(LOG_JSON_STATE));
- js->jb = jb;
-
- if(jb->prefix)
- copy_to_buffer(js->key, sizeof(js->key), js->jb->prefix, strlen(js->jb->prefix));
-
- js->stack[0] = &js->key[strlen(js->key)];
-
- return js;
-}
-
-void json_parser_destroy(LOG_JSON_STATE *js) {
- if(js)
- freez(js);
-}
-
-const char *json_parser_error(LOG_JSON_STATE *js) {
- return js->msg;
-}
-
-bool json_parse_document(LOG_JSON_STATE *js, const char *txt) {
- js->line = txt;
- js->pos = 0;
- js->msg[0] = '\0';
- js->stack[0][0] = '\0';
- js->depth = 0;
-
- if(!json_parse_object(js))
- return false;
-
- json_skip_spaces(js);
- const char *s = json_current_pos(js);
-
- if(*s) {
- snprintf(js->msg, sizeof(js->msg),
-                 "JSON PARSER: excess characters found after document is finished, at pos %u", js->pos);
- return false;
- }
-
- return true;
-}
-
-void json_test(void) {
-    LOG_JOB jb = { .prefix = "NGINX_" };
- LOG_JSON_STATE *json = json_parser_create(&jb);
-
- json_parse_document(json, "{\"value\":\"\\u\\u039A\\u03B1\\u03BB\\u03B7\\u03BC\\u03AD\\u03C1\\u03B1\"}");
-
- json_parser_destroy(json);
-}
diff --git a/collectors/log2journal/log2journal-logfmt.c b/collectors/log2journal/log2journal-logfmt.c
deleted file mode 100644
index 5966cce90..000000000
--- a/collectors/log2journal/log2journal-logfmt.c
+++ /dev/null
@@ -1,226 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "log2journal.h"
-
-#define LOGFMT_ERROR_LINE_MAX 1024
-#define LOGFMT_KEY_MAX 1024
-
-struct logfmt_state {
- LOG_JOB *jb;
-
- const char *line;
- uint32_t pos;
- uint32_t key_start;
-
- char key[LOGFMT_KEY_MAX];
- char msg[LOGFMT_ERROR_LINE_MAX];
-};
-
-#define logfmt_current_pos(lfs) &(lfs)->line[(lfs)->pos]
-#define logfmt_consume_char(lfs) ++(lfs)->pos
-
-static inline void logfmt_process_key_value(LOGFMT_STATE *lfs, const char *value, size_t len) {
- log_job_send_extracted_key_value(lfs->jb, lfs->key, value, len);
-}
-
-static inline void logfmt_skip_spaces(LOGFMT_STATE *lfs) {
- const char *s = logfmt_current_pos(lfs);
- const char *start = s;
-
- while(isspace(*s)) s++;
-
- lfs->pos += s - start;
-}
-
-static inline void copy_newline(LOGFMT_STATE *lfs __maybe_unused, char **d, size_t *remaining) {
- if(*remaining > 3) {
- *(*d)++ = '\\';
- *(*d)++ = 'n';
- (*remaining) -= 2;
- }
-}
-
-static inline void copy_tab(LOGFMT_STATE *lfs __maybe_unused, char **d, size_t *remaining) {
- if(*remaining > 3) {
- *(*d)++ = '\\';
- *(*d)++ = 't';
- (*remaining) -= 2;
- }
-}
-
-static inline bool logfmt_parse_value(LOGFMT_STATE *lfs) {
- static __thread char value[JOURNAL_MAX_VALUE_LEN];
-
- char quote = '\0';
- const char *s = logfmt_current_pos(lfs);
- if(*s == '\"' || *s == '\'') {
- quote = *s;
- logfmt_consume_char(lfs);
- }
-
- value[0] = '\0';
- char *d = value;
- s = logfmt_current_pos(lfs);
- size_t remaining = sizeof(value);
-
- char end_char = (char)(quote == '\0' ? ' ' : quote);
- while (*s && *s != end_char) {
- char c;
-
- if (*s == '\\') {
- s++;
-
- switch (*s) {
- case 'n':
- copy_newline(lfs, &d, &remaining);
- s++;
- continue;
-
- case 't':
- copy_tab(lfs, &d, &remaining);
- s++;
- continue;
-
- case 'f':
- case 'b':
- case 'r':
- c = ' ';
- s++;
- break;
-
- default:
- c = *s++;
- break;
- }
- }
- else
- c = *s++;
-
- if(remaining < 2) {
- snprintf(lfs->msg, sizeof(lfs->msg),
-                     "LOGFMT PARSER: truncated string value at pos %u", lfs->pos);
- return false;
- }
- else {
- *d++ = c;
- remaining--;
- }
- }
- *d = '\0';
- lfs->pos += s - logfmt_current_pos(lfs);
-
- s = logfmt_current_pos(lfs);
-
- if(quote != '\0') {
- if (*s != quote) {
- snprintf(lfs->msg, sizeof(lfs->msg),
-                     "LOGFMT PARSER: missing quote at pos %u: '%s'",
- lfs->pos, s);
- return false;
- }
- else
- logfmt_consume_char(lfs);
- }
-
- if(d > value)
- logfmt_process_key_value(lfs, value, d - value);
-
- return true;
-}
-
-static inline bool logfmt_parse_key(LOGFMT_STATE *lfs) {
- logfmt_skip_spaces(lfs);
-
- char *d = &lfs->key[lfs->key_start];
-
- size_t remaining = sizeof(lfs->key) - (d - lfs->key);
-
- const char *s = logfmt_current_pos(lfs);
- char last_c = '\0';
- while(*s && *s != '=') {
- char c;
-
- if (*s == '\\')
- s++;
-
- c = journal_key_characters_map[(unsigned char)*s++];
-
- if(c == '_' && last_c == '_')
- continue;
- else {
- if(remaining < 2) {
- snprintf(lfs->msg, sizeof(lfs->msg),
-                         "LOGFMT PARSER: key buffer full - keys are too long, at pos %u", lfs->pos);
- return false;
- }
- *d++ = c;
- remaining--;
- }
-
- last_c = c;
- }
- *d = '\0';
- lfs->pos += s - logfmt_current_pos(lfs);
-
- s = logfmt_current_pos(lfs);
- if(*s != '=') {
- snprintf(lfs->msg, sizeof(lfs->msg),
- "LOGFMT PARSER: key is missing the equal sign, at pos %zu", lfs->pos);
- return false;
- }
-
- logfmt_consume_char(lfs);
-
- return true;
-}
-
-LOGFMT_STATE *logfmt_parser_create(LOG_JOB *jb) {
- LOGFMT_STATE *lfs = mallocz(sizeof(LOGFMT_STATE));
- memset(lfs, 0, sizeof(LOGFMT_STATE));
- lfs->jb = jb;
-
- if(jb->prefix)
- lfs->key_start = copy_to_buffer(lfs->key, sizeof(lfs->key), lfs->jb->prefix, strlen(lfs->jb->prefix));
-
- return lfs;
-}
-
-void logfmt_parser_destroy(LOGFMT_STATE *lfs) {
- if(lfs)
- freez(lfs);
-}
-
-const char *logfmt_parser_error(LOGFMT_STATE *lfs) {
- return lfs->msg;
-}
-
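-// Parse a whole logfmt line as a space-separated sequence of key=value
-// pairs. Parsing stops at the first malformed pair and reports an error.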
-bool logfmt_parse_document(LOGFMT_STATE *lfs, const char *txt) {
- lfs->line = txt;
- lfs->pos = 0;
- lfs->msg[0] = '\0';
-
- const char *s;
- do {
- if(!logfmt_parse_key(lfs))
- return false;
-
- if(!logfmt_parse_value(lfs))
- return false;
-
- logfmt_skip_spaces(lfs);
-
- s = logfmt_current_pos(lfs);
- } while(*s);
-
- return true;
-}
-
-
-void logfmt_test(void) {
- LOG_JOB jb = { .prefix = "NIGNX_" };
- LOGFMT_STATE *logfmt = logfmt_parser_create(&jb);
-
- logfmt_parse_document(logfmt, "x=1 y=2 z=\"3 \\ 4\" 5 ");
-
- logfmt_parser_destroy(logfmt);
-}
diff --git a/collectors/log2journal/log2journal-params.c b/collectors/log2journal/log2journal-params.c
deleted file mode 100644
index a7bb3e263..000000000
--- a/collectors/log2journal/log2journal-params.c
+++ /dev/null
@@ -1,404 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "log2journal.h"
-
-// ----------------------------------------------------------------------------
-
-void log_job_init(LOG_JOB *jb) {
- memset(jb, 0, sizeof(*jb));
- simple_hashtable_init_KEY(&jb->hashtable, 32);
- hashed_key_set(&jb->line.key, "LINE");
-}
-
-static void simple_hashtable_cleanup_allocated_keys(SIMPLE_HASHTABLE_KEY *ht) {
- SIMPLE_HASHTABLE_FOREACH_READ_ONLY(ht, sl, _KEY) {
- HASHED_KEY *k = SIMPLE_HASHTABLE_FOREACH_READ_ONLY_VALUE(sl);
- if(k && k->flags & HK_HASHTABLE_ALLOCATED) {
- // the order of these statements is important!
- simple_hashtable_del_slot_KEY(ht, sl); // remove any references to k
- hashed_key_cleanup(k); // cleanup the internals of k
- freez(k); // free k
- }
- }
-}
-
-void log_job_cleanup(LOG_JOB *jb) {
- hashed_key_cleanup(&jb->line.key);
-
- if(jb->prefix) {
- freez((void *) jb->prefix);
- jb->prefix = NULL;
- }
-
- if(jb->pattern) {
- freez((void *) jb->pattern);
- jb->pattern = NULL;
- }
-
- for(size_t i = 0; i < jb->injections.used ;i++)
- injection_cleanup(&jb->injections.keys[i]);
-
- for(size_t i = 0; i < jb->unmatched.injections.used ;i++)
- injection_cleanup(&jb->unmatched.injections.keys[i]);
-
- for(size_t i = 0; i < jb->renames.used ;i++)
- rename_cleanup(&jb->renames.array[i]);
-
- for(size_t i = 0; i < jb->rewrites.used; i++)
- rewrite_cleanup(&jb->rewrites.array[i]);
-
- txt_cleanup(&jb->rewrites.tmp);
- txt_cleanup(&jb->filename.current);
-
- simple_hashtable_cleanup_allocated_keys(&jb->hashtable);
- simple_hashtable_destroy_KEY(&jb->hashtable);
-
- // remove references to everything else, to reveal them in valgrind
- memset(jb, 0, sizeof(*jb));
-}
-
-// ----------------------------------------------------------------------------
-
-bool log_job_filename_key_set(LOG_JOB *jb, const char *key, size_t key_len) {
- if(!key || !*key) {
- log2stderr("filename key cannot be empty.");
- return false;
- }
-
- hashed_key_len_set(&jb->filename.key, key, key_len);
-
- return true;
-}
-
-bool log_job_key_prefix_set(LOG_JOB *jb, const char *prefix, size_t prefix_len) {
- if(!prefix || !*prefix) {
- log2stderr("filename key cannot be empty.");
- return false;
- }
-
- if(jb->prefix)
- freez((char*)jb->prefix);
-
- jb->prefix = strndupz(prefix, prefix_len);
-
- return true;
-}
-
-bool log_job_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) {
- if(!pattern || !*pattern) {
- log2stderr("filename key cannot be empty.");
- return false;
- }
-
- if(jb->pattern)
- freez((char*)jb->pattern);
-
- jb->pattern = strndupz(pattern, pattern_len);
-
- return true;
-}
-
-bool log_job_include_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) {
- if(jb->filter.include.re) {
- log2stderr("FILTER INCLUDE: there is already an include filter set");
- return false;
- }
-
- if(!search_pattern_set(&jb->filter.include, pattern, pattern_len)) {
- log2stderr("FILTER INCLUDE: failed: %s", jb->filter.include.error.txt);
- return false;
- }
-
- return true;
-}
-
-bool log_job_exclude_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) {
- if(jb->filter.exclude.re) {
- log2stderr("FILTER INCLUDE: there is already an exclude filter set");
- return false;
- }
-
- if(!search_pattern_set(&jb->filter.exclude, pattern, pattern_len)) {
- log2stderr("FILTER EXCLUDE: failed: %s", jb->filter.exclude.error.txt);
- return false;
- }
-
- return true;
-}
-
-// ----------------------------------------------------------------------------
-
-static bool parse_rename(LOG_JOB *jb, const char *param) {
- // Search for '=' in param
- const char *equal_sign = strchr(param, '=');
- if (!equal_sign || equal_sign == param) {
- log2stderr("Error: Invalid rename format, '=' not found in %s", param);
- return false;
- }
-
- const char *new_key = param;
- size_t new_key_len = equal_sign - new_key;
-
- const char *old_key = equal_sign + 1;
- size_t old_key_len = strlen(old_key);
-
- return log_job_rename_add(jb, new_key, new_key_len, old_key, old_key_len);
-}
-
-static bool is_symbol(char c) {
- return !isalpha(c) && !isdigit(c) && !iscntrl(c);
-}
-
-struct {
- const char *keyword;
- int action;
- RW_FLAGS flag;
-} rewrite_flags[] = {
- {"match", 1, RW_MATCH_PCRE2},
- {"match", 0, RW_MATCH_NON_EMPTY},
-
- {"regex", 1, RW_MATCH_PCRE2},
- {"regex", 0, RW_MATCH_NON_EMPTY},
-
- {"pcre2", 1, RW_MATCH_PCRE2},
- {"pcre2", 0, RW_MATCH_NON_EMPTY},
-
- {"non_empty", 1, RW_MATCH_NON_EMPTY},
- {"non_empty", 0, RW_MATCH_PCRE2},
-
- {"non-empty", 1, RW_MATCH_NON_EMPTY},
- {"non-empty", 0, RW_MATCH_PCRE2},
-
- {"not_empty", 1, RW_MATCH_NON_EMPTY},
- {"not_empty", 0, RW_MATCH_PCRE2},
-
- {"not-empty", 1, RW_MATCH_NON_EMPTY},
- {"not-empty", 0, RW_MATCH_PCRE2},
-
- {"stop", 0, RW_DONT_STOP},
- {"no-stop", 1, RW_DONT_STOP},
- {"no_stop", 1, RW_DONT_STOP},
- {"dont-stop", 1, RW_DONT_STOP},
- {"dont_stop", 1, RW_DONT_STOP},
- {"continue", 1, RW_DONT_STOP},
- {"inject", 1, RW_INJECT},
- {"existing", 0, RW_INJECT},
-};
-
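-// Parse the comma-separated options of a rewrite rule. Each keyword sets or
-// clears one of the RW_* flags; unknown keywords are reported and ignored.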
-RW_FLAGS parse_rewrite_flags(const char *options) {
- RW_FLAGS flags = RW_MATCH_PCRE2; // Default option
-
- // Tokenize the input options using ","
- char *token;
- char *optionsCopy = strdup(options); // Make a copy to avoid modifying the original
- token = strtok(optionsCopy, ",");
-
- while (token != NULL) {
- // Find the keyword-action mapping
- bool found = false;
-
- for (size_t i = 0; i < sizeof(rewrite_flags) / sizeof(rewrite_flags[0]); i++) {
- if (strcmp(token, rewrite_flags[i].keyword) == 0) {
- if (rewrite_flags[i].action == 1) {
- flags |= rewrite_flags[i].flag; // Set the flag
- } else {
- flags &= ~rewrite_flags[i].flag; // Unset the flag
- }
-
- found = true;
- }
- }
-
- if(!found)
- log2stderr("Warning: rewrite options '%s' is not understood.", token);
-
- // Get the next token
- token = strtok(NULL, ",");
- }
-
- free(optionsCopy); // Free the copied string
-
- return flags;
-}
-
-
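-// Parse a --rewrite parameter of the form KEY=/search/replace[/options],
-// where the separator is the first symbol character found after the '='.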
-static bool parse_rewrite(LOG_JOB *jb, const char *param) {
- // Search for '=' in param
- const char *equal_sign = strchr(param, '=');
- if (!equal_sign || equal_sign == param) {
- log2stderr("Error: Invalid rewrite format, '=' not found in %s", param);
- return false;
- }
-
- // Get the next character as the separator
- char separator = *(equal_sign + 1);
- if (!separator || !is_symbol(separator)) {
- log2stderr("Error: rewrite separator not found after '=', or is not one of /\\|-# in: %s", param);
- return false;
- }
-
- // Find the next occurrence of the separator
- const char *second_separator = strchr(equal_sign + 2, separator);
- if (!second_separator) {
- log2stderr("Error: rewrite second separator not found in: %s", param);
- return false;
- }
-
- // Check if the search pattern is empty
- if (equal_sign + 1 == second_separator) {
- log2stderr("Error: rewrite search pattern is empty in: %s", param);
- return false;
- }
-
- // Check if the replacement pattern is empty
- if (*(second_separator + 1) == '\0') {
- log2stderr("Error: rewrite replacement pattern is empty in: %s", param);
- return false;
- }
-
- RW_FLAGS flags = RW_MATCH_PCRE2;
- const char *third_separator = strchr(second_separator + 1, separator);
- if(third_separator)
- flags = parse_rewrite_flags(third_separator + 1);
-
- // Extract key, search pattern, and replacement pattern
- char *key = strndupz(param, equal_sign - param);
- char *search_pattern = strndupz(equal_sign + 2, second_separator - (equal_sign + 2));
- char *replace_pattern = third_separator ? strndupz(second_separator + 1, third_separator - (second_separator + 1)) : strdupz(second_separator + 1);
-
- if(!*search_pattern)
- flags &= ~RW_MATCH_PCRE2;
-
- bool ret = log_job_rewrite_add(jb, key, flags, search_pattern, replace_pattern);
-
- freez(key);
- freez(search_pattern);
- freez(replace_pattern);
-
- return ret;
-}
-
-static bool parse_inject(LOG_JOB *jb, const char *value, bool unmatched) {
- const char *equal = strchr(value, '=');
- if (!equal) {
- log2stderr("Error: injection '%s' does not have an equal sign.", value);
- return false;
- }
-
- const char *key = value;
- const char *val = equal + 1;
- log_job_injection_add(jb, key, equal - key, val, strlen(val), unmatched);
-
- return true;
-}
-
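-// Parse the command line. Parameters may be given as --name=value or as
-// --name value; a bare argument that matches no known parameter is taken as
-// the PCRE2 pattern, and only one pattern may be given.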
-bool log_job_command_line_parse_parameters(LOG_JOB *jb, int argc, char **argv) {
- for (int i = 1; i < argc; i++) {
- char *arg = argv[i];
- if (strcmp(arg, "--help") == 0 || strcmp(arg, "-h") == 0) {
- log_job_command_line_help(argv[0]);
- exit(0);
- }
-#if defined(NETDATA_DEV_MODE) || defined(NETDATA_INTERNAL_CHECKS)
- else if(strcmp(arg, "--test") == 0) {
- // logfmt_test();
- json_test();
- exit(1);
- }
-#endif
- else if (strcmp(arg, "--show-config") == 0) {
- jb->show_config = true;
- }
- else {
- char buffer[1024];
- char *param = NULL;
- char *value = NULL;
-
- char *equal_sign = strchr(arg, '=');
- if (equal_sign) {
- copy_to_buffer(buffer, sizeof(buffer), arg, equal_sign - arg);
- param = buffer;
- value = equal_sign + 1;
- }
- else {
- param = arg;
- if (i + 1 < argc) {
- value = argv[++i];
- }
- else {
- if (!jb->pattern) {
- log_job_pattern_set(jb, arg, strlen(arg));
- continue;
- } else {
- log2stderr("Error: Multiple patterns detected. Specify only one pattern. The first is '%s', the second is '%s'", jb->pattern, arg);
- return false;
- }
- }
- }
-
- if (strcmp(param, "--filename-key") == 0) {
- if(!log_job_filename_key_set(jb, value, value ? strlen(value) : 0))
- return false;
- }
- else if (strcmp(param, "--prefix") == 0) {
- if(!log_job_key_prefix_set(jb, value, value ? strlen(value) : 0))
- return false;
- }
-#ifdef HAVE_LIBYAML
- else if (strcmp(param, "-f") == 0 || strcmp(param, "--file") == 0) {
- if (!yaml_parse_file(value, jb))
- return false;
- }
- else if (strcmp(param, "-c") == 0 || strcmp(param, "--config") == 0) {
- if (!yaml_parse_config(value, jb))
- return false;
- }
-#endif
- else if (strcmp(param, "--unmatched-key") == 0)
- hashed_key_set(&jb->unmatched.key, value);
- else if (strcmp(param, "--inject") == 0) {
- if (!parse_inject(jb, value, false))
- return false;
- }
- else if (strcmp(param, "--inject-unmatched") == 0) {
- if (!parse_inject(jb, value, true))
- return false;
- }
- else if (strcmp(param, "--rewrite") == 0) {
- if (!parse_rewrite(jb, value))
- return false;
- }
- else if (strcmp(param, "--rename") == 0) {
- if (!parse_rename(jb, value))
- return false;
- }
- else if (strcmp(param, "--include") == 0) {
- if (!log_job_include_pattern_set(jb, value, strlen(value)))
- return false;
- }
- else if (strcmp(param, "--exclude") == 0) {
- if (!log_job_exclude_pattern_set(jb, value, strlen(value)))
- return false;
- }
- else {
- i--;
- if (!jb->pattern) {
- log_job_pattern_set(jb, arg, strlen(arg));
- continue;
- } else {
- log2stderr("Error: Multiple patterns detected. Specify only one pattern. The first is '%s', the second is '%s'", jb->pattern, arg);
- return false;
- }
- }
- }
- }
-
- // Check if a pattern is set and exactly one pattern is specified
- if (!jb->pattern) {
- log2stderr("Warning: pattern not specified. Try the default config with: -c default");
- log_job_command_line_help(argv[0]);
- return false;
- }
-
- return true;
-}
diff --git a/collectors/log2journal/log2journal-pattern.c b/collectors/log2journal/log2journal-pattern.c
deleted file mode 100644
index 4b7e9026b..000000000
--- a/collectors/log2journal/log2journal-pattern.c
+++ /dev/null
@@ -1,54 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "log2journal.h"
-
-void search_pattern_cleanup(SEARCH_PATTERN *sp) {
- if(sp->pattern) {
- freez((void *)sp->pattern);
- sp->pattern = NULL;
- }
-
- if(sp->re) {
- pcre2_code_free(sp->re);
- sp->re = NULL;
- }
-
- if(sp->match_data) {
- pcre2_match_data_free(sp->match_data);
- sp->match_data = NULL;
- }
-
- txt_cleanup(&sp->error);
-}
-
-static void pcre2_error_message(SEARCH_PATTERN *sp, int rc, int pos) {
- char msg[1024];
- pcre2_get_error_in_buffer(msg, sizeof(msg), rc, pos);
- txt_replace(&sp->error, msg, strlen(msg));
-}
-
-static inline bool compile_pcre2(SEARCH_PATTERN *sp) {
- int error_number;
- PCRE2_SIZE error_offset;
- PCRE2_SPTR pattern_ptr = (PCRE2_SPTR)sp->pattern;
-
- sp->re = pcre2_compile(pattern_ptr, PCRE2_ZERO_TERMINATED, 0, &error_number, &error_offset, NULL);
- if (!sp->re) {
- pcre2_error_message(sp, error_number, (int) error_offset);
- return false;
- }
-
- return true;
-}
-
-bool search_pattern_set(SEARCH_PATTERN *sp, const char *search_pattern, size_t search_pattern_len) {
- search_pattern_cleanup(sp);
-
- sp->pattern = strndupz(search_pattern, search_pattern_len);
- if (!compile_pcre2(sp))
- return false;
-
- sp->match_data = pcre2_match_data_create_from_pattern(sp->re, NULL);
-
- return true;
-}
diff --git a/collectors/log2journal/log2journal-pcre2.c b/collectors/log2journal/log2journal-pcre2.c
deleted file mode 100644
index 185e69108..000000000
--- a/collectors/log2journal/log2journal-pcre2.c
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "log2journal.h"
-
-#define PCRE2_ERROR_LINE_MAX 1024
-#define PCRE2_KEY_MAX 1024
-
-struct pcre2_state {
- LOG_JOB *jb;
-
- const char *line;
- uint32_t pos;
- uint32_t key_start;
-
- pcre2_code *re;
- pcre2_match_data *match_data;
-
- char key[PCRE2_KEY_MAX];
- char msg[PCRE2_ERROR_LINE_MAX];
-};
-
-static inline void copy_and_convert_key(PCRE2_STATE *pcre2, const char *key) {
- char *d = &pcre2->key[pcre2->key_start];
- size_t remaining = sizeof(pcre2->key) - pcre2->key_start;
-
- while(remaining >= 2 && *key) {
- *d = journal_key_characters_map[(unsigned char)(*key)];
- remaining--;
- key++;
- d++;
- }
-
- *d = '\0';
-}
-
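-// Walk the PCRE2 name table of the compiled pattern and send every named
-// capture group as a key/value pair. Each table entry is a 2-byte group
-// number followed by the NUL-terminated group name.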
-static inline void jb_traverse_pcre2_named_groups_and_send_keys(PCRE2_STATE *pcre2, pcre2_code *re, pcre2_match_data *match_data, char *line) {
- PCRE2_SIZE *ovector = pcre2_get_ovector_pointer(match_data);
- uint32_t names_count;
- pcre2_pattern_info(re, PCRE2_INFO_NAMECOUNT, &names_count);
-
- if (names_count > 0) {
- PCRE2_SPTR name_table;
- pcre2_pattern_info(re, PCRE2_INFO_NAMETABLE, &name_table);
- uint32_t name_entry_size;
- pcre2_pattern_info(re, PCRE2_INFO_NAMEENTRYSIZE, &name_entry_size);
-
- const unsigned char *table_ptr = name_table;
- for (uint32_t i = 0; i < names_count; i++) {
- int n = (table_ptr[0] << 8) | table_ptr[1];
- const char *group_name = (const char *)(table_ptr + 2);
-
- PCRE2_SIZE start_offset = ovector[2 * n];
- PCRE2_SIZE end_offset = ovector[2 * n + 1];
- PCRE2_SIZE group_length = end_offset - start_offset;
-
- copy_and_convert_key(pcre2, group_name);
- log_job_send_extracted_key_value(pcre2->jb, pcre2->key, line + start_offset, group_length);
-
- table_ptr += name_entry_size;
- }
- }
-}
-
-void pcre2_get_error_in_buffer(char *msg, size_t msg_len, int rc, int pos) {
- int l;
-
- if(pos >= 0)
- l = snprintf(msg, msg_len, "PCRE2 error %d at pos %d on: ", rc, pos);
- else
- l = snprintf(msg, msg_len, "PCRE2 error %d on: ", rc);
-
- pcre2_get_error_message(rc, (PCRE2_UCHAR *)&msg[l], msg_len - l);
-}
-
-static void pcre2_error_message(PCRE2_STATE *pcre2, int rc, int pos) {
- pcre2_get_error_in_buffer(pcre2->msg, sizeof(pcre2->msg), rc, pos);
-}
-
-bool pcre2_has_error(PCRE2_STATE *pcre2) {
- return !pcre2->re || pcre2->msg[0];
-}
-
-PCRE2_STATE *pcre2_parser_create(LOG_JOB *jb) {
- PCRE2_STATE *pcre2 = mallocz(sizeof(PCRE2_STATE));
- memset(pcre2, 0, sizeof(PCRE2_STATE));
- pcre2->jb = jb;
-
- if(jb->prefix)
- pcre2->key_start = copy_to_buffer(pcre2->key, sizeof(pcre2->key), pcre2->jb->prefix, strlen(pcre2->jb->prefix));
-
- int rc;
- PCRE2_SIZE pos;
- pcre2->re = pcre2_compile((PCRE2_SPTR)jb->pattern, PCRE2_ZERO_TERMINATED, 0, &rc, &pos, NULL);
- if (!pcre2->re) {
- pcre2_error_message(pcre2, rc, pos);
- return pcre2;
- }
-
- pcre2->match_data = pcre2_match_data_create_from_pattern(pcre2->re, NULL);
-
- return pcre2;
-}
-
-void pcre2_parser_destroy(PCRE2_STATE *pcre2) {
- if(pcre2)
- freez(pcre2);
-}
-
-const char *pcre2_parser_error(PCRE2_STATE *pcre2) {
- return pcre2->msg;
-}
-
-bool pcre2_parse_document(PCRE2_STATE *pcre2, const char *txt, size_t len) {
- pcre2->line = txt;
- pcre2->pos = 0;
- pcre2->msg[0] = '\0';
-
- if(!len)
- len = strlen(txt);
-
- int rc = pcre2_match(pcre2->re, (PCRE2_SPTR)pcre2->line, len, 0, 0, pcre2->match_data, NULL);
- if(rc < 0) {
- pcre2_error_message(pcre2, rc, -1);
- return false;
- }
-
- jb_traverse_pcre2_named_groups_and_send_keys(pcre2, pcre2->re, pcre2->match_data, (char *)pcre2->line);
-
- return true;
-}
-
-void pcre2_test(void) {
- LOG_JOB jb = { .prefix = "NIGNX_" };
- PCRE2_STATE *pcre2 = pcre2_parser_create(&jb);
-
- pcre2_parse_document(pcre2, "{\"value\":\"\\u\\u039A\\u03B1\\u03BB\\u03B7\\u03BC\\u03AD\\u03C1\\u03B1\"}", 0);
-
- pcre2_parser_destroy(pcre2);
-}
diff --git a/collectors/log2journal/log2journal-rename.c b/collectors/log2journal/log2journal-rename.c
deleted file mode 100644
index c6975779f..000000000
--- a/collectors/log2journal/log2journal-rename.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "log2journal.h"
-
-void rename_cleanup(RENAME *rn) {
- hashed_key_cleanup(&rn->new_key);
- hashed_key_cleanup(&rn->old_key);
-}
-
-bool log_job_rename_add(LOG_JOB *jb, const char *new_key, size_t new_key_len, const char *old_key, size_t old_key_len) {
- if(jb->renames.used >= MAX_RENAMES) {
- log2stderr("Error: too many renames. You can rename up to %d fields.", MAX_RENAMES);
- return false;
- }
-
- RENAME *rn = &jb->renames.array[jb->renames.used++];
- hashed_key_len_set(&rn->new_key, new_key, new_key_len);
- hashed_key_len_set(&rn->old_key, old_key, old_key_len);
-
- return true;
-}
diff --git a/collectors/log2journal/log2journal-replace.c b/collectors/log2journal/log2journal-replace.c
deleted file mode 100644
index 429d615da..000000000
--- a/collectors/log2journal/log2journal-replace.c
+++ /dev/null
@@ -1,111 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "log2journal.h"
-
-void replace_node_free(REPLACE_NODE *rpn) {
- hashed_key_cleanup(&rpn->name);
- rpn->next = NULL;
- freez(rpn);
-}
-
-void replace_pattern_cleanup(REPLACE_PATTERN *rp) {
- if(rp->pattern) {
- freez((void *)rp->pattern);
- rp->pattern = NULL;
- }
-
- while(rp->nodes) {
- REPLACE_NODE *rpn = rp->nodes;
- rp->nodes = rpn->next;
- replace_node_free(rpn);
- }
-}
-
-static REPLACE_NODE *replace_pattern_add_node(REPLACE_NODE **head, bool is_variable, const char *text) {
- REPLACE_NODE *new_node = callocz(1, sizeof(REPLACE_NODE));
- if (!new_node)
- return NULL;
-
- hashed_key_set(&new_node->name, text);
- new_node->is_variable = is_variable;
- new_node->next = NULL;
-
- if (*head == NULL)
- *head = new_node;
-
- else {
- REPLACE_NODE *current = *head;
-
- // append it
- while (current->next != NULL)
- current = current->next;
-
- current->next = new_node;
- }
-
- return new_node;
-}
-
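-// Compile a replacement pattern into a linked list of nodes: literal text
-// segments and ${variable} references that are resolved at evaluation time.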
-bool replace_pattern_set(REPLACE_PATTERN *rp, const char *pattern) {
- replace_pattern_cleanup(rp);
-
- rp->pattern = strdupz(pattern);
- const char *current = rp->pattern;
-
- while (*current != '\0') {
- if (*current == '$' && *(current + 1) == '{') {
- // Start of a variable
- const char *end = strchr(current, '}');
- if (!end) {
- log2stderr("Error: Missing closing brace in replacement pattern: %s", rp->pattern);
- return false;
- }
-
- size_t name_length = end - current - 2; // Length of the variable name
- char *variable_name = strndupz(current + 2, name_length);
- if (!variable_name) {
- log2stderr("Error: Memory allocation failed for variable name.");
- return false;
- }
-
- REPLACE_NODE *node = replace_pattern_add_node(&(rp->nodes), true, variable_name);
- if (!node) {
- freez(variable_name);
- log2stderr("Error: Failed to add replacement node for variable.");
- return false;
- }
-
- current = end + 1; // Move past the variable
- }
- else {
- // Start of literal text
- const char *start = current;
- while (*current != '\0' && !(*current == '$' && *(current + 1) == '{')) {
- current++;
- }
-
- size_t text_length = current - start;
- char *text = strndupz(start, text_length);
- if (!text) {
- log2stderr("Error: Memory allocation failed for literal text.");
- return false;
- }
-
- REPLACE_NODE *node = replace_pattern_add_node(&(rp->nodes), false, text);
- if (!node) {
- freez(text);
- log2stderr("Error: Failed to add replacement node for text.");
- return false;
- }
- }
- }
-
- for(REPLACE_NODE *node = rp->nodes; node; node = node->next) {
- if(node->is_variable) {
- rp->has_variables = true;
- break;
- }
- }
-
- return true;
-}
diff --git a/collectors/log2journal/log2journal-rewrite.c b/collectors/log2journal/log2journal-rewrite.c
deleted file mode 100644
index 112391bf0..000000000
--- a/collectors/log2journal/log2journal-rewrite.c
+++ /dev/null
@@ -1,51 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "log2journal.h"
-
-void rewrite_cleanup(REWRITE *rw) {
- hashed_key_cleanup(&rw->key);
-
- if(rw->flags & RW_MATCH_PCRE2)
- search_pattern_cleanup(&rw->match_pcre2);
- else if(rw->flags & RW_MATCH_NON_EMPTY)
- replace_pattern_cleanup(&rw->match_non_empty);
-
- replace_pattern_cleanup(&rw->value);
- rw->flags = RW_NONE;
-}
-
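-// Add a rewrite rule. Depending on the flags, the search pattern is compiled
-// either as a PCRE2 expression or as a non-empty replacement template; on
-// failure the partially initialized slot is released again.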
-bool log_job_rewrite_add(LOG_JOB *jb, const char *key, RW_FLAGS flags, const char *search_pattern, const char *replace_pattern) {
- if(jb->rewrites.used >= MAX_REWRITES) {
- log2stderr("Error: too many rewrites. You can add up to %d rewrite rules.", MAX_REWRITES);
- return false;
- }
-
- if((flags & (RW_MATCH_PCRE2|RW_MATCH_NON_EMPTY)) && (!search_pattern || !*search_pattern)) {
- log2stderr("Error: rewrite for key '%s' does not specify a search pattern.", key);
- return false;
- }
-
- REWRITE *rw = &jb->rewrites.array[jb->rewrites.used++];
- rw->flags = flags;
-
- hashed_key_set(&rw->key, key);
-
- if((flags & RW_MATCH_PCRE2) && !search_pattern_set(&rw->match_pcre2, search_pattern, strlen(search_pattern))) {
- rewrite_cleanup(rw);
- jb->rewrites.used--;
- return false;
- }
- else if((flags & RW_MATCH_NON_EMPTY) && !replace_pattern_set(&rw->match_non_empty, search_pattern)) {
- rewrite_cleanup(rw);
- jb->rewrites.used--;
- return false;
- }
-
- if(replace_pattern && *replace_pattern && !replace_pattern_set(&rw->value, replace_pattern)) {
- rewrite_cleanup(rw);
- jb->rewrites.used--;
- return false;
- }
-
- return true;
-}
diff --git a/collectors/log2journal/log2journal-yaml.c b/collectors/log2journal/log2journal-yaml.c
deleted file mode 100644
index 862e7bf4b..000000000
--- a/collectors/log2journal/log2journal-yaml.c
+++ /dev/null
@@ -1,964 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "log2journal.h"
-
-// ----------------------------------------------------------------------------
-// yaml configuration file
-
-#ifdef HAVE_LIBYAML
-
-static const char *yaml_event_name(yaml_event_type_t type) {
- switch (type) {
- case YAML_NO_EVENT:
- return "YAML_NO_EVENT";
-
- case YAML_SCALAR_EVENT:
- return "YAML_SCALAR_EVENT";
-
- case YAML_ALIAS_EVENT:
- return "YAML_ALIAS_EVENT";
-
- case YAML_MAPPING_START_EVENT:
- return "YAML_MAPPING_START_EVENT";
-
- case YAML_MAPPING_END_EVENT:
- return "YAML_MAPPING_END_EVENT";
-
- case YAML_SEQUENCE_START_EVENT:
- return "YAML_SEQUENCE_START_EVENT";
-
- case YAML_SEQUENCE_END_EVENT:
- return "YAML_SEQUENCE_END_EVENT";
-
- case YAML_STREAM_START_EVENT:
- return "YAML_STREAM_START_EVENT";
-
- case YAML_STREAM_END_EVENT:
- return "YAML_STREAM_END_EVENT";
-
- case YAML_DOCUMENT_START_EVENT:
- return "YAML_DOCUMENT_START_EVENT";
-
- case YAML_DOCUMENT_END_EVENT:
- return "YAML_DOCUMENT_END_EVENT";
-
- default:
- return "UNKNOWN";
- }
-}
-
-#define yaml_error(parser, event, fmt, args...) yaml_error_with_trace(parser, event, __LINE__, __FUNCTION__, __FILE__, fmt, ##args)
-static void yaml_error_with_trace(yaml_parser_t *parser, yaml_event_t *event, size_t line, const char *function, const char *file, const char *format, ...) __attribute__ ((format(__printf__, 6, 7)));
-static void yaml_error_with_trace(yaml_parser_t *parser, yaml_event_t *event, size_t line, const char *function, const char *file, const char *format, ...) {
- char buf[1024] = ""; // Initialize buf to an empty string
- const char *type = "";
-
- if(event) {
- type = yaml_event_name(event->type);
-
- switch (event->type) {
- case YAML_SCALAR_EVENT:
- copy_to_buffer(buf, sizeof(buf), (char *)event->data.scalar.value, event->data.scalar.length);
- break;
-
- case YAML_ALIAS_EVENT:
- snprintf(buf, sizeof(buf), "%s", event->data.alias.anchor);
- break;
-
- default:
- break;
- }
- }
-
- fprintf(stderr, "YAML %zu@%s, %s(): (line %d, column %d, %s%s%s): ",
- line, file, function,
- (int)(parser->mark.line + 1), (int)(parser->mark.column + 1),
- type, buf[0]? ", near ": "", buf);
-
- va_list args;
- va_start(args, format);
- vfprintf(stderr, format, args);
- va_end(args);
- fprintf(stderr, "\n");
-}
-
-#define yaml_parse(parser, event) yaml_parse_with_trace(parser, event, __LINE__, __FUNCTION__, __FILE__)
-static bool yaml_parse_with_trace(yaml_parser_t *parser, yaml_event_t *event, size_t line __maybe_unused, const char *function __maybe_unused, const char *file __maybe_unused) {
- if (!yaml_parser_parse(parser, event)) {
- yaml_error(parser, NULL, "YAML parser error %d", parser->error);
- return false;
- }
-
-// fprintf(stderr, ">>> %s >>> %.*s\n",
-// yaml_event_name(event->type),
-// event->type == YAML_SCALAR_EVENT ? event->data.scalar.length : 0,
-// event->type == YAML_SCALAR_EVENT ? (char *)event->data.scalar.value : "");
-
- return true;
-}
-
-#define yaml_parse_expect_event(parser, type) yaml_parse_expect_event_with_trace(parser, type, __LINE__, __FUNCTION__, __FILE__)
-static bool yaml_parse_expect_event_with_trace(yaml_parser_t *parser, yaml_event_type_t type, size_t line, const char *function, const char *file) {
- yaml_event_t event;
- if (!yaml_parse(parser, &event))
- return false;
-
- bool ret = true;
- if(event.type != type) {
- yaml_error_with_trace(parser, &event, line, function, file, "unexpected event - expecting: %s", yaml_event_name(type));
- ret = false;
- }
-// else
-// fprintf(stderr, "OK (%zu@%s, %s()\n", line, file, function);
-
- yaml_event_delete(&event);
- return ret;
-}
-
-#define yaml_scalar_matches(event, s, len) yaml_scalar_matches_with_trace(event, s, len, __LINE__, __FUNCTION__, __FILE__)
-static bool yaml_scalar_matches_with_trace(yaml_event_t *event, const char *s, size_t len, size_t line __maybe_unused, const char *function __maybe_unused, const char *file __maybe_unused) {
- if(event->type != YAML_SCALAR_EVENT)
- return false;
-
- if(len != event->data.scalar.length)
- return false;
-// else
-// fprintf(stderr, "OK (%zu@%s, %s()\n", line, file, function);
-
- return strcmp((char *)event->data.scalar.value, s) == 0;
-}
-
-// ----------------------------------------------------------------------------
-
-static size_t yaml_parse_filename_injection(yaml_parser_t *parser, LOG_JOB *jb) {
- yaml_event_t event;
- size_t errors = 0;
-
- if(!yaml_parse_expect_event(parser, YAML_MAPPING_START_EVENT))
- return 1;
-
- if (!yaml_parse(parser, &event))
- return 1;
-
- if (yaml_scalar_matches(&event, "key", strlen("key"))) {
- yaml_event_t sub_event;
- if (!yaml_parse(parser, &sub_event))
- errors++;
-
- else {
- if (sub_event.type == YAML_SCALAR_EVENT) {
- if(!log_job_filename_key_set(jb, (char *) sub_event.data.scalar.value,
- sub_event.data.scalar.length))
- errors++;
- }
-
- else {
- yaml_error(parser, &sub_event, "expected the filename as %s", yaml_event_name(YAML_SCALAR_EVENT));
- errors++;
- }
-
- yaml_event_delete(&sub_event);
- }
- }
-
- if(!yaml_parse_expect_event(parser, YAML_MAPPING_END_EVENT))
- errors++;
-
- yaml_event_delete(&event);
- return errors;
-}
-
-static size_t yaml_parse_filters(yaml_parser_t *parser, LOG_JOB *jb) {
- if(!yaml_parse_expect_event(parser, YAML_MAPPING_START_EVENT))
- return 1;
-
- size_t errors = 0;
- bool finished = false;
-
- while(!errors && !finished) {
- yaml_event_t event;
-
- if(!yaml_parse(parser, &event))
- return 1;
-
- if(event.type == YAML_SCALAR_EVENT) {
- if(yaml_scalar_matches(&event, "include", strlen("include"))) {
- yaml_event_t sub_event;
- if(!yaml_parse(parser, &sub_event))
- errors++;
-
- else {
- if(sub_event.type == YAML_SCALAR_EVENT) {
- if(!log_job_include_pattern_set(jb, (char *) sub_event.data.scalar.value,
- sub_event.data.scalar.length))
- errors++;
- }
-
- else {
- yaml_error(parser, &sub_event, "expected the include as %s",
- yaml_event_name(YAML_SCALAR_EVENT));
- errors++;
- }
-
- yaml_event_delete(&sub_event);
- }
- }
- else if(yaml_scalar_matches(&event, "exclude", strlen("exclude"))) {
- yaml_event_t sub_event;
- if(!yaml_parse(parser, &sub_event))
- errors++;
-
- else {
- if(sub_event.type == YAML_SCALAR_EVENT) {
- if(!log_job_exclude_pattern_set(jb,(char *) sub_event.data.scalar.value,
- sub_event.data.scalar.length))
- errors++;
- }
-
- else {
- yaml_error(parser, &sub_event, "expected the exclude as %s",
- yaml_event_name(YAML_SCALAR_EVENT));
- errors++;
- }
-
- yaml_event_delete(&sub_event);
- }
- }
- }
- else if(event.type == YAML_MAPPING_END_EVENT)
- finished = true;
- else {
- yaml_error(parser, &event, "expected %s or %s",
- yaml_event_name(YAML_SCALAR_EVENT),
- yaml_event_name(YAML_MAPPING_END_EVENT));
- errors++;
- }
-
- yaml_event_delete(&event);
- }
-
- return errors;
-}
-
-static size_t yaml_parse_prefix(yaml_parser_t *parser, LOG_JOB *jb) {
- yaml_event_t event;
- size_t errors = 0;
-
- if (!yaml_parse(parser, &event))
- return 1;
-
- if (event.type == YAML_SCALAR_EVENT) {
- if(!log_job_key_prefix_set(jb, (char *) event.data.scalar.value, event.data.scalar.length))
- errors++;
- }
-
- yaml_event_delete(&event);
- return errors;
-}
-
-static bool yaml_parse_constant_field_injection(yaml_parser_t *parser, LOG_JOB *jb, bool unmatched) {
- yaml_event_t event;
- if (!yaml_parse(parser, &event) || event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &event, "Expected scalar for constant field injection key");
- yaml_event_delete(&event);
- return false;
- }
-
- char *key = strndupz((char *)event.data.scalar.value, event.data.scalar.length);
- char *value = NULL;
- bool ret = false;
-
- yaml_event_delete(&event);
-
- if (!yaml_parse(parser, &event) || event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &event, "Expected scalar for constant field injection value");
- goto cleanup;
- }
-
- if(!yaml_scalar_matches(&event, "value", strlen("value"))) {
- yaml_error(parser, &event, "Expected scalar 'value'");
- goto cleanup;
- }
-
- if (!yaml_parse(parser, &event) || event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &event, "Expected scalar for constant field injection value");
- goto cleanup;
- }
-
- value = strndupz((char *)event.data.scalar.value, event.data.scalar.length);
-
- if(!log_job_injection_add(jb, key, strlen(key), value, strlen(value), unmatched))
- ret = false;
- else
- ret = true;
-
-cleanup:
- yaml_event_delete(&event);
- freez(key);
- freez(value);
- return !ret ? 1 : 0;
-}
-
-static bool yaml_parse_injection_mapping(yaml_parser_t *parser, LOG_JOB *jb, bool unmatched) {
- yaml_event_t event;
- size_t errors = 0;
- bool finished = false;
-
- while (!errors && !finished) {
- if (!yaml_parse(parser, &event)) {
- errors++;
- continue;
- }
-
- switch (event.type) {
- case YAML_SCALAR_EVENT:
- if (yaml_scalar_matches(&event, "key", strlen("key"))) {
- errors += yaml_parse_constant_field_injection(parser, jb, unmatched);
- } else {
- yaml_error(parser, &event, "Unexpected scalar in injection mapping");
- errors++;
- }
- break;
-
- case YAML_MAPPING_END_EVENT:
- finished = true;
- break;
-
- default:
- yaml_error(parser, &event, "Unexpected event in injection mapping");
- errors++;
- break;
- }
-
- yaml_event_delete(&event);
- }
-
- return errors == 0;
-}
-
-static size_t yaml_parse_injections(yaml_parser_t *parser, LOG_JOB *jb, bool unmatched) {
- yaml_event_t event;
- size_t errors = 0;
- bool finished = false;
-
- if (!yaml_parse_expect_event(parser, YAML_SEQUENCE_START_EVENT))
- return 1;
-
- while (!errors && !finished) {
- if (!yaml_parse(parser, &event)) {
- errors++;
- continue;
- }
-
- switch (event.type) {
- case YAML_MAPPING_START_EVENT:
- if (!yaml_parse_injection_mapping(parser, jb, unmatched))
- errors++;
- break;
-
- case YAML_SEQUENCE_END_EVENT:
- finished = true;
- break;
-
- default:
- yaml_error(parser, &event, "Unexpected event in injections sequence");
- errors++;
- break;
- }
-
- yaml_event_delete(&event);
- }
-
- return errors;
-}
-
-static size_t yaml_parse_unmatched(yaml_parser_t *parser, LOG_JOB *jb) {
- size_t errors = 0;
- bool finished = false;
-
- if (!yaml_parse_expect_event(parser, YAML_MAPPING_START_EVENT))
- return 1;
-
- while (!errors && !finished) {
- yaml_event_t event;
- if (!yaml_parse(parser, &event)) {
- errors++;
- continue;
- }
-
- switch (event.type) {
- case YAML_SCALAR_EVENT:
- if (yaml_scalar_matches(&event, "key", strlen("key"))) {
- yaml_event_t sub_event;
- if (!yaml_parse(parser, &sub_event)) {
- errors++;
- } else {
- if (sub_event.type == YAML_SCALAR_EVENT) {
- hashed_key_len_set(&jb->unmatched.key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- } else {
- yaml_error(parser, &sub_event, "expected a scalar value for 'key'");
- errors++;
- }
- yaml_event_delete(&sub_event);
- }
- } else if (yaml_scalar_matches(&event, "inject", strlen("inject"))) {
- errors += yaml_parse_injections(parser, jb, true);
- } else {
- yaml_error(parser, &event, "Unexpected scalar in unmatched section");
- errors++;
- }
- break;
-
- case YAML_MAPPING_END_EVENT:
- finished = true;
- break;
-
- default:
- yaml_error(parser, &event, "Unexpected event in unmatched section");
- errors++;
- break;
- }
-
- yaml_event_delete(&event);
- }
-
- return errors;
-}
-
-static size_t yaml_parse_rewrites(yaml_parser_t *parser, LOG_JOB *jb) {
- size_t errors = 0;
-
- if (!yaml_parse_expect_event(parser, YAML_SEQUENCE_START_EVENT))
- return 1;
-
- bool finished = false;
- while (!errors && !finished) {
- yaml_event_t event;
- if (!yaml_parse(parser, &event)) {
- errors++;
- continue;
- }
-
- switch (event.type) {
- case YAML_MAPPING_START_EVENT:
- {
- RW_FLAGS flags = RW_NONE;
- char *key = NULL;
- char *search_pattern = NULL;
- char *replace_pattern = NULL;
-
- bool mapping_finished = false;
- while (!errors && !mapping_finished) {
- yaml_event_t sub_event;
- if (!yaml_parse(parser, &sub_event)) {
- errors++;
- continue;
- }
-
- switch (sub_event.type) {
- case YAML_SCALAR_EVENT:
- if (yaml_scalar_matches(&sub_event, "key", strlen("key"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rewrite key");
- errors++;
- } else {
- key = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- yaml_event_delete(&sub_event);
- }
- } else if (yaml_scalar_matches(&sub_event, "match", strlen("match"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rewrite match PCRE2 pattern");
- errors++;
- }
- else {
- if(search_pattern)
- freez(search_pattern);
- flags |= RW_MATCH_PCRE2;
- flags &= ~RW_MATCH_NON_EMPTY;
- search_pattern = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- yaml_event_delete(&sub_event);
- }
- } else if (yaml_scalar_matches(&sub_event, "not_empty", strlen("not_empty"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rewrite not empty condition");
- errors++;
- }
- else {
- if(search_pattern)
- freez(search_pattern);
- flags |= RW_MATCH_NON_EMPTY;
- flags &= ~RW_MATCH_PCRE2;
- search_pattern = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- yaml_event_delete(&sub_event);
- }
- } else if (yaml_scalar_matches(&sub_event, "value", strlen("value"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rewrite value");
- errors++;
- } else {
- replace_pattern = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- yaml_event_delete(&sub_event);
- }
- } else if (yaml_scalar_matches(&sub_event, "stop", strlen("stop"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rewrite stop boolean");
- errors++;
- } else {
- if(strncmp((char*)sub_event.data.scalar.value, "no", 2) == 0 ||
- strncmp((char*)sub_event.data.scalar.value, "false", 5) == 0)
- flags |= RW_DONT_STOP;
- else
- flags &= ~RW_DONT_STOP;
-
- yaml_event_delete(&sub_event);
- }
- } else if (yaml_scalar_matches(&sub_event, "inject", strlen("inject"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rewrite inject boolean");
- errors++;
- } else {
- if(strncmp((char*)sub_event.data.scalar.value, "yes", 3) == 0 ||
- strncmp((char*)sub_event.data.scalar.value, "true", 4) == 0)
- flags |= RW_INJECT;
- else
- flags &= ~RW_INJECT;
-
- yaml_event_delete(&sub_event);
- }
- } else {
- yaml_error(parser, &sub_event, "Unexpected scalar in rewrite mapping");
- errors++;
- }
- break;
-
- case YAML_MAPPING_END_EVENT:
- if(key) {
- if (!log_job_rewrite_add(jb, key, flags, search_pattern, replace_pattern))
- errors++;
- }
-
- freez(key);
- key = NULL;
-
- freez(search_pattern);
- search_pattern = NULL;
-
- freez(replace_pattern);
- replace_pattern = NULL;
-
- flags = RW_NONE;
-
- mapping_finished = true;
- break;
-
- default:
- yaml_error(parser, &sub_event, "Unexpected event in rewrite mapping");
- errors++;
- break;
- }
-
- yaml_event_delete(&sub_event);
- }
- }
- break;
-
- case YAML_SEQUENCE_END_EVENT:
- finished = true;
- break;
-
- default:
- yaml_error(parser, &event, "Unexpected event in rewrites sequence");
- errors++;
- break;
- }
-
- yaml_event_delete(&event);
- }
-
- return errors;
-}
-
-static size_t yaml_parse_renames(yaml_parser_t *parser, LOG_JOB *jb) {
- size_t errors = 0;
-
- if (!yaml_parse_expect_event(parser, YAML_SEQUENCE_START_EVENT))
- return 1;
-
- bool finished = false;
- while (!errors && !finished) {
- yaml_event_t event;
- if (!yaml_parse(parser, &event)) {
- errors++;
- continue;
- }
-
- switch (event.type) {
- case YAML_MAPPING_START_EVENT:
- {
- struct key_rename rn = { 0 };
-
- bool mapping_finished = false;
- while (!errors && !mapping_finished) {
- yaml_event_t sub_event;
- if (!yaml_parse(parser, &sub_event)) {
- errors++;
- continue;
- }
-
- switch (sub_event.type) {
- case YAML_SCALAR_EVENT:
- if (yaml_scalar_matches(&sub_event, "new_key", strlen("new_key"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rename new_key");
- errors++;
- } else {
- hashed_key_len_set(&rn.new_key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- yaml_event_delete(&sub_event);
- }
- } else if (yaml_scalar_matches(&sub_event, "old_key", strlen("old_key"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rename old_key");
- errors++;
- } else {
- hashed_key_len_set(&rn.old_key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- yaml_event_delete(&sub_event);
- }
- } else {
- yaml_error(parser, &sub_event, "Unexpected scalar in rewrite mapping");
- errors++;
- }
- break;
-
- case YAML_MAPPING_END_EVENT:
- if(rn.old_key.key && rn.new_key.key) {
- if (!log_job_rename_add(jb, rn.new_key.key, rn.new_key.len,
- rn.old_key.key, rn.old_key.len))
- errors++;
- }
- rename_cleanup(&rn);
-
- mapping_finished = true;
- break;
-
- default:
- yaml_error(parser, &sub_event, "Unexpected event in rewrite mapping");
- errors++;
- break;
- }
-
- yaml_event_delete(&sub_event);
- }
- }
- break;
-
- case YAML_SEQUENCE_END_EVENT:
- finished = true;
- break;
-
- default:
- yaml_error(parser, &event, "Unexpected event in rewrites sequence");
- errors++;
- break;
- }
-
- yaml_event_delete(&event);
- }
-
- return errors;
-}
-
-static size_t yaml_parse_pattern(yaml_parser_t *parser, LOG_JOB *jb) {
- yaml_event_t event;
- size_t errors = 0;
-
- if (!yaml_parse(parser, &event))
- return 1;
-
- if(event.type == YAML_SCALAR_EVENT)
- log_job_pattern_set(jb, (char *) event.data.scalar.value, event.data.scalar.length);
- else {
- yaml_error(parser, &event, "unexpected event type");
- errors++;
- }
-
- yaml_event_delete(&event);
- return errors;
-}
-
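-// Parse a complete YAML configuration document. The top-level mapping may
-// contain: pattern, prefix, filename, filter, inject, unmatched, rewrite
-// and rename.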
-static size_t yaml_parse_initialized(yaml_parser_t *parser, LOG_JOB *jb) {
- size_t errors = 0;
-
- if(!yaml_parse_expect_event(parser, YAML_STREAM_START_EVENT)) {
- errors++;
- goto cleanup;
- }
-
- if(!yaml_parse_expect_event(parser, YAML_DOCUMENT_START_EVENT)) {
- errors++;
- goto cleanup;
- }
-
- if(!yaml_parse_expect_event(parser, YAML_MAPPING_START_EVENT)) {
- errors++;
- goto cleanup;
- }
-
- bool finished = false;
- while (!errors && !finished) {
- yaml_event_t event;
- if(!yaml_parse(parser, &event)) {
- errors++;
- continue;
- }
-
- switch(event.type) {
- default:
- yaml_error(parser, &event, "unexpected type");
- errors++;
- break;
-
- case YAML_MAPPING_END_EVENT:
- finished = true;
- break;
-
- case YAML_SCALAR_EVENT:
- if (yaml_scalar_matches(&event, "pattern", strlen("pattern")))
- errors += yaml_parse_pattern(parser, jb);
-
- else if (yaml_scalar_matches(&event, "prefix", strlen("prefix")))
- errors += yaml_parse_prefix(parser, jb);
-
- else if (yaml_scalar_matches(&event, "filename", strlen("filename")))
- errors += yaml_parse_filename_injection(parser, jb);
-
- else if (yaml_scalar_matches(&event, "filter", strlen("filter")))
- errors += yaml_parse_filters(parser, jb);
-
- else if (yaml_scalar_matches(&event, "inject", strlen("inject")))
- errors += yaml_parse_injections(parser, jb, false);
-
- else if (yaml_scalar_matches(&event, "unmatched", strlen("unmatched")))
- errors += yaml_parse_unmatched(parser, jb);
-
- else if (yaml_scalar_matches(&event, "rewrite", strlen("rewrite")))
- errors += yaml_parse_rewrites(parser, jb);
-
- else if (yaml_scalar_matches(&event, "rename", strlen("rename")))
- errors += yaml_parse_renames(parser, jb);
-
- else {
- yaml_error(parser, &event, "unexpected scalar");
- errors++;
- }
- break;
- }
-
- yaml_event_delete(&event);
- }
-
- if(!errors && !yaml_parse_expect_event(parser, YAML_DOCUMENT_END_EVENT)) {
- errors++;
- goto cleanup;
- }
-
- if(!errors && !yaml_parse_expect_event(parser, YAML_STREAM_END_EVENT)) {
- errors++;
- goto cleanup;
- }
-
-cleanup:
- return errors;
-}
-
-bool yaml_parse_file(const char *config_file_path, LOG_JOB *jb) {
- if(!config_file_path || !*config_file_path) {
- log2stderr("yaml configuration filename cannot be empty.");
- return false;
- }
-
- FILE *fp = fopen(config_file_path, "r");
- if (!fp) {
- log2stderr("Error opening config file: %s", config_file_path);
- return false;
- }
-
- yaml_parser_t parser;
- yaml_parser_initialize(&parser);
- yaml_parser_set_input_file(&parser, fp);
-
- size_t errors = yaml_parse_initialized(&parser, jb);
-
- yaml_parser_delete(&parser);
- fclose(fp);
- return errors == 0;
-}
-
-bool yaml_parse_config(const char *config_name, LOG_JOB *jb) {
- char filename[FILENAME_MAX + 1];
-
- snprintf(filename, sizeof(filename), "%s/%s.yaml", LOG2JOURNAL_CONFIG_PATH, config_name);
- return yaml_parse_file(filename, jb);
-}
-
-#endif // HAVE_LIBYAML
-
-// ----------------------------------------------------------------------------
-// printing yaml
-
-static void yaml_print_multiline_value(const char *s, size_t depth) {
- if (!s)
- s = "";
-
- do {
- const char* next = strchr(s, '\n');
- if(next) next++;
-
- size_t len = next ? (size_t)(next - s) : strlen(s);
- char buf[len + 1];
- copy_to_buffer(buf, sizeof(buf), s, len);
-
- fprintf(stderr, "%.*s%s%s",
- (int)(depth * 2), " ",
- buf, next ? "" : "\n");
-
- s = next;
- } while(s && *s);
-}
-
-static bool needs_quotes_in_yaml(const char *str) {
- // Lookup table for special YAML characters
- static bool special_chars[256] = { false };
- static bool table_initialized = false;
-
- if (!table_initialized) {
- // Initialize the lookup table
- const char *special_chars_str = ":{}[],&*!|>'\"%@`^";
- for (const char *c = special_chars_str; *c; ++c) {
- special_chars[(unsigned char)*c] = true;
- }
- table_initialized = true;
- }
-
- while (*str) {
- if (special_chars[(unsigned char)*str]) {
- return true;
- }
- str++;
- }
- return false;
-}
-
-static void yaml_print_node(const char *key, const char *value, size_t depth, bool dash) {
- if(depth > 10) depth = 10;
- const char *quote = "'";
-
- const char *second_line = NULL;
- if(value && strchr(value, '\n')) {
- second_line = value;
- value = "|";
- quote = "";
- }
- else if(!value || !needs_quotes_in_yaml(value))
- quote = "";
-
- fprintf(stderr, "%.*s%s%s%s%s%s%s\n",
- (int)(depth * 2), " ", dash ? "- ": "",
- key ? key : "", key ? ": " : "",
- quote, value ? value : "", quote);
-
- if(second_line) {
- yaml_print_multiline_value(second_line, depth + 1);
- }
-}
-
-void log_job_configuration_to_yaml(LOG_JOB *jb) {
- if(jb->pattern)
- yaml_print_node("pattern", jb->pattern, 0, false);
-
- if(jb->prefix) {
- fprintf(stderr, "\n");
- yaml_print_node("prefix", jb->prefix, 0, false);
- }
-
- if(jb->filename.key.key) {
- fprintf(stderr, "\n");
- yaml_print_node("filename", NULL, 0, false);
- yaml_print_node("key", jb->filename.key.key, 1, false);
- }
-
- if(jb->filter.include.pattern || jb->filter.exclude.pattern) {
- fprintf(stderr, "\n");
- yaml_print_node("filter", NULL, 0, false);
-
- if(jb->filter.include.pattern)
- yaml_print_node("include", jb->filter.include.pattern, 1, false);
-
- if(jb->filter.exclude.pattern)
- yaml_print_node("exclude", jb->filter.exclude.pattern, 1, false);
- }
-
- if(jb->renames.used) {
- fprintf(stderr, "\n");
- yaml_print_node("rename", NULL, 0, false);
-
- for(size_t i = 0; i < jb->renames.used ;i++) {
- yaml_print_node("new_key", jb->renames.array[i].new_key.key, 1, true);
- yaml_print_node("old_key", jb->renames.array[i].old_key.key, 2, false);
- }
- }
-
- if(jb->injections.used) {
- fprintf(stderr, "\n");
- yaml_print_node("inject", NULL, 0, false);
-
- for (size_t i = 0; i < jb->injections.used; i++) {
- yaml_print_node("key", jb->injections.keys[i].key.key, 1, true);
- yaml_print_node("value", jb->injections.keys[i].value.pattern, 2, false);
- }
- }
-
- if(jb->rewrites.used) {
- fprintf(stderr, "\n");
- yaml_print_node("rewrite", NULL, 0, false);
-
- for(size_t i = 0; i < jb->rewrites.used ;i++) {
- REWRITE *rw = &jb->rewrites.array[i];
-
- yaml_print_node("key", rw->key.key, 1, true);
-
- if(rw->flags & RW_MATCH_PCRE2)
- yaml_print_node("match", rw->match_pcre2.pattern, 2, false);
-
- else if(rw->flags & RW_MATCH_NON_EMPTY)
- yaml_print_node("not_empty", rw->match_non_empty.pattern, 2, false);
-
- yaml_print_node("value", rw->value.pattern, 2, false);
-
- if(rw->flags & RW_INJECT)
- yaml_print_node("inject", "yes", 2, false);
-
- if(rw->flags & RW_DONT_STOP)
- yaml_print_node("stop", "no", 2, false);
- }
- }
-
- if(jb->unmatched.key.key || jb->unmatched.injections.used) {
- fprintf(stderr, "\n");
- yaml_print_node("unmatched", NULL, 0, false);
-
- if(jb->unmatched.key.key)
- yaml_print_node("key", jb->unmatched.key.key, 1, false);
-
- if(jb->unmatched.injections.used) {
- fprintf(stderr, "\n");
- yaml_print_node("inject", NULL, 1, false);
-
- for (size_t i = 0; i < jb->unmatched.injections.used; i++) {
- yaml_print_node("key", jb->unmatched.injections.keys[i].key.key, 2, true);
- yaml_print_node("value", jb->unmatched.injections.keys[i].value.pattern, 3, false);
- }
- }
- }
-}
diff --git a/collectors/log2journal/log2journal.c b/collectors/log2journal/log2journal.c
deleted file mode 100644
index c3204939c..000000000
--- a/collectors/log2journal/log2journal.c
+++ /dev/null
@@ -1,569 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "log2journal.h"
-
-// ----------------------------------------------------------------------------
-
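-// Map every byte to a character that is valid in a systemd-journal field
-// name: letters are uppercased, digits pass through, and everything else
-// becomes an underscore (NUL stays NUL).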
-const char journal_key_characters_map[256] = {
- // control characters
- [0] = '\0', [1] = '_', [2] = '_', [3] = '_', [4] = '_', [5] = '_', [6] = '_', [7] = '_',
- [8] = '_', [9] = '_', [10] = '_', [11] = '_', [12] = '_', [13] = '_', [14] = '_', [15] = '_',
- [16] = '_', [17] = '_', [18] = '_', [19] = '_', [20] = '_', [21] = '_', [22] = '_', [23] = '_',
- [24] = '_', [25] = '_', [26] = '_', [27] = '_', [28] = '_', [29] = '_', [30] = '_', [31] = '_',
-
- // symbols
- [' '] = '_', ['!'] = '_', ['"'] = '_', ['#'] = '_', ['$'] = '_', ['%'] = '_', ['&'] = '_', ['\''] = '_',
- ['('] = '_', [')'] = '_', ['*'] = '_', ['+'] = '_', [','] = '_', ['-'] = '_', ['.'] = '_', ['/'] = '_',
-
- // numbers
- ['0'] = '0', ['1'] = '1', ['2'] = '2', ['3'] = '3', ['4'] = '4', ['5'] = '5', ['6'] = '6', ['7'] = '7',
- ['8'] = '8', ['9'] = '9',
-
- // symbols
- [':'] = '_', [';'] = '_', ['<'] = '_', ['='] = '_', ['>'] = '_', ['?'] = '_', ['@'] = '_',
-
- // capitals
- ['A'] = 'A', ['B'] = 'B', ['C'] = 'C', ['D'] = 'D', ['E'] = 'E', ['F'] = 'F', ['G'] = 'G', ['H'] = 'H',
- ['I'] = 'I', ['J'] = 'J', ['K'] = 'K', ['L'] = 'L', ['M'] = 'M', ['N'] = 'N', ['O'] = 'O', ['P'] = 'P',
- ['Q'] = 'Q', ['R'] = 'R', ['S'] = 'S', ['T'] = 'T', ['U'] = 'U', ['V'] = 'V', ['W'] = 'W', ['X'] = 'X',
- ['Y'] = 'Y', ['Z'] = 'Z',
-
- // symbols
- ['['] = '_', ['\\'] = '_', [']'] = '_', ['^'] = '_', ['_'] = '_', ['`'] = '_',
-
- // lower to upper
- ['a'] = 'A', ['b'] = 'B', ['c'] = 'C', ['d'] = 'D', ['e'] = 'E', ['f'] = 'F', ['g'] = 'G', ['h'] = 'H',
- ['i'] = 'I', ['j'] = 'J', ['k'] = 'K', ['l'] = 'L', ['m'] = 'M', ['n'] = 'N', ['o'] = 'O', ['p'] = 'P',
- ['q'] = 'Q', ['r'] = 'R', ['s'] = 'S', ['t'] = 'T', ['u'] = 'U', ['v'] = 'V', ['w'] = 'W', ['x'] = 'X',
- ['y'] = 'Y', ['z'] = 'Z',
-
- // symbols
- ['{'] = '_', ['|'] = '_', ['}'] = '_', ['~'] = '_', [127] = '_', // Delete (DEL)
-
- // Extended ASCII characters (128-255) set to underscore
- [128] = '_', [129] = '_', [130] = '_', [131] = '_', [132] = '_', [133] = '_', [134] = '_', [135] = '_',
- [136] = '_', [137] = '_', [138] = '_', [139] = '_', [140] = '_', [141] = '_', [142] = '_', [143] = '_',
- [144] = '_', [145] = '_', [146] = '_', [147] = '_', [148] = '_', [149] = '_', [150] = '_', [151] = '_',
- [152] = '_', [153] = '_', [154] = '_', [155] = '_', [156] = '_', [157] = '_', [158] = '_', [159] = '_',
- [160] = '_', [161] = '_', [162] = '_', [163] = '_', [164] = '_', [165] = '_', [166] = '_', [167] = '_',
- [168] = '_', [169] = '_', [170] = '_', [171] = '_', [172] = '_', [173] = '_', [174] = '_', [175] = '_',
- [176] = '_', [177] = '_', [178] = '_', [179] = '_', [180] = '_', [181] = '_', [182] = '_', [183] = '_',
- [184] = '_', [185] = '_', [186] = '_', [187] = '_', [188] = '_', [189] = '_', [190] = '_', [191] = '_',
- [192] = '_', [193] = '_', [194] = '_', [195] = '_', [196] = '_', [197] = '_', [198] = '_', [199] = '_',
- [200] = '_', [201] = '_', [202] = '_', [203] = '_', [204] = '_', [205] = '_', [206] = '_', [207] = '_',
- [208] = '_', [209] = '_', [210] = '_', [211] = '_', [212] = '_', [213] = '_', [214] = '_', [215] = '_',
- [216] = '_', [217] = '_', [218] = '_', [219] = '_', [220] = '_', [221] = '_', [222] = '_', [223] = '_',
- [224] = '_', [225] = '_', [226] = '_', [227] = '_', [228] = '_', [229] = '_', [230] = '_', [231] = '_',
- [232] = '_', [233] = '_', [234] = '_', [235] = '_', [236] = '_', [237] = '_', [238] = '_', [239] = '_',
- [240] = '_', [241] = '_', [242] = '_', [243] = '_', [244] = '_', [245] = '_', [246] = '_', [247] = '_',
- [248] = '_', [249] = '_', [250] = '_', [251] = '_', [252] = '_', [253] = '_', [254] = '_', [255] = '_',
-};
-
-// ----------------------------------------------------------------------------
-
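-// Intern a key in the job's hashtable, so repeated lookups of the same key
-// resolve to a single allocated HASHED_KEY. A one-time string comparison
-// guards against XXH3 hash collisions.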
-static inline HASHED_KEY *get_key_from_hashtable(LOG_JOB *jb, HASHED_KEY *k) {
- if(k->flags & HK_HASHTABLE_ALLOCATED)
- return k;
-
- if(!k->hashtable_ptr) {
- HASHED_KEY *ht_key;
- SIMPLE_HASHTABLE_SLOT_KEY *slot = simple_hashtable_get_slot_KEY(&jb->hashtable, k->hash, true);
- if((ht_key = SIMPLE_HASHTABLE_SLOT_DATA(slot))) {
- if(!(ht_key->flags & HK_COLLISION_CHECKED)) {
- ht_key->flags |= HK_COLLISION_CHECKED;
-
- if(strcmp(ht_key->key, k->key) != 0)
- log2stderr("Hashtable collision detected on key '%s' (hash %lx) and '%s' (hash %lx). "
- "Please file a bug report.", ht_key->key, (unsigned long) ht_key->hash, k->key
- , (unsigned long) k->hash
- );
- }
- }
- else {
- ht_key = callocz(1, sizeof(HASHED_KEY));
- ht_key->key = strdupz(k->key);
- ht_key->len = k->len;
- ht_key->hash = k->hash;
- ht_key->flags = HK_HASHTABLE_ALLOCATED;
-
- simple_hashtable_set_slot_KEY(&jb->hashtable, slot, ht_key->hash, ht_key);
- }
-
- k->hashtable_ptr = ht_key;
- }
-
- return k->hashtable_ptr;
-}
-
-static inline HASHED_KEY *get_key_from_hashtable_with_char_ptr(LOG_JOB *jb, const char *key) {
- HASHED_KEY find = {
- .key = key,
- .len = strlen(key),
- };
- find.hash = XXH3_64bits(key, find.len);
-
- return get_key_from_hashtable(jb, &find);
-}
-
-// ----------------------------------------------------------------------------
-
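-// Warn (without failing) about keys systemd-journal may reject: over-long
-// keys, invalid characters, a leading digit, or a leading underscore (which
-// marks trusted fields).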
-static inline void validate_key(LOG_JOB *jb __maybe_unused, HASHED_KEY *k) {
- if(k->len > JOURNAL_MAX_KEY_LEN)
- log2stderr("WARNING: key '%s' has length %zu, which is more than %zu, the max systemd-journal allows",
- k->key, (size_t)k->len, (size_t)JOURNAL_MAX_KEY_LEN);
-
- for(size_t i = 0; i < k->len ;i++) {
- char c = k->key[i];
-
- if((c < 'A' || c > 'Z') && !isdigit(c) && c != '_') {
- log2stderr("WARNING: key '%s' contains characters that are not allowed by systemd-journal.", k->key);
- break;
- }
- }
-
- if(isdigit(k->key[0]))
- log2stderr("WARNING: key '%s' starts with a digit and may not be accepted by systemd-journal.", k->key);
-
- if(k->key[0] == '_')
- log2stderr("WARNING: key '%s' starts with an underscore, which makes it a systemd-journal trusted field. "
- "Such fields are accepted by systemd-journal-remote, but not by systemd-journald.", k->key);
-}
-
-// ----------------------------------------------------------------------------
-
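-// Evaluate a replacement pattern into a caller-supplied buffer, resolving
-// ${variable} nodes from the current line or from already extracted keys.
-// Returns the number of bytes written.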
-static inline size_t replace_evaluate_to_buffer(LOG_JOB *jb, HASHED_KEY *k __maybe_unused, REPLACE_PATTERN *rp, char *dst, size_t dst_size) {
- size_t remaining = dst_size;
- char *copy_to = dst;
-
- for(REPLACE_NODE *node = rp->nodes; node != NULL && remaining > 1; node = node->next) {
- if(node->is_variable) {
- if(hashed_keys_match(&node->name, &jb->line.key)) {
- size_t copied = copy_to_buffer(copy_to, remaining, jb->line.trimmed, jb->line.trimmed_len);
- copy_to += copied;
- remaining -= copied;
- }
- else {
- HASHED_KEY *ktmp = get_key_from_hashtable_with_char_ptr(jb, node->name.key);
- if(ktmp->value.len) {
- size_t copied = copy_to_buffer(copy_to, remaining, ktmp->value.txt, ktmp->value.len);
- copy_to += copied;
- remaining -= copied;
- }
- }
- }
- else {
- size_t copied = copy_to_buffer(copy_to, remaining, node->name.key, node->name.len);
- copy_to += copied;
- remaining -= copied;
- }
- }
-
- return copy_to - dst;
-}
-
-static inline void replace_evaluate(LOG_JOB *jb, HASHED_KEY *k, REPLACE_PATTERN *rp) {
- HASHED_KEY *ht_key = get_key_from_hashtable(jb, k);
-
- // set it to empty value
- k->value.len = 0;
-
- for(REPLACE_NODE *node = rp->nodes; node != NULL; node = node->next) {
- if(node->is_variable) {
- if(hashed_keys_match(&node->name, &jb->line.key))
- txt_expand_and_append(&ht_key->value, jb->line.trimmed, jb->line.trimmed_len);
-
- else {
- HASHED_KEY *ktmp = get_key_from_hashtable_with_char_ptr(jb, node->name.key);
- if(ktmp->value.len)
- txt_expand_and_append(&ht_key->value, ktmp->value.txt, ktmp->value.len);
- }
- }
- else
- txt_expand_and_append(&ht_key->value, node->name.key, node->name.len);
- }
-}
-
-static inline void replace_evaluate_from_pcre2(LOG_JOB *jb, HASHED_KEY *k, REPLACE_PATTERN *rp, SEARCH_PATTERN *sp) {
- assert(k->flags & HK_HASHTABLE_ALLOCATED);
-
- // set the temporary TEXT to zero length
- jb->rewrites.tmp.len = 0;
-
- PCRE2_SIZE *ovector = pcre2_get_ovector_pointer(sp->match_data);
-
- // Iterate through the linked list of replacement nodes
- for(REPLACE_NODE *node = rp->nodes; node != NULL; node = node->next) {
- if(node->is_variable) {
- int group_number = pcre2_substring_number_from_name(
- sp->re, (PCRE2_SPTR) node->name.key);
-
- if(group_number >= 0) {
- PCRE2_SIZE start_offset = ovector[2 * group_number];
- PCRE2_SIZE end_offset = ovector[2 * group_number + 1];
- PCRE2_SIZE length = end_offset - start_offset;
-
- txt_expand_and_append(&jb->rewrites.tmp, k->value.txt + start_offset, length);
- }
- else {
- if(hashed_keys_match(&node->name, &jb->line.key))
- txt_expand_and_append(&jb->rewrites.tmp, jb->line.trimmed, jb->line.trimmed_len);
-
- else {
- HASHED_KEY *ktmp = get_key_from_hashtable_with_char_ptr(jb, node->name.key);
- if(ktmp->value.len)
- txt_expand_and_append(&jb->rewrites.tmp, ktmp->value.txt, ktmp->value.len);
- }
- }
- }
- else {
- txt_expand_and_append(&jb->rewrites.tmp, node->name.key, node->name.len);
- }
- }
-
- // swap the values of the temporary TEXT and the key value
- TEXT tmp = k->value;
- k->value = jb->rewrites.tmp;
- jb->rewrites.tmp = tmp;
-}
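-// For example: with the search pattern '^(?<first_digit>[1-5])' matched
-// against the value "404", the replacement '${first_digit}xx' produces
-// "4xx"; named groups resolve from the match ovector, while the other
-// variables fall back to keys in the hashtable or to literal text.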
-
-static inline bool rewrite_conditions_satisfied(LOG_JOB *jb, HASHED_KEY *k, REWRITE *rw) {
- assert(k->flags & HK_HASHTABLE_ALLOCATED);
-
- if(rw->flags & RW_MATCH_PCRE2) {
- return search_pattern_matches(&rw->match_pcre2, k->value.txt, k->value.len);
- }
- else if(rw->flags & RW_MATCH_NON_EMPTY) {
- char buffer[2]; // we don't need a big buffer - we just check if anything is written
- if(replace_evaluate_to_buffer(jb, k, &rw->match_non_empty, buffer, sizeof(buffer)))
- // it copied something
- return true;
- else
- // it copied nothing
- return false;
- }
- else
- // no conditions
- return true;
-}
-
-// ----------------------------------------------------------------------------
-
-static inline HASHED_KEY *rename_key(LOG_JOB *jb, HASHED_KEY *k) {
- if(!(k->flags & HK_RENAMES_CHECKED) || k->flags & HK_HAS_RENAMES) {
- k->flags |= HK_RENAMES_CHECKED;
-
- for(size_t i = 0; i < jb->renames.used; i++) {
- RENAME *rn = &jb->renames.array[i];
-
- if(hashed_keys_match(&rn->old_key, k)) {
- k->flags |= HK_HAS_RENAMES;
-
- return get_key_from_hashtable(jb, &rn->new_key);
- }
- }
- }
-
- return k;
-}
-
-// ----------------------------------------------------------------------------
-
-static inline void send_key_value_constant(LOG_JOB *jb __maybe_unused, HASHED_KEY *key, const char *value, size_t len) {
- HASHED_KEY *ht_key = get_key_from_hashtable(jb, key);
-
- txt_replace(&ht_key->value, value, len);
- ht_key->flags |= HK_VALUE_FROM_LOG;
-
- // fprintf(stderr, "SET %s=%.*s\n", ht_key->key, (int)ht_key->value.len, ht_key->value.txt);
-}
-
-static inline void send_key_value_error(LOG_JOB *jb, HASHED_KEY *key, const char *format, ...) __attribute__ ((format(__printf__, 3, 4)));
-static inline void send_key_value_error(LOG_JOB *jb, HASHED_KEY *key, const char *format, ...) {
- HASHED_KEY *ht_key = get_key_from_hashtable(jb, key);
-
- printf("%s=", ht_key->key);
- va_list args;
- va_start(args, format);
- vprintf(format, args);
- va_end(args);
- printf("\n");
-}
-
-inline void log_job_send_extracted_key_value(LOG_JOB *jb, const char *key, const char *value, size_t len) {
- HASHED_KEY *ht_key = get_key_from_hashtable_with_char_ptr(jb, key);
- HASHED_KEY *nk = rename_key(jb, ht_key);
- txt_replace(&nk->value, value, len);
-    nk->flags |= HK_VALUE_FROM_LOG;
-
-// fprintf(stderr, "SET %s=%.*s\n", ht_key->key, (int)ht_key->value.len, ht_key->value.txt);
-}
-
-static inline void log_job_process_rewrites(LOG_JOB *jb) {
- for(size_t i = 0; i < jb->rewrites.used ;i++) {
- REWRITE *rw = &jb->rewrites.array[i];
-
- HASHED_KEY *k = get_key_from_hashtable(jb, &rw->key);
-
- if(!(rw->flags & RW_INJECT) && !(k->flags & HK_VALUE_FROM_LOG) && !k->value.len)
- continue;
-
- if(!(k->flags & HK_VALUE_REWRITTEN) && rewrite_conditions_satisfied(jb, k, rw)) {
- if(rw->flags & RW_MATCH_PCRE2)
- replace_evaluate_from_pcre2(jb, k, &rw->value, &rw->match_pcre2);
- else
- replace_evaluate(jb, k, &rw->value);
-
- if(!(rw->flags & RW_DONT_STOP))
- k->flags |= HK_VALUE_REWRITTEN;
-
-// fprintf(stderr, "REWRITE %s=%.*s\n", k->key, (int)k->value.len, k->value.txt);
- }
- }
-}
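-// Note: unless a rule sets RW_DONT_STOP ('stop: no' in YAML), the first
-// matching rewrite marks the key HK_VALUE_REWRITTEN, so later rules for the
-// same key are skipped for the remainder of this log line.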
-
-static inline void send_all_fields(LOG_JOB *jb) {
- SIMPLE_HASHTABLE_SORTED_FOREACH_READ_ONLY(&jb->hashtable, kptr, HASHED_KEY, _KEY) {
- HASHED_KEY *k = SIMPLE_HASHTABLE_SORTED_FOREACH_READ_ONLY_VALUE(kptr);
-
- if(k->value.len) {
- // the key exists and has some value
-
- if(!(k->flags & HK_FILTERED)) {
- k->flags |= HK_FILTERED;
-
- bool included = jb->filter.include.re ? search_pattern_matches(&jb->filter.include, k->key, k->len) : true;
- bool excluded = jb->filter.exclude.re ? search_pattern_matches(&jb->filter.exclude, k->key, k->len) : false;
-
- if(included && !excluded)
- k->flags |= HK_FILTERED_INCLUDED;
- else
- k->flags &= ~HK_FILTERED_INCLUDED;
-
-            // warn if the key does not comply with journal standards
- validate_key(jb, k);
- }
-
- if(k->flags & HK_FILTERED_INCLUDED)
- printf("%s=%.*s\n", k->key, (int)k->value.len, k->value.txt);
-
- // reset it for the next round
- k->value.txt[0] = '\0';
- k->value.len = 0;
- }
-
- k->flags &= ~(HK_VALUE_REWRITTEN | HK_VALUE_FROM_LOG);
- }
-}
-
-// ----------------------------------------------------------------------------
-// injection of constant fields
-
-static void select_which_injections_should_be_injected_on_unmatched(LOG_JOB *jb) {
- // mark all injections to be added to unmatched logs
- for(size_t i = 0; i < jb->injections.used ; i++)
- jb->injections.keys[i].on_unmatched = true;
-
- if(jb->injections.used && jb->unmatched.injections.used) {
- // we have both injections and injections on unmatched
-
- // we find all the injections that are also configured as injections on unmatched,
- // and we disable them, so that the output will not have the same key twice
-
- for(size_t i = 0; i < jb->injections.used ;i++) {
- for(size_t u = 0; u < jb->unmatched.injections.used ; u++) {
- if(strcmp(jb->injections.keys[i].key.key, jb->unmatched.injections.keys[u].key.key) == 0)
- jb->injections.keys[i].on_unmatched = false;
- }
- }
- }
-}
-
-
-static inline void jb_finalize_injections(LOG_JOB *jb, bool line_is_matched) {
- for (size_t j = 0; j < jb->injections.used; j++) {
- if(!line_is_matched && !jb->injections.keys[j].on_unmatched)
- continue;
-
- INJECTION *inj = &jb->injections.keys[j];
-
- replace_evaluate(jb, &inj->key, &inj->value);
- }
-}
-
-// ----------------------------------------------------------------------------
-// filename injection
-
-static inline void jb_inject_filename(LOG_JOB *jb) {
- if (jb->filename.key.key && jb->filename.current.len)
- send_key_value_constant(jb, &jb->filename.key, jb->filename.current.txt, jb->filename.current.len);
-}
-
-static inline bool jb_switched_filename(LOG_JOB *jb, const char *line, size_t len) {
- // IMPORTANT:
- // Return TRUE when the caller should skip this line (because it is ours).
- // Unfortunately, we have to consume empty lines too.
-
- // IMPORTANT:
-    // the buffer may not be NUL-terminated and may contain more data than just the filename.
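-    // Example of the header that `tail -F file1 file2` prints between chunks:
-    //   ==> /var/log/nginx/access.log <==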
-
- if (!len) {
- jb->filename.last_line_was_empty = true;
- return true;
- }
-
- // Check if it's a log file change line
- if (jb->filename.last_line_was_empty && line[0] == '=' && strncmp(line, "==> ", 4) == 0) {
- const char *start = line + 4;
- const char *end = strstr(line, " <==");
- while (*start == ' ') start++;
- if (*start != '\n' && *start != '\0' && end) {
- txt_replace(&jb->filename.current, start, end - start);
- return true;
- }
- }
-
- jb->filename.last_line_was_empty = false;
- return false;
-}
-
-static inline bool jb_send_unmatched_line(LOG_JOB *jb, const char *line) {
- if (!jb->unmatched.key.key)
- return false;
-
- // we are sending errors to systemd-journal
- send_key_value_error(jb, &jb->unmatched.key, "Parsing error on: %s", line);
-
- for (size_t j = 0; j < jb->unmatched.injections.used; j++) {
- INJECTION *inj = &jb->unmatched.injections.keys[j];
-
- replace_evaluate(jb, &inj->key, &inj->value);
- }
-
- return true;
-}
-
-// ----------------------------------------------------------------------------
-// running a job
-
-static char *get_next_line(LOG_JOB *jb __maybe_unused, char *buffer, size_t size, size_t *line_length) {
- if(!fgets(buffer, (int)size, stdin)) {
- *line_length = 0;
- return NULL;
- }
-
- char *line = buffer;
- size_t len = strlen(line);
-
- // remove trailing newlines and spaces
- while(len > 1 && (line[len - 1] == '\n' || isspace(line[len - 1])))
- line[--len] = '\0';
-
- // skip leading spaces
- while(isspace(*line)) {
- line++;
- len--;
- }
-
- *line_length = len;
- return line;
-}
-
-int log_job_run(LOG_JOB *jb) {
- select_which_injections_should_be_injected_on_unmatched(jb);
-
- PCRE2_STATE *pcre2 = NULL;
- LOG_JSON_STATE *json = NULL;
- LOGFMT_STATE *logfmt = NULL;
-
- if(strcmp(jb->pattern, "json") == 0) {
- json = json_parser_create(jb);
- // never fails
- }
- else if(strcmp(jb->pattern, "logfmt") == 0) {
- logfmt = logfmt_parser_create(jb);
- // never fails
- }
- else if(strcmp(jb->pattern, "none") != 0) {
- pcre2 = pcre2_parser_create(jb);
- if(pcre2_has_error(pcre2)) {
- log2stderr("%s", pcre2_parser_error(pcre2));
- pcre2_parser_destroy(pcre2);
- return 1;
- }
- }
-
- jb->line.buffer = mallocz(MAX_LINE_LENGTH + 1);
- jb->line.size = MAX_LINE_LENGTH + 1;
- jb->line.trimmed_len = 0;
- jb->line.trimmed = jb->line.buffer;
-
- while ((jb->line.trimmed = get_next_line(jb, (char *)jb->line.buffer, jb->line.size, &jb->line.trimmed_len))) {
- const char *line = jb->line.trimmed;
- size_t len = jb->line.trimmed_len;
-
- if(jb_switched_filename(jb, line, len))
- continue;
-
- bool line_is_matched = true;
-
- if(json)
- line_is_matched = json_parse_document(json, line);
- else if(logfmt)
- line_is_matched = logfmt_parse_document(logfmt, line);
- else if(pcre2)
- line_is_matched = pcre2_parse_document(pcre2, line, len);
-
- if(!line_is_matched) {
- if(json)
- log2stderr("%s", json_parser_error(json));
- else if(logfmt)
- log2stderr("%s", logfmt_parser_error(logfmt));
- else if(pcre2)
- log2stderr("%s", pcre2_parser_error(pcre2));
-
- if(!jb_send_unmatched_line(jb, line))
- // just logging to stderr, not sending unmatched lines
- continue;
- }
-
- jb_inject_filename(jb);
- jb_finalize_injections(jb, line_is_matched);
-
- log_job_process_rewrites(jb);
- send_all_fields(jb);
- printf("\n");
- fflush(stdout);
- }
-
- if(json)
- json_parser_destroy(json);
-
- else if(logfmt)
- logfmt_parser_destroy(logfmt);
-
- else if(pcre2)
- pcre2_parser_destroy(pcre2);
-
- freez((void *)jb->line.buffer);
-
- return 0;
-}
-
-// ----------------------------------------------------------------------------
-
-int main(int argc, char *argv[]) {
- LOG_JOB log_job;
-
- log_job_init(&log_job);
-
- if(!log_job_command_line_parse_parameters(&log_job, argc, argv))
- exit(1);
-
- if(log_job.show_config)
- log_job_configuration_to_yaml(&log_job);
-
- int ret = log_job_run(&log_job);
-
- log_job_cleanup(&log_job);
- return ret;
-}
diff --git a/collectors/log2journal/log2journal.d/default.yaml b/collectors/log2journal/log2journal.d/default.yaml
deleted file mode 100644
index d41efc4ab..000000000
--- a/collectors/log2journal/log2journal.d/default.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-pattern: none
-
-filename:
- key: LOG_FILENAME
-
-inject:
- - key: MESSAGE
- value: '${LINE}' # a special variable that resolves to the whole line read from the log
-
- - key: PRIORITY
- value: 6 # Valid PRIORITIES: 0=emerg, 1=alert, 2=crit, 3=error, 4=warn, 5=notice, 6=info, 7=debug
-
- - key: SYSLOG_IDENTIFIER
- value: log2journal # the name of the application sending the logs
-
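-# A minimal usage sketch (the exact command line below is an assumption,
-# not something this file defines):
-#   tail -F /var/log/app.log | log2journal -f default.yaml | systemd-cat-native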
diff --git a/collectors/log2journal/log2journal.d/nginx-combined.yaml b/collectors/log2journal/log2journal.d/nginx-combined.yaml
deleted file mode 100644
index 003c774d7..000000000
--- a/collectors/log2journal/log2journal.d/nginx-combined.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-# Netdata log2journal Configuration
-# The following parses nginx log files using the combined format.
-
-# The PCRE2 pattern to match log entries and give names to the fields.
-# The journal fields will use these names, so they must follow the
-# systemd-journal naming rules. You can enable PCRE2 extended mode by
-# starting the pattern with (?x)
-pattern: |
- (?x) # Enable PCRE2 extended mode
- ^
- (?<NGINX_REMOTE_ADDR>[^ ]+) \s - \s # NGINX_REMOTE_ADDR
- (?<NGINX_REMOTE_USER>[^ ]+) \s # NGINX_REMOTE_USER
- \[
- (?<NGINX_TIME_LOCAL>[^\]]+) # NGINX_TIME_LOCAL
- \]
- \s+ "
- (?<NGINX_REQUEST>
-  (?<NGINX_REQUEST_METHOD>[A-Z]+) \s+ # NGINX_REQUEST_METHOD
- (?<NGINX_REQUEST_URI>[^ ]+) \s+
- (?<NGINX_SERVER_PROTOCOL>[^"]+)
- )
- " \s+
- (?<NGINX_STATUS>\d+) \s+ # NGINX_STATUS
- (?<NGINX_BODY_BYTES_SENT>\d+) \s+ # NGINX_BODY_BYTES_SENT
- "(?<NGINX_HTTP_REFERER>[^"]*)" \s+ # NGINX_HTTP_REFERER
- "(?<NGINX_HTTP_USER_AGENT>[^"]*)" # NGINX_HTTP_USER_AGENT
-
-# When log2journal can detect the filename of each log entry (tail gives it
-# only when it tails multiple files), this key will be used to send the
-# filename to the journals.
-filename:
- key: NGINX_LOG_FILENAME
-
-rename:
- - new_key: MESSAGE
- old_key: NGINX_REQUEST
-
-# Inject constant fields into the journal logs.
-inject:
- - key: SYSLOG_IDENTIFIER
- value: nginx-log
-
-  # Inject PRIORITY as a duplicate of NGINX_STATUS
- - key: PRIORITY
- value: '${NGINX_STATUS}'
-
-  # Inject NGINX_STATUS_FAMILY as a duplicate of NGINX_STATUS
- - key: NGINX_STATUS_FAMILY
- value: '${NGINX_STATUS}'
-
-# Rewrite the value of fields (including the duplicated ones).
-# The search pattern can have named groups, and the replace pattern can use
-# them as ${name}.
-rewrite:
- # PRIORITY is a duplicate of NGINX_STATUS
- # Valid PRIORITIES: 0=emerg, 1=alert, 2=crit, 3=error, 4=warn, 5=notice, 6=info, 7=debug
- - key: PRIORITY
- match: '^[123]'
- value: 6
-
- - key: PRIORITY
- match: '^4'
- value: 5
-
- - key: PRIORITY
- match: '^5'
- value: 3
-
- - key: PRIORITY
- match: '.*'
- value: 4
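-  # Net effect: 1xx/2xx/3xx -> 6 (info), 4xx -> 5 (notice),
-  # 5xx -> 3 (error), anything else -> 4 (warning).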
-
- # NGINX_STATUS_FAMILY is a duplicate of NGINX_STATUS
- - key: NGINX_STATUS_FAMILY
- match: '^(?<first_digit>[1-5])'
- value: '${first_digit}xx'
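-  # e.g. NGINX_STATUS=404 gives NGINX_STATUS_FAMILY=4xx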
-
- - key: NGINX_STATUS_FAMILY
- match: '.*'
- value: 'UNKNOWN'
-
-# Control what to do when input logs do not match the main PCRE2 pattern.
-unmatched:
- # The journal key to log the PCRE2 error message to.
-  # Set this to MESSAGE, so you can see the error in the log.
- key: MESSAGE
-
- # Inject static fields to the unmatched entries.
- # Set PRIORITY=1 (alert) to help you spot unmatched entries in the logs.
- inject:
- - key: PRIORITY
- value: 1
diff --git a/collectors/log2journal/log2journal.d/nginx-json.yaml b/collectors/log2journal/log2journal.d/nginx-json.yaml
deleted file mode 100644
index 7fdc4be58..000000000
--- a/collectors/log2journal/log2journal.d/nginx-json.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
-# For all nginx variables, check this:
-# https://nginx.org/en/docs/http/ngx_http_core_module.html#var_connection_requests
-
-pattern: json
-
-prefix: NGINX_
-
-# When log2journal can detect the filename of each log entry (tail gives it
-# only when it tails multiple files), this key will be used to send the
-# filename to the journals.
-filename:
- key: NGINX_LOG_FILENAME
-
-filter:
- exclude: '^(NGINX_BINARY_REMOTE_ADDR)$'
-
-rename:
- - new_key: MESSAGE
- old_key: NGINX_REQUEST
-
- # args is an alias for query_string
- - new_key: NGINX_QUERY_STRING
- old_key: NGINX_ARGS
-
- # document_uri is an alias for uri
- - new_key: NGINX_URI
- old_key: NGINX_DOCUMENT_URI
-
- # is_args states if the request had a query string or not
- - new_key: NGINX_HAS_QUERY_STRING
- old_key: NGINX_IS_ARGS
-
- # msec is the timestamp in seconds, with fractional digits for milliseconds
- - new_key: NGINX_TIMESTAMP_SEC
- old_key: NGINX_MSEC
-
-  # nginx_version also gets the NGINX_ prefix, becoming NGINX_NGINX_VERSION; drop the duplicate
- - new_key: NGINX_VERSION
- old_key: NGINX_NGINX_VERSION
-
- # pipe states if the request was pipelined or not
- - new_key: NGINX_PIPELINED
- old_key: NGINX_PIPE
-
- # rename numeric TLVs to their names
- - new_key: NGINX_PROXY_PROTOCOL_TLV_ALPN
- old_key: NGINX_PROXY_PROTOCOL_TLV_0X01
- - new_key: NGINX_PROXY_PROTOCOL_TLV_AUTHORITY
- old_key: NGINX_PROXY_PROTOCOL_TLV_0X02
- - new_key: NGINX_PROXY_PROTOCOL_TLV_UNIQUE_ID
- old_key: NGINX_PROXY_PROTOCOL_TLV_0X05
- - new_key: NGINX_PROXY_PROTOCOL_TLV_SSL
- old_key: NGINX_PROXY_PROTOCOL_TLV_0X20
- - new_key: NGINX_PROXY_PROTOCOL_TLV_NETNS
- old_key: NGINX_PROXY_PROTOCOL_TLV_0X30
-
- # rename numeric SSL TLVs to their names
- - new_key: NGINX_PROXY_PROTOCOL_TLV_SSL_VERSION
- old_key: NGINX_PROXY_PROTOCOL_TLV_SSL_0X21
- - new_key: NGINX_PROXY_PROTOCOL_TLV_SSL_CN
- old_key: NGINX_PROXY_PROTOCOL_TLV_SSL_0X22
- - new_key: NGINX_PROXY_PROTOCOL_TLV_SSL_CIPHER
- old_key: NGINX_PROXY_PROTOCOL_TLV_SSL_0X23
- - new_key: NGINX_PROXY_PROTOCOL_TLV_SSL_SIG_ALG
- old_key: NGINX_PROXY_PROTOCOL_TLV_SSL_0X24
- - new_key: NGINX_PROXY_PROTOCOL_TLV_SSL_KEY_ALG
- old_key: NGINX_PROXY_PROTOCOL_TLV_SSL_0X25
-
-# Inject constant fields into the journal logs.
-inject:
- - key: SYSLOG_IDENTIFIER
- value: nginx-log
-
-  # Inject PRIORITY as a duplicate of NGINX_STATUS
- - key: PRIORITY
- value: '${NGINX_STATUS}'
-
-  # Inject NGINX_STATUS_FAMILY as a duplicate of NGINX_STATUS
- - key: NGINX_STATUS_FAMILY
- value: '${NGINX_STATUS}'
-
-
-# Rewrite the value of fields (including the duplicated ones).
-# The search pattern can have named groups, and the replace pattern can use
-# them as ${name}.
-rewrite:
-  # a '?' means the request has a query string, anything else means it does not
- - key: NGINX_HAS_QUERY_STRING
- match: '^\?$'
- value: yes
- - key: NGINX_HAS_QUERY_STRING
- match: '.*'
- value: no
-
- # 'on' means it was HTTPS, everything else means it was not
- - key: NGINX_HTTPS
- match: '^on$'
- value: yes
- - key: NGINX_HTTPS
- match: '.*'
- value: no
-
- # 'p' means it was pipelined, everything else means it was not
- - key: NGINX_PIPELINED
- match: '^p$'
- value: yes
- - key: NGINX_PIPELINED
- match: '.*'
- value: no
-
- # zero means client sent a certificate and it was verified, non-zero means otherwise
- - key: NGINX_PROXY_PROTOCOL_TLV_SSL_VERIFY
- match: '^0$'
- value: yes
- - key: NGINX_PROXY_PROTOCOL_TLV_SSL_VERIFY
- match: '.*'
- value: no
-
- # 'OK' means request completed, everything else means it didn't
- - key: NGINX_REQUEST_COMPLETION
- match: '^OK$'
- value: 'completed'
- - key: NGINX_REQUEST_COMPLETION
- match: '.*'
- value: 'not completed'
-
-  # PRIORITY is a duplicate of NGINX_STATUS
- # Valid PRIORITIES: 0=emerg, 1=alert, 2=crit, 3=error, 4=warn, 5=notice, 6=info, 7=debug
- - key: PRIORITY
- match: '^[123]'
- value: 6
-
- - key: PRIORITY
- match: '^4'
- value: 5
-
- - key: PRIORITY
- match: '^5'
- value: 3
-
- - key: PRIORITY
- match: '.*'
- value: 4
-
- # NGINX_STATUS_FAMILY is a duplicate of NGINX_STATUS
- - key: NGINX_STATUS_FAMILY
- match: '^(?<first_digit>[1-5])'
- value: '${first_digit}xx'
-
- - key: NGINX_STATUS_FAMILY
- match: '.*'
- value: 'UNKNOWN'
-
-# Control what to do when input logs do not match the main PCRE2 pattern.
-unmatched:
- # The journal key to log the PCRE2 error message to.
-  # Set this to MESSAGE, so you can see the error in the log.
- key: MESSAGE
-
- # Inject static fields to the unmatched entries.
- # Set PRIORITY=1 (alert) to help you spot unmatched entries in the logs.
- inject:
- - key: PRIORITY
- value: 1
diff --git a/collectors/log2journal/log2journal.h b/collectors/log2journal/log2journal.h
deleted file mode 100644
index 834a5b135..000000000
--- a/collectors/log2journal/log2journal.h
+++ /dev/null
@@ -1,501 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_LOG2JOURNAL_H
-#define NETDATA_LOG2JOURNAL_H
-
-// only for PACKAGE_VERSION
-#include "config.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <dirent.h>
-#include <string.h>
-#include <stdbool.h>
-#include <ctype.h>
-#include <math.h>
-#include <stdarg.h>
-#include <assert.h>
-
-// ----------------------------------------------------------------------------
-// logging
-
-// enable the compiler to check for printf-like format errors in calls to our log2stderr() function
-static inline void log2stderr(const char *format, ...) __attribute__ ((format(__printf__, 1, 2)));
-static inline void log2stderr(const char *format, ...) {
- va_list args;
- va_start(args, format);
- vfprintf(stderr, format, args);
- va_end(args);
- fprintf(stderr, "\n");
-}
-
-// ----------------------------------------------------------------------------
-// allocation functions abstraction
-
-static inline void *mallocz(size_t size) {
- void *ptr = malloc(size);
- if (!ptr) {
- log2stderr("Fatal Error: Memory allocation failed. Requested size: %zu bytes.", size);
- exit(EXIT_FAILURE);
- }
- return ptr;
-}
-
-static inline void *callocz(size_t elements, size_t size) {
- void *ptr = calloc(elements, size);
- if (!ptr) {
- log2stderr("Fatal Error: Memory allocation failed. Requested size: %zu bytes.", elements * size);
- exit(EXIT_FAILURE);
- }
- return ptr;
-}
-
-static inline void *reallocz(void *ptr, size_t size) {
- void *new_ptr = realloc(ptr, size);
- if (!new_ptr) {
- log2stderr("Fatal Error: Memory reallocation failed. Requested size: %zu bytes.", size);
- exit(EXIT_FAILURE);
- }
- return new_ptr;
-}
-
-static inline char *strdupz(const char *s) {
- char *ptr = strdup(s);
- if (!ptr) {
- log2stderr("Fatal Error: Memory allocation failed in strdup.");
- exit(EXIT_FAILURE);
- }
- return ptr;
-}
-
-static inline char *strndupz(const char *s, size_t n) {
- char *ptr = strndup(s, n);
- if (!ptr) {
- log2stderr("Fatal Error: Memory allocation failed in strndup. Requested size: %zu bytes.", n);
- exit(EXIT_FAILURE);
- }
- return ptr;
-}
-
-static inline void freez(void *ptr) {
- if (ptr)
- free(ptr);
-}
-
-// ----------------------------------------------------------------------------
-
-#define XXH_INLINE_ALL
-#include "../../libnetdata/xxhash.h"
-
-#define PCRE2_CODE_UNIT_WIDTH 8
-#include <pcre2.h>
-
-#ifdef HAVE_LIBYAML
-#include <yaml.h>
-#endif
-
-// ----------------------------------------------------------------------------
-// hashtable for HASHED_KEY
-
-// cleanup hashtable defines
-#undef SIMPLE_HASHTABLE_SORT_FUNCTION
-#undef SIMPLE_HASHTABLE_VALUE_TYPE
-#undef SIMPLE_HASHTABLE_NAME
-#undef NETDATA_SIMPLE_HASHTABLE_H
-
-struct hashed_key;
-static inline int compare_keys(struct hashed_key *k1, struct hashed_key *k2);
-#define SIMPLE_HASHTABLE_SORT_FUNCTION compare_keys
-#define SIMPLE_HASHTABLE_VALUE_TYPE struct hashed_key
-#define SIMPLE_HASHTABLE_NAME _KEY
-#include "../../libnetdata/simple_hashtable.h"
-
-// ----------------------------------------------------------------------------
-
-#define MAX_OUTPUT_KEYS 1024
-#define MAX_LINE_LENGTH (1024 * 1024)
-#define MAX_INJECTIONS (MAX_OUTPUT_KEYS / 2)
-#define MAX_REWRITES (MAX_OUTPUT_KEYS / 2)
-#define MAX_RENAMES (MAX_OUTPUT_KEYS / 2)
-
-#define JOURNAL_MAX_KEY_LEN 64 // according to systemd-journald
-#define JOURNAL_MAX_VALUE_LEN (48 * 1024) // according to systemd-journald
-
-#define LOG2JOURNAL_CONFIG_PATH LIBCONFIG_DIR "/log2journal.d"
-
-// ----------------------------------------------------------------------------
-// character conversion for journal keys
-
-extern const char journal_key_characters_map[256];
-
-// ----------------------------------------------------------------------------
-// copy to buffer, while ensuring there is no buffer overflow
-
-static inline size_t copy_to_buffer(char *dst, size_t dst_size, const char *src, size_t src_len) {
- if(dst_size < 2) {
- if(dst_size == 1)
- *dst = '\0';
-
- return 0;
- }
-
- if(src_len <= dst_size - 1) {
- memcpy(dst, src, src_len);
- dst[src_len] = '\0';
- return src_len;
- }
- else {
- memcpy(dst, src, dst_size - 1);
- dst[dst_size - 1] = '\0';
- return dst_size - 1;
- }
-}
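-// For example: copying "hello" (5 bytes) into a 4-byte buffer stores "hel"
-// plus the terminating NUL and returns 3; a 1-byte buffer stores just the
-// NUL and returns 0.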
-
-// ----------------------------------------------------------------------------
-// A dynamically sized, reusable text buffer that lets us stay fast
-// (no allocations while iterating) while keeping allocations to a minimum.
-
-typedef struct txt {
- char *txt;
- uint32_t size;
- uint32_t len;
-} TEXT;
-
-static inline void txt_cleanup(TEXT *t) {
- if(!t)
- return;
-
- if(t->txt)
- freez(t->txt);
-
- t->txt = NULL;
- t->size = 0;
- t->len = 0;
-}
-
-static inline void txt_replace(TEXT *t, const char *s, size_t len) {
- if(!s || !*s || len == 0) {
- s = "";
- len = 0;
- }
-
- if(len + 1 <= t->size) {
-        // the existing allocation fits our value
-
- memcpy(t->txt, s, len);
- t->txt[len] = '\0';
- t->len = len;
- }
- else {
-        // no existing allocation, or it is too small for our value
- // cleanup and increase the buffer
-
- txt_cleanup(t);
-
- t->txt = strndupz(s, len);
- t->size = len + 1;
- t->len = len;
- }
-}
-
-static inline void txt_expand_and_append(TEXT *t, const char *s, size_t len) {
- if(len + 1 > (t->size - t->len)) {
- size_t new_size = t->len + len + 1;
- if(new_size < t->size * 2)
- new_size = t->size * 2;
-
- t->txt = reallocz(t->txt, new_size);
- t->size = new_size;
- }
-
- char *copy_to = &t->txt[t->len];
- memcpy(copy_to, s, len);
- copy_to[len] = '\0';
- t->len += len;
-}
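-// Growth policy: the buffer is extended to fit the appended data plus the
-// terminating NUL, or to double its current size, whichever is larger, so
-// repeated appends need amortized O(1) allocations.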
-
-// ----------------------------------------------------------------------------
-
-typedef enum __attribute__((__packed__)) {
- HK_NONE = 0,
-
- // permanent flags - they are set once to optimize various decisions and lookups
-
-    HK_HASHTABLE_ALLOCATED = (1 << 0), // this key object is allocated in the hashtable
-                                        // objects that do not have this, keep a pointer to a key in the hashtable
-                                        // objects that have this, keep an allocated value
-
-    HK_FILTERED = (1 << 1), // we checked once if this key is filtered
- HK_FILTERED_INCLUDED = (1 << 2), // the result of the filtering was to include it in the output
-
-    HK_COLLISION_CHECKED = (1 << 3), // we checked this key once for hash collisions
-
- HK_RENAMES_CHECKED = (1 << 4), // we checked once if there are renames on this key
- HK_HAS_RENAMES = (1 << 5), // and we found there is a rename rule related to it
-
- // ephemeral flags - they are unset at the end of each log line
-
- HK_VALUE_FROM_LOG = (1 << 14), // the value of this key has been read from the log (or from injection, duplication)
- HK_VALUE_REWRITTEN = (1 << 15), // the value of this key has been rewritten due to one of our rewrite rules
-
-} HASHED_KEY_FLAGS;
-
-typedef struct hashed_key {
- const char *key;
- uint32_t len;
- HASHED_KEY_FLAGS flags;
- XXH64_hash_t hash;
- union {
- struct hashed_key *hashtable_ptr; // HK_HASHTABLE_ALLOCATED is not set
- TEXT value; // HK_HASHTABLE_ALLOCATED is set
- };
-} HASHED_KEY;
-
-static inline void hashed_key_cleanup(HASHED_KEY *k) {
- if(k->key) {
- freez((void *)k->key);
- k->key = NULL;
- }
-
- if(k->flags & HK_HASHTABLE_ALLOCATED)
- txt_cleanup(&k->value);
- else
- k->hashtable_ptr = NULL;
-}
-
-static inline void hashed_key_set(HASHED_KEY *k, const char *name) {
- hashed_key_cleanup(k);
-
- k->key = strdupz(name);
- k->len = strlen(k->key);
- k->hash = XXH3_64bits(k->key, k->len);
- k->flags = HK_NONE;
-}
-
-static inline void hashed_key_len_set(HASHED_KEY *k, const char *name, size_t len) {
- hashed_key_cleanup(k);
-
- k->key = strndupz(name, len);
- k->len = len;
- k->hash = XXH3_64bits(k->key, k->len);
- k->flags = HK_NONE;
-}
-
-static inline bool hashed_keys_match(HASHED_KEY *k1, HASHED_KEY *k2) {
- return ((k1 == k2) || (k1->hash == k2->hash && strcmp(k1->key, k2->key) == 0));
-}
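-// the hash comparison is the fast path; strcmp() confirms equality, so a
-// hash collision cannot make two different keys match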
-
-static inline int compare_keys(struct hashed_key *k1, struct hashed_key *k2) {
- return strcmp(k1->key, k2->key);
-}
-
-// ----------------------------------------------------------------------------
-
-typedef struct search_pattern {
- const char *pattern;
- pcre2_code *re;
- pcre2_match_data *match_data;
- TEXT error;
-} SEARCH_PATTERN;
-
-void search_pattern_cleanup(SEARCH_PATTERN *sp);
-bool search_pattern_set(SEARCH_PATTERN *sp, const char *search_pattern, size_t search_pattern_len);
-
-static inline bool search_pattern_matches(SEARCH_PATTERN *sp, const char *value, size_t value_len) {
- return pcre2_match(sp->re, (PCRE2_SPTR)value, value_len, 0, 0, sp->match_data, NULL) >= 0;
-}
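-// Note: pcre2_match() returns a negative value on no-match or error, so
-// testing >= 0 treats any successful match as a match, including the rare
-// zero return when the ovector is too small to hold all captures.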
-
-// ----------------------------------------------------------------------------
-
-typedef struct replacement_node {
- HASHED_KEY name;
- bool is_variable;
- bool logged_error;
-
- struct replacement_node *next;
-} REPLACE_NODE;
-
-void replace_node_free(REPLACE_NODE *rpn);
-
-typedef struct replace_pattern {
- const char *pattern;
- REPLACE_NODE *nodes;
- bool has_variables;
-} REPLACE_PATTERN;
-
-void replace_pattern_cleanup(REPLACE_PATTERN *rp);
-bool replace_pattern_set(REPLACE_PATTERN *rp, const char *pattern);
-
-// ----------------------------------------------------------------------------
-
-typedef struct injection {
- bool on_unmatched;
- HASHED_KEY key;
- REPLACE_PATTERN value;
-} INJECTION;
-
-void injection_cleanup(INJECTION *inj);
-
-// ----------------------------------------------------------------------------
-
-typedef struct key_rename {
- HASHED_KEY new_key;
- HASHED_KEY old_key;
-} RENAME;
-
-void rename_cleanup(RENAME *rn);
-
-// ----------------------------------------------------------------------------
-
-typedef enum __attribute__((__packed__)) {
- RW_NONE = 0,
- RW_MATCH_PCRE2 = (1 << 1), // a rewrite rule
- RW_MATCH_NON_EMPTY = (1 << 2), // a rewrite rule
- RW_DONT_STOP = (1 << 3),
- RW_INJECT = (1 << 4),
-} RW_FLAGS;
-
-typedef struct key_rewrite {
- RW_FLAGS flags;
- HASHED_KEY key;
- union {
- SEARCH_PATTERN match_pcre2;
- REPLACE_PATTERN match_non_empty;
- };
- REPLACE_PATTERN value;
-} REWRITE;
-
-void rewrite_cleanup(REWRITE *rw);
-
-// ----------------------------------------------------------------------------
-// A job configuration and runtime structures
-
-typedef struct log_job {
- bool show_config;
-
- const char *pattern;
- const char *prefix;
-
- SIMPLE_HASHTABLE_KEY hashtable;
-
- struct {
- const char *buffer;
- const char *trimmed;
- size_t trimmed_len;
- size_t size;
- HASHED_KEY key;
- } line;
-
- struct {
- SEARCH_PATTERN include;
- SEARCH_PATTERN exclude;
- } filter;
-
- struct {
- bool last_line_was_empty;
- HASHED_KEY key;
- TEXT current;
- } filename;
-
- struct {
- uint32_t used;
- INJECTION keys[MAX_INJECTIONS];
- } injections;
-
- struct {
- HASHED_KEY key;
- struct {
- uint32_t used;
- INJECTION keys[MAX_INJECTIONS];
- } injections;
- } unmatched;
-
- struct {
- uint32_t used;
- REWRITE array[MAX_REWRITES];
- TEXT tmp;
- } rewrites;
-
- struct {
- uint32_t used;
- RENAME array[MAX_RENAMES];
- } renames;
-} LOG_JOB;
-
-// initialize a log job
-void log_job_init(LOG_JOB *jb);
-
-// free all resources consumed by the log job
-void log_job_cleanup(LOG_JOB *jb);
-
-// ----------------------------------------------------------------------------
-
-// the entry point to send key value pairs to the output
-// this implements the pipeline of processing renames, rewrites and duplications
-void log_job_send_extracted_key_value(LOG_JOB *jb, const char *key, const char *value, size_t len);
-
-// ----------------------------------------------------------------------------
-// configuration related
-
-// management of configuration to set settings
-bool log_job_filename_key_set(LOG_JOB *jb, const char *key, size_t key_len);
-bool log_job_key_prefix_set(LOG_JOB *jb, const char *prefix, size_t prefix_len);
-bool log_job_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len);
-bool log_job_injection_add(LOG_JOB *jb, const char *key, size_t key_len, const char *value, size_t value_len, bool unmatched);
-bool log_job_rewrite_add(LOG_JOB *jb, const char *key, RW_FLAGS flags, const char *search_pattern, const char *replace_pattern);
-bool log_job_rename_add(LOG_JOB *jb, const char *new_key, size_t new_key_len, const char *old_key, size_t old_key_len);
-bool log_job_include_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len);
-bool log_job_exclude_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len);
-
-// entry point to parse command line parameters
-bool log_job_command_line_parse_parameters(LOG_JOB *jb, int argc, char **argv);
-void log_job_command_line_help(const char *name);
-
-// ----------------------------------------------------------------------------
-// YAML configuration related
-
-#ifdef HAVE_LIBYAML
-bool yaml_parse_file(const char *config_file_path, LOG_JOB *jb);
-bool yaml_parse_config(const char *config_name, LOG_JOB *jb);
-#endif
-
-void log_job_configuration_to_yaml(LOG_JOB *jb);
-
-// ----------------------------------------------------------------------------
-// JSON parser
-
-typedef struct log_json_state LOG_JSON_STATE;
-LOG_JSON_STATE *json_parser_create(LOG_JOB *jb);
-void json_parser_destroy(LOG_JSON_STATE *js);
-const char *json_parser_error(LOG_JSON_STATE *js);
-bool json_parse_document(LOG_JSON_STATE *js, const char *txt);
-void json_test(void);
-
-size_t parse_surrogate(const char *s, char *d, size_t *remaining);
-
-// ----------------------------------------------------------------------------
-// logfmt parser
-
-typedef struct logfmt_state LOGFMT_STATE;
-LOGFMT_STATE *logfmt_parser_create(LOG_JOB *jb);
-void logfmt_parser_destroy(LOGFMT_STATE *lfs);
-const char *logfmt_parser_error(LOGFMT_STATE *lfs);
-bool logfmt_parse_document(LOGFMT_STATE *js, const char *txt);
-void logfmt_test(void);
-
-// ----------------------------------------------------------------------------
-// pcre2 parser
-
-typedef struct pcre2_state PCRE2_STATE;
-PCRE2_STATE *pcre2_parser_create(LOG_JOB *jb);
-void pcre2_parser_destroy(PCRE2_STATE *pcre2);
-const char *pcre2_parser_error(PCRE2_STATE *pcre2);
-bool pcre2_parse_document(PCRE2_STATE *pcre2, const char *txt, size_t len);
-bool pcre2_has_error(PCRE2_STATE *pcre2);
-void pcre2_test(void);
-
-void pcre2_get_error_in_buffer(char *msg, size_t msg_len, int rc, int pos);
-
-#endif //NETDATA_LOG2JOURNAL_H
diff --git a/collectors/log2journal/tests.d/default.output b/collectors/log2journal/tests.d/default.output
deleted file mode 100644
index ef17cb2c7..000000000
--- a/collectors/log2journal/tests.d/default.output
+++ /dev/null
@@ -1,20 +0,0 @@
-MESSAGE=key1=value01 key2=value02 key3=value03 key4=value04
-PRIORITY=6
-SYSLOG_IDENTIFIER=log2journal
-
-MESSAGE=key1=value11 key2=value12 key3=value13 key4=
-PRIORITY=6
-SYSLOG_IDENTIFIER=log2journal
-
-MESSAGE=key1=value21 key2=value22 key3=value23 key4=value24
-PRIORITY=6
-SYSLOG_IDENTIFIER=log2journal
-
-MESSAGE=key1=value31 key2=value32 key3=value33 key4=
-PRIORITY=6
-SYSLOG_IDENTIFIER=log2journal
-
-MESSAGE=key1=value41 key2=value42 key3=value43 key4=value44
-PRIORITY=6
-SYSLOG_IDENTIFIER=log2journal
-
diff --git a/collectors/log2journal/tests.d/full.output b/collectors/log2journal/tests.d/full.output
deleted file mode 100644
index 074092d4e..000000000
--- a/collectors/log2journal/tests.d/full.output
+++ /dev/null
@@ -1,77 +0,0 @@
-pattern: |
- (?x) # Enable PCRE2 extended mode
- ^
- (?<NGINX_REMOTE_ADDR>[^ ]+) \s - \s # NGINX_REMOTE_ADDR
- (?<NGINX_REMOTE_USER>[^ ]+) \s # NGINX_REMOTE_USER
- \[
- (?<NGINX_TIME_LOCAL>[^\]]+) # NGINX_TIME_LOCAL
- \]
- \s+ "
- (?<MESSAGE>
- (?<NGINX_METHOD>[A-Z]+) \s+ # NGINX_METHOD
- (?<NGINX_URL>[^ ]+) \s+
- HTTP/(?<NGINX_HTTP_VERSION>[^"]+)
- )
- " \s+
- (?<NGINX_STATUS>\d+) \s+ # NGINX_STATUS
- (?<NGINX_BODY_BYTES_SENT>\d+) \s+ # NGINX_BODY_BYTES_SENT
- "(?<NGINX_HTTP_REFERER>[^"]*)" \s+ # NGINX_HTTP_REFERER
- "(?<NGINX_HTTP_USER_AGENT>[^"]*)" # NGINX_HTTP_USER_AGENT
-
-prefix: NGINX_
-
-filename:
- key: NGINX_LOG_FILENAME
-
-filter:
- include: '.*'
- exclude: '.*HELLO.*WORLD.*'
-
-rename:
- - new_key: TEST1
- old_key: TEST2
- - new_key: TEST3
- old_key: TEST4
-
-inject:
- - key: SYSLOG_IDENTIFIER
- value: nginx-log
- - key: SYSLOG_IDENTIFIER2
- value: nginx-log2
- - key: PRIORITY
- value: '${NGINX_STATUS}'
- - key: NGINX_STATUS_FAMILY
- value: '${NGINX_STATUS}${NGINX_METHOD}'
-
-rewrite:
- - key: PRIORITY
- value: '${NGINX_STATUS}'
- inject: yes
- stop: no
- - key: PRIORITY
- match: '^[123]'
- value: 6
- - key: PRIORITY
- match: '^4'
- value: 5
- - key: PRIORITY
- match: '^5'
- value: 3
- - key: PRIORITY
- match: '.*'
- value: 4
- - key: NGINX_STATUS_FAMILY
- match: '^(?<first_digit>[1-5])'
- value: '${first_digit}xx'
- - key: NGINX_STATUS_FAMILY
- match: '.*'
- value: UNKNOWN
-
-unmatched:
- key: MESSAGE
-
- inject:
- - key: PRIORITY
- value: 1
- - key: PRIORITY2
- value: 2
diff --git a/collectors/log2journal/tests.d/full.yaml b/collectors/log2journal/tests.d/full.yaml
deleted file mode 100644
index 86cafb5a2..000000000
--- a/collectors/log2journal/tests.d/full.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-pattern: |
- (?x) # Enable PCRE2 extended mode
- ^
- (?<NGINX_REMOTE_ADDR>[^ ]+) \s - \s # NGINX_REMOTE_ADDR
- (?<NGINX_REMOTE_USER>[^ ]+) \s # NGINX_REMOTE_USER
- \[
- (?<NGINX_TIME_LOCAL>[^\]]+) # NGINX_TIME_LOCAL
- \]
- \s+ "
- (?<MESSAGE>
- (?<NGINX_METHOD>[A-Z]+) \s+ # NGINX_METHOD
- (?<NGINX_URL>[^ ]+) \s+
- HTTP/(?<NGINX_HTTP_VERSION>[^"]+)
- )
- " \s+
- (?<NGINX_STATUS>\d+) \s+ # NGINX_STATUS
- (?<NGINX_BODY_BYTES_SENT>\d+) \s+ # NGINX_BODY_BYTES_SENT
- "(?<NGINX_HTTP_REFERER>[^"]*)" \s+ # NGINX_HTTP_REFERER
- "(?<NGINX_HTTP_USER_AGENT>[^"]*)" # NGINX_HTTP_USER_AGENT
-
-prefix: NGINX_
-
-filename:
- key: NGINX_LOG_FILENAME
-
-filter:
- include: '.*'
- exclude: '.*HELLO.*WORLD.*'
-
-rename:
- - new_key: TEST1
- old_key: TEST2
- - new_key: TEST3
- old_key: TEST4
-
-inject:
- - key: SYSLOG_IDENTIFIER
- value: 'nginx-log'
- - key: SYSLOG_IDENTIFIER2
- value: 'nginx-log2'
- - key: PRIORITY
- value: '${NGINX_STATUS}'
- - key: NGINX_STATUS_FAMILY
- value: '${NGINX_STATUS}${NGINX_METHOD}'
-
-rewrite:
- - key: "PRIORITY"
- value: "${NGINX_STATUS}"
- inject: yes
- stop: no
- - key: "PRIORITY"
- match: "^[123]"
- value: 6
- - key: "PRIORITY"
- match: "^4"
- value: 5
- - key: "PRIORITY"
- match: "^5"
- value: 3
- - key: "PRIORITY"
- match: ".*"
- value: 4
- - key: "NGINX_STATUS_FAMILY"
- match: "^(?<first_digit>[1-5])"
- value: "${first_digit}xx"
- - key: "NGINX_STATUS_FAMILY"
- match: ".*"
- value: "UNKNOWN"
-
-unmatched:
- key: MESSAGE
- inject:
- - key: PRIORITY
- value: 1
- - key: PRIORITY2
- value: 2
diff --git a/collectors/log2journal/tests.d/json-exclude.output b/collectors/log2journal/tests.d/json-exclude.output
deleted file mode 100644
index a8f6f83e6..000000000
--- a/collectors/log2journal/tests.d/json-exclude.output
+++ /dev/null
@@ -1,153 +0,0 @@
-ARRAY2_0=1
-ARRAY2_1=-2.345
-ARRAY2_2=Array Element
-ARRAY2_3=true
-ARRAY2_4=false
-ARRAY2_5=null
-ARRAY2_6_BOOLEANFALSE=false
-ARRAY2_6_BOOLEANTRUE=true
-ARRAY2_6_FLOATNEGATIVE=-0.123
-ARRAY2_6_FLOATPOSITIVE=0.987
-ARRAY2_6_NULLVALUE=null
-ARRAY2_6_NUMERICNEGATIVE=-456
-ARRAY2_6_NUMERICPOSITIVE=123
-ARRAY2_6_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-ARRAY2_6_SCIENTIFICINTPOSITIVE=6e4
-ARRAY2_6_SCIENTIFICSMALLPOSITIVE=5e-5
-ARRAY2_6_STRING=Nested Object in Array2
-ARRAY2_7_BOOLEANFALSE=false
-ARRAY2_7_BOOLEANTRUE=true
-ARRAY2_7_FLOATNEGATIVE=-2.71828
-ARRAY2_7_FLOATPOSITIVE=3.14159
-ARRAY2_7_NULLVALUE=null
-ARRAY2_7_NUMERICNEGATIVE=-123
-ARRAY2_7_NUMERICPOSITIVE=42
-ARRAY2_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3
-ARRAY2_7_SCIENTIFICINTPOSITIVE=1e5
-ARRAY2_7_SCIENTIFICSMALLPOSITIVE=1e-4
-ARRAY2_7_STRING=Array Element with Object in Array2
-BOOLEANFALSE=false
-BOOLEANTRUE=true
-FLOATNEGATIVE=-2.71828
-FLOATPOSITIVE=3.14159
-NULLVALUE=null
-NUMERICNEGATIVE=-123
-NUMERICPOSITIVE=42
-OBJECT_BOOLEANFALSE=false
-OBJECT_BOOLEANTRUE=true
-OBJECT_FLOATNEGATIVE=-0.123
-OBJECT_FLOATPOSITIVE=0.987
-OBJECT_NULLVALUE=null
-OBJECT_NUMERICNEGATIVE=-456
-OBJECT_NUMERICPOSITIVE=123
-OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-OBJECT_SCIENTIFICINTPOSITIVE=6e4
-OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5
-OBJECT_STRING=Nested Object
-SCIENTIFICFLOATNEGATIVE=-2.5e-3
-SCIENTIFICINTPOSITIVE=1e5
-SCIENTIFICSMALLPOSITIVE=1e-4
-STRING=Hello, World!
-
-ARRAY2_0=1
-ARRAY2_1=-2.345
-ARRAY2_2=Array Element
-ARRAY2_3=true
-ARRAY2_4=false
-ARRAY2_5=null
-ARRAY2_6_BOOLEANFALSE=false
-ARRAY2_6_BOOLEANTRUE=true
-ARRAY2_6_FLOATNEGATIVE=-0.123
-ARRAY2_6_FLOATPOSITIVE=0.987
-ARRAY2_6_NULLVALUE=null
-ARRAY2_6_NUMERICNEGATIVE=-456
-ARRAY2_6_NUMERICPOSITIVE=123
-ARRAY2_6_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-ARRAY2_6_SCIENTIFICINTPOSITIVE=6e4
-ARRAY2_6_SCIENTIFICSMALLPOSITIVE=5e-5
-ARRAY2_6_STRING=Nested Object in Array2
-ARRAY2_7_BOOLEANFALSE=false
-ARRAY2_7_BOOLEANTRUE=true
-ARRAY2_7_FLOATNEGATIVE=-2.71828
-ARRAY2_7_FLOATPOSITIVE=3.14159
-ARRAY2_7_NULLVALUE=null
-ARRAY2_7_NUMERICNEGATIVE=-123
-ARRAY2_7_NUMERICPOSITIVE=42
-ARRAY2_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3
-ARRAY2_7_SCIENTIFICINTPOSITIVE=1e5
-ARRAY2_7_SCIENTIFICSMALLPOSITIVE=1e-4
-ARRAY2_7_STRING=Array Element with Object in Array2
-BOOLEANFALSE=false
-BOOLEANTRUE=true
-FLOATNEGATIVE=-2.71828
-FLOATPOSITIVE=3.14159
-NULLVALUE=null
-NUMERICNEGATIVE=-123
-NUMERICPOSITIVE=42
-OBJECT_BOOLEANFALSE=false
-OBJECT_BOOLEANTRUE=true
-OBJECT_FLOATNEGATIVE=-0.123
-OBJECT_FLOATPOSITIVE=0.987
-OBJECT_NULLVALUE=null
-OBJECT_NUMERICNEGATIVE=-456
-OBJECT_NUMERICPOSITIVE=123
-OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-OBJECT_SCIENTIFICINTPOSITIVE=6e4
-OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5
-OBJECT_STRING=Nested Object
-SCIENTIFICFLOATNEGATIVE=-2.5e-3
-SCIENTIFICINTPOSITIVE=1e5
-SCIENTIFICSMALLPOSITIVE=1e-4
-STRING=Hello, World!
-
-ARRAY2_0=1
-ARRAY2_1=-2.345
-ARRAY2_2=Array Element
-ARRAY2_3=true
-ARRAY2_4=false
-ARRAY2_5=null
-ARRAY2_6_BOOLEANFALSE=false
-ARRAY2_6_BOOLEANTRUE=true
-ARRAY2_6_FLOATNEGATIVE=-0.123
-ARRAY2_6_FLOATPOSITIVE=0.987
-ARRAY2_6_NULLVALUE=null
-ARRAY2_6_NUMERICNEGATIVE=-456
-ARRAY2_6_NUMERICPOSITIVE=123
-ARRAY2_6_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-ARRAY2_6_SCIENTIFICINTPOSITIVE=6e4
-ARRAY2_6_SCIENTIFICSMALLPOSITIVE=5e-5
-ARRAY2_6_STRING=Nested Object in Array2
-ARRAY2_7_BOOLEANFALSE=false
-ARRAY2_7_BOOLEANTRUE=true
-ARRAY2_7_FLOATNEGATIVE=-2.71828
-ARRAY2_7_FLOATPOSITIVE=3.14159
-ARRAY2_7_NULLVALUE=null
-ARRAY2_7_NUMERICNEGATIVE=-123
-ARRAY2_7_NUMERICPOSITIVE=42
-ARRAY2_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3
-ARRAY2_7_SCIENTIFICINTPOSITIVE=1e5
-ARRAY2_7_SCIENTIFICSMALLPOSITIVE=1e-4
-ARRAY2_7_STRING=Array Element with Object in Array2
-BOOLEANFALSE=false
-BOOLEANTRUE=true
-FLOATNEGATIVE=-2.71828
-FLOATPOSITIVE=3.14159
-NULLVALUE=null
-NUMERICNEGATIVE=-123
-NUMERICPOSITIVE=42
-OBJECT_BOOLEANFALSE=false
-OBJECT_BOOLEANTRUE=true
-OBJECT_FLOATNEGATIVE=-0.123
-OBJECT_FLOATPOSITIVE=0.987
-OBJECT_NULLVALUE=null
-OBJECT_NUMERICNEGATIVE=-456
-OBJECT_NUMERICPOSITIVE=123
-OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-OBJECT_SCIENTIFICINTPOSITIVE=6e4
-OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5
-OBJECT_STRING=Nested Object
-SCIENTIFICFLOATNEGATIVE=-2.5e-3
-SCIENTIFICINTPOSITIVE=1e5
-SCIENTIFICSMALLPOSITIVE=1e-4
-STRING=Hello, World!
-
diff --git a/collectors/log2journal/tests.d/json-include.output b/collectors/log2journal/tests.d/json-include.output
deleted file mode 100644
index 326c58da2..000000000
--- a/collectors/log2journal/tests.d/json-include.output
+++ /dev/null
@@ -1,54 +0,0 @@
-OBJECT_ARRAY_0=1
-OBJECT_ARRAY_1=-2
-OBJECT_ARRAY_2=3
-OBJECT_ARRAY_3=Nested Array
-OBJECT_ARRAY_4=true
-OBJECT_ARRAY_5=null
-OBJECT_BOOLEANFALSE=false
-OBJECT_BOOLEANTRUE=true
-OBJECT_FLOATNEGATIVE=-0.123
-OBJECT_FLOATPOSITIVE=0.987
-OBJECT_NULLVALUE=null
-OBJECT_NUMERICNEGATIVE=-456
-OBJECT_NUMERICPOSITIVE=123
-OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-OBJECT_SCIENTIFICINTPOSITIVE=6e4
-OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5
-OBJECT_STRING=Nested Object
-
-OBJECT_ARRAY_0=1
-OBJECT_ARRAY_1=-2
-OBJECT_ARRAY_2=3
-OBJECT_ARRAY_3=Nested Array
-OBJECT_ARRAY_4=true
-OBJECT_ARRAY_5=null
-OBJECT_BOOLEANFALSE=false
-OBJECT_BOOLEANTRUE=true
-OBJECT_FLOATNEGATIVE=-0.123
-OBJECT_FLOATPOSITIVE=0.987
-OBJECT_NULLVALUE=null
-OBJECT_NUMERICNEGATIVE=-456
-OBJECT_NUMERICPOSITIVE=123
-OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-OBJECT_SCIENTIFICINTPOSITIVE=6e4
-OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5
-OBJECT_STRING=Nested Object
-
-OBJECT_ARRAY_0=1
-OBJECT_ARRAY_1=-2
-OBJECT_ARRAY_2=3
-OBJECT_ARRAY_3=Nested Array
-OBJECT_ARRAY_4=true
-OBJECT_ARRAY_5=null
-OBJECT_BOOLEANFALSE=false
-OBJECT_BOOLEANTRUE=true
-OBJECT_FLOATNEGATIVE=-0.123
-OBJECT_FLOATPOSITIVE=0.987
-OBJECT_NULLVALUE=null
-OBJECT_NUMERICNEGATIVE=-456
-OBJECT_NUMERICPOSITIVE=123
-OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-OBJECT_SCIENTIFICINTPOSITIVE=6e4
-OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5
-OBJECT_STRING=Nested Object
-
diff --git a/collectors/log2journal/tests.d/json.log b/collectors/log2journal/tests.d/json.log
deleted file mode 100644
index 3f1334960..000000000
--- a/collectors/log2journal/tests.d/json.log
+++ /dev/null
@@ -1,3 +0,0 @@
-{ "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Hello, World!", "nullValue": null, "object": { "numericPositive": 123, "numericNegative": -456, "floatPositive": 0.987, "floatNegative": -0.123, "scientificIntPositive": 6e4, "scientificFloatNegative": -1.5e-2, "scientificSmallPositive": 5e-5, "booleanTrue": true, "booleanFalse": false, "string": "Nested Object", "nullValue": null, "array": [1, -2, 3, "Nested Array", true, null] }, "array": [ 1, -2.345, "Array Element", true, false, null, { "numericPositive": 987, "numericNegative": -654, "string": "Nested Object in Array", "array": [null, false, true] }, { "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Array Element with Object", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object", true, null] } ], "array2": [ 1, -2.345, "Array Element", true, false, null, { "numericPositive": 123, "numericNegative": -456, "floatPositive": 0.987, "floatNegative": -0.123, "scientificIntPositive": 6e4, "scientificFloatNegative": -1.5e-2, "scientificSmallPositive": 5e-5, "booleanTrue": true, "booleanFalse": false, "string": "Nested Object in Array2", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object2", true, null] }, { "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Array Element with Object in Array2", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object2", true, null]}]}
-{ "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Hello, World!", "nullValue": null, "object": { "numericPositive": 123, "numericNegative": -456, "floatPositive": 0.987, "floatNegative": -0.123, "scientificIntPositive": 6e4, "scientificFloatNegative": -1.5e-2, "scientificSmallPositive": 5e-5, "booleanTrue": true, "booleanFalse": false, "string": "Nested Object", "nullValue": null, "array": [1, -2, 3, "Nested Array", true, null] }, "array": [ 1, -2.345, "Array Element", true, false, null, { "numericPositive": 987, "numericNegative": -654, "string": "Nested Object in Array", "array": [null, false, true] }, { "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Array Element with Object", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object", true, null] } ], "array2": [ 1, -2.345, "Array Element", true, false, null, { "numericPositive": 123, "numericNegative": -456, "floatPositive": 0.987, "floatNegative": -0.123, "scientificIntPositive": 6e4, "scientificFloatNegative": -1.5e-2, "scientificSmallPositive": 5e-5, "booleanTrue": true, "booleanFalse": false, "string": "Nested Object in Array2", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object2", true, null] }, { "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Array Element with Object in Array2", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object2", true, null]}]}
-{ "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Hello, World!", "nullValue": null, "object": { "numericPositive": 123, "numericNegative": -456, "floatPositive": 0.987, "floatNegative": -0.123, "scientificIntPositive": 6e4, "scientificFloatNegative": -1.5e-2, "scientificSmallPositive": 5e-5, "booleanTrue": true, "booleanFalse": false, "string": "Nested Object", "nullValue": null, "array": [1, -2, 3, "Nested Array", true, null] }, "array": [ 1, -2.345, "Array Element", true, false, null, { "numericPositive": 987, "numericNegative": -654, "string": "Nested Object in Array", "array": [null, false, true] }, { "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Array Element with Object", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object", true, null] } ], "array2": [ 1, -2.345, "Array Element", true, false, null, { "numericPositive": 123, "numericNegative": -456, "floatPositive": 0.987, "floatNegative": -0.123, "scientificIntPositive": 6e4, "scientificFloatNegative": -1.5e-2, "scientificSmallPositive": 5e-5, "booleanTrue": true, "booleanFalse": false, "string": "Nested Object in Array2", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object2", true, null] }, { "numericPositive": 42, "numericNegative": -123, "floatPositive": 3.14159, "floatNegative": -2.71828, "scientificIntPositive": 1e5, "scientificFloatNegative": -2.5e-3, "scientificSmallPositive": 1e-4, "booleanTrue": true, "booleanFalse": false, "string": "Array Element with Object in Array2", "nullValue": null, "array": [1, -2, 3, "Nested Array in Object2", true, null]}]}
diff --git a/collectors/log2journal/tests.d/json.output b/collectors/log2journal/tests.d/json.output
deleted file mode 100644
index 83499cc55..000000000
--- a/collectors/log2journal/tests.d/json.output
+++ /dev/null
@@ -1,294 +0,0 @@
-ARRAY2_0=1
-ARRAY2_1=-2.345
-ARRAY2_2=Array Element
-ARRAY2_3=true
-ARRAY2_4=false
-ARRAY2_5=null
-ARRAY2_6_ARRAY_0=1
-ARRAY2_6_ARRAY_1=-2
-ARRAY2_6_ARRAY_2=3
-ARRAY2_6_ARRAY_3=Nested Array in Object2
-ARRAY2_6_ARRAY_4=true
-ARRAY2_6_ARRAY_5=null
-ARRAY2_6_BOOLEANFALSE=false
-ARRAY2_6_BOOLEANTRUE=true
-ARRAY2_6_FLOATNEGATIVE=-0.123
-ARRAY2_6_FLOATPOSITIVE=0.987
-ARRAY2_6_NULLVALUE=null
-ARRAY2_6_NUMERICNEGATIVE=-456
-ARRAY2_6_NUMERICPOSITIVE=123
-ARRAY2_6_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-ARRAY2_6_SCIENTIFICINTPOSITIVE=6e4
-ARRAY2_6_SCIENTIFICSMALLPOSITIVE=5e-5
-ARRAY2_6_STRING=Nested Object in Array2
-ARRAY2_7_ARRAY_0=1
-ARRAY2_7_ARRAY_1=-2
-ARRAY2_7_ARRAY_2=3
-ARRAY2_7_ARRAY_3=Nested Array in Object2
-ARRAY2_7_ARRAY_4=true
-ARRAY2_7_ARRAY_5=null
-ARRAY2_7_BOOLEANFALSE=false
-ARRAY2_7_BOOLEANTRUE=true
-ARRAY2_7_FLOATNEGATIVE=-2.71828
-ARRAY2_7_FLOATPOSITIVE=3.14159
-ARRAY2_7_NULLVALUE=null
-ARRAY2_7_NUMERICNEGATIVE=-123
-ARRAY2_7_NUMERICPOSITIVE=42
-ARRAY2_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3
-ARRAY2_7_SCIENTIFICINTPOSITIVE=1e5
-ARRAY2_7_SCIENTIFICSMALLPOSITIVE=1e-4
-ARRAY2_7_STRING=Array Element with Object in Array2
-ARRAY_0=1
-ARRAY_1=-2.345
-ARRAY_2=Array Element
-ARRAY_3=true
-ARRAY_4=false
-ARRAY_5=null
-ARRAY_6_ARRAY_0=null
-ARRAY_6_ARRAY_1=false
-ARRAY_6_ARRAY_2=true
-ARRAY_6_NUMERICNEGATIVE=-654
-ARRAY_6_NUMERICPOSITIVE=987
-ARRAY_6_STRING=Nested Object in Array
-ARRAY_7_ARRAY_0=1
-ARRAY_7_ARRAY_1=-2
-ARRAY_7_ARRAY_2=3
-ARRAY_7_ARRAY_3=Nested Array in Object
-ARRAY_7_ARRAY_4=true
-ARRAY_7_ARRAY_5=null
-ARRAY_7_BOOLEANFALSE=false
-ARRAY_7_BOOLEANTRUE=true
-ARRAY_7_FLOATNEGATIVE=-2.71828
-ARRAY_7_FLOATPOSITIVE=3.14159
-ARRAY_7_NULLVALUE=null
-ARRAY_7_NUMERICNEGATIVE=-123
-ARRAY_7_NUMERICPOSITIVE=42
-ARRAY_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3
-ARRAY_7_SCIENTIFICINTPOSITIVE=1e5
-ARRAY_7_SCIENTIFICSMALLPOSITIVE=1e-4
-ARRAY_7_STRING=Array Element with Object
-BOOLEANFALSE=false
-BOOLEANTRUE=true
-FLOATNEGATIVE=-2.71828
-FLOATPOSITIVE=3.14159
-NULLVALUE=null
-NUMERICNEGATIVE=-123
-NUMERICPOSITIVE=42
-OBJECT_ARRAY_0=1
-OBJECT_ARRAY_1=-2
-OBJECT_ARRAY_2=3
-OBJECT_ARRAY_3=Nested Array
-OBJECT_ARRAY_4=true
-OBJECT_ARRAY_5=null
-OBJECT_BOOLEANFALSE=false
-OBJECT_BOOLEANTRUE=true
-OBJECT_FLOATNEGATIVE=-0.123
-OBJECT_FLOATPOSITIVE=0.987
-OBJECT_NULLVALUE=null
-OBJECT_NUMERICNEGATIVE=-456
-OBJECT_NUMERICPOSITIVE=123
-OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-OBJECT_SCIENTIFICINTPOSITIVE=6e4
-OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5
-OBJECT_STRING=Nested Object
-SCIENTIFICFLOATNEGATIVE=-2.5e-3
-SCIENTIFICINTPOSITIVE=1e5
-SCIENTIFICSMALLPOSITIVE=1e-4
-STRING=Hello, World!
-
-ARRAY2_0=1
-ARRAY2_1=-2.345
-ARRAY2_2=Array Element
-ARRAY2_3=true
-ARRAY2_4=false
-ARRAY2_5=null
-ARRAY2_6_ARRAY_0=1
-ARRAY2_6_ARRAY_1=-2
-ARRAY2_6_ARRAY_2=3
-ARRAY2_6_ARRAY_3=Nested Array in Object2
-ARRAY2_6_ARRAY_4=true
-ARRAY2_6_ARRAY_5=null
-ARRAY2_6_BOOLEANFALSE=false
-ARRAY2_6_BOOLEANTRUE=true
-ARRAY2_6_FLOATNEGATIVE=-0.123
-ARRAY2_6_FLOATPOSITIVE=0.987
-ARRAY2_6_NULLVALUE=null
-ARRAY2_6_NUMERICNEGATIVE=-456
-ARRAY2_6_NUMERICPOSITIVE=123
-ARRAY2_6_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-ARRAY2_6_SCIENTIFICINTPOSITIVE=6e4
-ARRAY2_6_SCIENTIFICSMALLPOSITIVE=5e-5
-ARRAY2_6_STRING=Nested Object in Array2
-ARRAY2_7_ARRAY_0=1
-ARRAY2_7_ARRAY_1=-2
-ARRAY2_7_ARRAY_2=3
-ARRAY2_7_ARRAY_3=Nested Array in Object2
-ARRAY2_7_ARRAY_4=true
-ARRAY2_7_ARRAY_5=null
-ARRAY2_7_BOOLEANFALSE=false
-ARRAY2_7_BOOLEANTRUE=true
-ARRAY2_7_FLOATNEGATIVE=-2.71828
-ARRAY2_7_FLOATPOSITIVE=3.14159
-ARRAY2_7_NULLVALUE=null
-ARRAY2_7_NUMERICNEGATIVE=-123
-ARRAY2_7_NUMERICPOSITIVE=42
-ARRAY2_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3
-ARRAY2_7_SCIENTIFICINTPOSITIVE=1e5
-ARRAY2_7_SCIENTIFICSMALLPOSITIVE=1e-4
-ARRAY2_7_STRING=Array Element with Object in Array2
-ARRAY_0=1
-ARRAY_1=-2.345
-ARRAY_2=Array Element
-ARRAY_3=true
-ARRAY_4=false
-ARRAY_5=null
-ARRAY_6_ARRAY_0=null
-ARRAY_6_ARRAY_1=false
-ARRAY_6_ARRAY_2=true
-ARRAY_6_NUMERICNEGATIVE=-654
-ARRAY_6_NUMERICPOSITIVE=987
-ARRAY_6_STRING=Nested Object in Array
-ARRAY_7_ARRAY_0=1
-ARRAY_7_ARRAY_1=-2
-ARRAY_7_ARRAY_2=3
-ARRAY_7_ARRAY_3=Nested Array in Object
-ARRAY_7_ARRAY_4=true
-ARRAY_7_ARRAY_5=null
-ARRAY_7_BOOLEANFALSE=false
-ARRAY_7_BOOLEANTRUE=true
-ARRAY_7_FLOATNEGATIVE=-2.71828
-ARRAY_7_FLOATPOSITIVE=3.14159
-ARRAY_7_NULLVALUE=null
-ARRAY_7_NUMERICNEGATIVE=-123
-ARRAY_7_NUMERICPOSITIVE=42
-ARRAY_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3
-ARRAY_7_SCIENTIFICINTPOSITIVE=1e5
-ARRAY_7_SCIENTIFICSMALLPOSITIVE=1e-4
-ARRAY_7_STRING=Array Element with Object
-BOOLEANFALSE=false
-BOOLEANTRUE=true
-FLOATNEGATIVE=-2.71828
-FLOATPOSITIVE=3.14159
-NULLVALUE=null
-NUMERICNEGATIVE=-123
-NUMERICPOSITIVE=42
-OBJECT_ARRAY_0=1
-OBJECT_ARRAY_1=-2
-OBJECT_ARRAY_2=3
-OBJECT_ARRAY_3=Nested Array
-OBJECT_ARRAY_4=true
-OBJECT_ARRAY_5=null
-OBJECT_BOOLEANFALSE=false
-OBJECT_BOOLEANTRUE=true
-OBJECT_FLOATNEGATIVE=-0.123
-OBJECT_FLOATPOSITIVE=0.987
-OBJECT_NULLVALUE=null
-OBJECT_NUMERICNEGATIVE=-456
-OBJECT_NUMERICPOSITIVE=123
-OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-OBJECT_SCIENTIFICINTPOSITIVE=6e4
-OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5
-OBJECT_STRING=Nested Object
-SCIENTIFICFLOATNEGATIVE=-2.5e-3
-SCIENTIFICINTPOSITIVE=1e5
-SCIENTIFICSMALLPOSITIVE=1e-4
-STRING=Hello, World!
-
-ARRAY2_0=1
-ARRAY2_1=-2.345
-ARRAY2_2=Array Element
-ARRAY2_3=true
-ARRAY2_4=false
-ARRAY2_5=null
-ARRAY2_6_ARRAY_0=1
-ARRAY2_6_ARRAY_1=-2
-ARRAY2_6_ARRAY_2=3
-ARRAY2_6_ARRAY_3=Nested Array in Object2
-ARRAY2_6_ARRAY_4=true
-ARRAY2_6_ARRAY_5=null
-ARRAY2_6_BOOLEANFALSE=false
-ARRAY2_6_BOOLEANTRUE=true
-ARRAY2_6_FLOATNEGATIVE=-0.123
-ARRAY2_6_FLOATPOSITIVE=0.987
-ARRAY2_6_NULLVALUE=null
-ARRAY2_6_NUMERICNEGATIVE=-456
-ARRAY2_6_NUMERICPOSITIVE=123
-ARRAY2_6_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-ARRAY2_6_SCIENTIFICINTPOSITIVE=6e4
-ARRAY2_6_SCIENTIFICSMALLPOSITIVE=5e-5
-ARRAY2_6_STRING=Nested Object in Array2
-ARRAY2_7_ARRAY_0=1
-ARRAY2_7_ARRAY_1=-2
-ARRAY2_7_ARRAY_2=3
-ARRAY2_7_ARRAY_3=Nested Array in Object2
-ARRAY2_7_ARRAY_4=true
-ARRAY2_7_ARRAY_5=null
-ARRAY2_7_BOOLEANFALSE=false
-ARRAY2_7_BOOLEANTRUE=true
-ARRAY2_7_FLOATNEGATIVE=-2.71828
-ARRAY2_7_FLOATPOSITIVE=3.14159
-ARRAY2_7_NULLVALUE=null
-ARRAY2_7_NUMERICNEGATIVE=-123
-ARRAY2_7_NUMERICPOSITIVE=42
-ARRAY2_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3
-ARRAY2_7_SCIENTIFICINTPOSITIVE=1e5
-ARRAY2_7_SCIENTIFICSMALLPOSITIVE=1e-4
-ARRAY2_7_STRING=Array Element with Object in Array2
-ARRAY_0=1
-ARRAY_1=-2.345
-ARRAY_2=Array Element
-ARRAY_3=true
-ARRAY_4=false
-ARRAY_5=null
-ARRAY_6_ARRAY_0=null
-ARRAY_6_ARRAY_1=false
-ARRAY_6_ARRAY_2=true
-ARRAY_6_NUMERICNEGATIVE=-654
-ARRAY_6_NUMERICPOSITIVE=987
-ARRAY_6_STRING=Nested Object in Array
-ARRAY_7_ARRAY_0=1
-ARRAY_7_ARRAY_1=-2
-ARRAY_7_ARRAY_2=3
-ARRAY_7_ARRAY_3=Nested Array in Object
-ARRAY_7_ARRAY_4=true
-ARRAY_7_ARRAY_5=null
-ARRAY_7_BOOLEANFALSE=false
-ARRAY_7_BOOLEANTRUE=true
-ARRAY_7_FLOATNEGATIVE=-2.71828
-ARRAY_7_FLOATPOSITIVE=3.14159
-ARRAY_7_NULLVALUE=null
-ARRAY_7_NUMERICNEGATIVE=-123
-ARRAY_7_NUMERICPOSITIVE=42
-ARRAY_7_SCIENTIFICFLOATNEGATIVE=-2.5e-3
-ARRAY_7_SCIENTIFICINTPOSITIVE=1e5
-ARRAY_7_SCIENTIFICSMALLPOSITIVE=1e-4
-ARRAY_7_STRING=Array Element with Object
-BOOLEANFALSE=false
-BOOLEANTRUE=true
-FLOATNEGATIVE=-2.71828
-FLOATPOSITIVE=3.14159
-NULLVALUE=null
-NUMERICNEGATIVE=-123
-NUMERICPOSITIVE=42
-OBJECT_ARRAY_0=1
-OBJECT_ARRAY_1=-2
-OBJECT_ARRAY_2=3
-OBJECT_ARRAY_3=Nested Array
-OBJECT_ARRAY_4=true
-OBJECT_ARRAY_5=null
-OBJECT_BOOLEANFALSE=false
-OBJECT_BOOLEANTRUE=true
-OBJECT_FLOATNEGATIVE=-0.123
-OBJECT_FLOATPOSITIVE=0.987
-OBJECT_NULLVALUE=null
-OBJECT_NUMERICNEGATIVE=-456
-OBJECT_NUMERICPOSITIVE=123
-OBJECT_SCIENTIFICFLOATNEGATIVE=-1.5e-2
-OBJECT_SCIENTIFICINTPOSITIVE=6e4
-OBJECT_SCIENTIFICSMALLPOSITIVE=5e-5
-OBJECT_STRING=Nested Object
-SCIENTIFICFLOATNEGATIVE=-2.5e-3
-SCIENTIFICINTPOSITIVE=1e5
-SCIENTIFICSMALLPOSITIVE=1e-4
-STRING=Hello, World!
-
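The flattened keys above document log2journal's JSON convention: keys are uppercased, nested object keys are joined with `_`, and array elements get a numeric index suffix. A minimal sketch of exercising that convention by hand, assuming a built `log2journal` on the `PATH` (the input line here is illustrative, not part of the fixture):

```bash
# Flatten one nested JSON log line the same way the fixture above was
# produced: keys uppercased, nesting joined with '_', array indices appended.
echo '{"object":{"string":"Nested Object","array":[1,-2]}}' | log2journal json
# Expected, following the convention above:
#   OBJECT_ARRAY_0=1
#   OBJECT_ARRAY_1=-2
#   OBJECT_STRING=Nested Object
```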
diff --git a/collectors/log2journal/tests.d/logfmt.log b/collectors/log2journal/tests.d/logfmt.log
deleted file mode 100644
index e55a83bbb..000000000
--- a/collectors/log2journal/tests.d/logfmt.log
+++ /dev/null
@@ -1,5 +0,0 @@
-key1=value01 key2=value02 key3=value03 key4=value04
-key1=value11 key2=value12 key3=value13 key4=
-key1=value21 key2=value22 key3=value23 key4=value24
-key1=value31 key2=value32 key3=value33 key4=
-key1=value41 key2=value42 key3=value43 key4=value44
diff --git a/collectors/log2journal/tests.d/logfmt.output b/collectors/log2journal/tests.d/logfmt.output
deleted file mode 100644
index 4291c9665..000000000
--- a/collectors/log2journal/tests.d/logfmt.output
+++ /dev/null
@@ -1,37 +0,0 @@
-INJECTED=Key INJECTED had value 'value01 - value02' and now has this, but only on the first row of the log.
-KEY1=value01
-KEY2=value02
-KEY3=value03
-KEY4=value04
-SIMPLE_INJECTION=An unset variable looks like '', while the value of KEY2 is 'value02'
-YET_ANOTHER_INJECTION=value01 - value02 - Key INJECTED had value 'value01 - value02' and now has this, but only on the first row of the log. - this should work because inject is yes
-
-INJECTED=value11 - value12
-KEY1=value11
-KEY2=value12
-KEY3=value13
-SIMPLE_INJECTION=An unset variable looks like '', while the value of KEY2 is 'value12'
-YET_ANOTHER_INJECTION=value11 - value12 - value11 - value12 - this should work because inject is yes
-
-INJECTED=KEY4 has the value 'value24'; it is not empty, so INJECTED has been rewritten.
-KEY1=value21
-KEY2=value22
-KEY3=value23
-KEY4=value24
-SIMPLE_INJECTION=An unset variable looks like '', while the value of KEY2 is 'value22'
-YET_ANOTHER_INJECTION=value21 - value22 - KEY4 has the value 'value24'; it is not empty, so INJECTED has been rewritten. - this should work because inject is yes
-
-INJECTED=value31 - value32
-KEY1=value31
-KEY2=value32
-KEY3=value33
-YET_ANOTHER_INJECTION=value31 - value32 - value31 - value32 - this should work because inject is yes
-
-INJECTED=KEY4 has the value 'value44'; it is not empty, so INJECTED has been rewritten.
-KEY1=value41
-KEY2=value42
-KEY3=value43
-KEY4=value44
-SIMPLE_INJECTION=An unset variable looks like '', while the value of KEY2 is 'value42'
-YET_ANOTHER_INJECTION=value41 - value42 - KEY4 has the value 'value44'; it is not empty, so INJECTED has been rewritten. - this should work because inject is yes
-
diff --git a/collectors/log2journal/tests.d/logfmt.yaml b/collectors/log2journal/tests.d/logfmt.yaml
deleted file mode 100644
index 91e93a71e..000000000
--- a/collectors/log2journal/tests.d/logfmt.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-pattern: logfmt
-
-inject:
- - key: SIMPLE_INJECTION
- value: "An unset variable looks like '${this}', while the value of KEY2 is '${KEY2}'"
-
-rewrite:
- - key: INJECTED
- value: "${KEY1} - ${KEY2}"
- inject: yes
- stop: no
-
- - key: INJECTED
- match: '^value01'
- value: "Key INJECTED had value '${INJECTED}' and now has this, but only on the first row of the log."
-
- - key: INJECTED
- not_empty: "${KEY4}"
- value: "KEY4 has the value '${KEY4}'; it is not empty, so INJECTED has been rewritten."
-
- - key: INJECTED
- match: '^KEY4 has the value'
- value: "This value should not appear in the logs, because the previous one matched and stopped the pipeline."
-
- - key: ANOTHER_INJECTION
-    value: "${KEY1} - ${KEY2} - ${INJECTED} - should not work because inject is not true and ANOTHER_INJECTION is not in the log file."
-
- - key: YET_ANOTHER_INJECTION
- value: "${KEY1} - ${KEY2} - ${INJECTED} - this should work because inject is yes"
- inject: yes
-
- - key: SIMPLE_INJECTION
- match: "KEY2 is 'value32'"
- value: "" # empty, so SIMPLE_INJECTION should not be available on row 3
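The pipeline above is what produces the `INJECTED` and `YET_ANOTHER_INJECTION` values in the expected output earlier: rewrite rules run top to bottom, `stop: no` lets later rules see an earlier rule's result, and a matching rule without it halts the chain for that key. A sketch of reproducing the expectation locally, assuming `log2journal` is on the `PATH` and the working directory is `collectors/log2journal/`:

```bash
# Mirrors test 6 in tests.sh below: run the logfmt fixture through the YAML
# above and compare against the expected output.
log2journal -f tests.d/logfmt.yaml < tests.d/logfmt.log > got.output
diff tests.d/logfmt.output got.output && echo OK
```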
diff --git a/collectors/log2journal/tests.d/nginx-combined.log b/collectors/log2journal/tests.d/nginx-combined.log
deleted file mode 100644
index b0faa81e9..000000000
--- a/collectors/log2journal/tests.d/nginx-combined.log
+++ /dev/null
@@ -1,14 +0,0 @@
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:27 +0000] "GET /api/v1/data?chart=system.net&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775349 HTTP/1.1" 200 4844 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:27 +0000] "OPTIONS /api/v1/data?chart=netdata.clients&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775358 HTTP/1.1" 200 29 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:27 +0000] "OPTIONS /api/v1/data?chart=netdata.net&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=out&_=1701372775359 HTTP/1.1" 200 29 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:27 +0000] "OPTIONS /api/v1/data?chart=netdata.requests&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775357 HTTP/1.1" 200 29 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
-127.0.0.1 - - [30/Nov/2023:19:35:28 +0000] "GET /stub_status HTTP/1.1" 200 120 "-" "Go-http-client/1.1"
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "GET /api/v1/data?chart=netdata.net&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=out&_=1701372775359 HTTP/1.1" 200 1918 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "GET /api/v1/data?chart=netdata.requests&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775357 HTTP/1.1" 200 1632 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "GET /api/v1/data?chart=netdata.clients&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775358 HTTP/1.1" 200 588 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "OPTIONS /api/v1/data?chart=system.cpu&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775360 HTTP/1.1" 200 29 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "OPTIONS /api/v1/data?chart=netdata.net&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=in&_=1701372775361 HTTP/1.1" 200 29 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "GET /api/v1/data?chart=system.cpu&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775360 HTTP/1.1" 200 6085 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "GET /api/v1/data?chart=netdata.net&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=in&_=1701372775361 HTTP/1.1" 200 1918 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "OPTIONS /api/v1/data?chart=system.io&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775362 HTTP/1.1" 200 29 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
-2a02:169:1210::2000 - - [30/Nov/2023:19:35:28 +0000] "GET /api/v1/data?chart=system.io&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775362 HTTP/1.1" 200 3503 "http://192.168.69.5:19999/" "Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36"
diff --git a/collectors/log2journal/tests.d/nginx-combined.output b/collectors/log2journal/tests.d/nginx-combined.output
deleted file mode 100644
index 07fd11014..000000000
--- a/collectors/log2journal/tests.d/nginx-combined.output
+++ /dev/null
@@ -1,210 +0,0 @@
-MESSAGE=GET /api/v1/data?chart=system.net&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775349 HTTP/1.1
-NGINX_BODY_BYTES_SENT=4844
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_URI=/api/v1/data?chart=system.net&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775349
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:27 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=OPTIONS /api/v1/data?chart=netdata.clients&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775358 HTTP/1.1
-NGINX_BODY_BYTES_SENT=29
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=OPTIONS
-NGINX_REQUEST_URI=/api/v1/data?chart=netdata.clients&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775358
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:27 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=OPTIONS /api/v1/data?chart=netdata.net&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=out&_=1701372775359 HTTP/1.1
-NGINX_BODY_BYTES_SENT=29
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=OPTIONS
-NGINX_REQUEST_URI=/api/v1/data?chart=netdata.net&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=out&_=1701372775359
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:27 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=OPTIONS /api/v1/data?chart=netdata.requests&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775357 HTTP/1.1
-NGINX_BODY_BYTES_SENT=29
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=OPTIONS
-NGINX_REQUEST_URI=/api/v1/data?chart=netdata.requests&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775357
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:27 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=GET /stub_status HTTP/1.1
-NGINX_BODY_BYTES_SENT=120
-NGINX_HTTP_REFERER=-
-NGINX_HTTP_USER_AGENT=Go-http-client/1.1
-NGINX_REMOTE_ADDR=127.0.0.1
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_URI=/stub_status
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=GET /api/v1/data?chart=netdata.net&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=out&_=1701372775359 HTTP/1.1
-NGINX_BODY_BYTES_SENT=1918
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_URI=/api/v1/data?chart=netdata.net&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=out&_=1701372775359
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=GET /api/v1/data?chart=netdata.requests&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775357 HTTP/1.1
-NGINX_BODY_BYTES_SENT=1632
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_URI=/api/v1/data?chart=netdata.requests&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775357
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=GET /api/v1/data?chart=netdata.clients&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775358 HTTP/1.1
-NGINX_BODY_BYTES_SENT=588
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_URI=/api/v1/data?chart=netdata.clients&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775358
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=OPTIONS /api/v1/data?chart=system.cpu&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775360 HTTP/1.1
-NGINX_BODY_BYTES_SENT=29
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=OPTIONS
-NGINX_REQUEST_URI=/api/v1/data?chart=system.cpu&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775360
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=OPTIONS /api/v1/data?chart=netdata.net&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=in&_=1701372775361 HTTP/1.1
-NGINX_BODY_BYTES_SENT=29
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=OPTIONS
-NGINX_REQUEST_URI=/api/v1/data?chart=netdata.net&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=in&_=1701372775361
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=GET /api/v1/data?chart=system.cpu&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775360 HTTP/1.1
-NGINX_BODY_BYTES_SENT=6085
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_URI=/api/v1/data?chart=system.cpu&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775360
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=GET /api/v1/data?chart=netdata.net&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=in&_=1701372775361 HTTP/1.1
-NGINX_BODY_BYTES_SENT=1918
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_URI=/api/v1/data?chart=netdata.net&format=array&points=300&group=average&gtime=0&options=absolute%7Cjsonwrap%7Cnonzero&after=-300&dimensions=in&_=1701372775361
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=OPTIONS /api/v1/data?chart=system.io&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775362 HTTP/1.1
-NGINX_BODY_BYTES_SENT=29
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=OPTIONS
-NGINX_REQUEST_URI=/api/v1/data?chart=system.io&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775362
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=GET /api/v1/data?chart=system.io&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775362 HTTP/1.1
-NGINX_BODY_BYTES_SENT=3503
-NGINX_HTTP_REFERER=http://192.168.69.5:19999/
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (X11; CrOS armv7l 13597.84.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.98 Safari/537.36
-NGINX_REMOTE_ADDR=2a02:169:1210::2000
-NGINX_REMOTE_USER=-
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_URI=/api/v1/data?chart=system.io&format=json&points=267&group=average&gtime=0&options=ms%7Cflip%7Cjsonwrap%7Cnonzero&after=-300&_=1701372775362
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIME_LOCAL=30/Nov/2023:19:35:28 +0000
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
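Each record above carries `NGINX_STATUS_FAMILY`, derived from the first digit of `NGINX_STATUS` by a PCRE2 rewrite of the form `NGINX_STATUS_FAMILY=|^(?<first_digit>[1-5])|${first_digit}xx`, the same pattern exercised on the command line in `tests.sh` below. A sketch of reproducing these records with the shipped pipeline, assuming the working directory is `collectors/log2journal/`:

```bash
# Mirrors test 5 in tests.sh below: parse the combined-format fixture with
# the stock nginx-combined pipeline and print the journal records.
log2journal -f log2journal.d/nginx-combined.yaml < tests.d/nginx-combined.log
```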
diff --git a/collectors/log2journal/tests.d/nginx-json.log b/collectors/log2journal/tests.d/nginx-json.log
deleted file mode 100644
index 7e2b5d5f5..000000000
--- a/collectors/log2journal/tests.d/nginx-json.log
+++ /dev/null
@@ -1,9 +0,0 @@
-{"msec":"1644997905.123","connection":12345,"connection_requests":5,"pid":9876,"request_id":"8f3ebc1e38fbb92f","request_length":345,"remote_addr":"192.168.1.100","remote_user":"john_doe","remote_port":54321,"time_local":"19/Feb/2023:14:15:05 +0000","request":"GET /index.html HTTP/1.1","request_uri":"/index.html?param=value","args":"param=value","status":200,"body_bytes_sent":5432,"bytes_sent":6543,"http_referer":"https://example.com","http_user_agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64)","http_x_forwarded_for":"192.168.1.50, 10.0.0.1","host":"example.com","request_time":0.123,"upstream":"10.0.0.2:8080","upstream_connect_time":0.045,"upstream_header_time":0.020,"upstream_response_time":0.058,"upstream_response_length":7890,"upstream_cache_status":"MISS","ssl_protocol":"TLSv1.2","ssl_cipher":"AES256-SHA256","scheme":"https","request_method":"GET","server_protocol":"HTTP/1.1","pipe":".","gzip_ratio":"2.1","http_cf_ray":"abc123def456","geoip_country_code":"US"}
-{"msec":"1644997910.789","connection":54321,"connection_requests":10,"pid":5432,"request_id":"4a7bca5e19d3f8e7","request_length":432,"remote_addr":"10.0.0.3","remote_user":"","remote_port":12345,"time_local":"19/Feb/2023:14:15:10 +0000","request":"POST /api/update HTTP/1.1","request_uri":"/api/update","args":"","status":204,"body_bytes_sent":0,"bytes_sent":123,"http_referer":"","http_user_agent":"curl/7.68.0","http_x_forwarded_for":"","host":"api.example.com","request_time":0.032,"upstream":"backend-server-1:8080","upstream_connect_time":0.012,"upstream_header_time":0.020,"upstream_response_time":0.010,"upstream_response_length":0,"upstream_cache_status":"","ssl_protocol":"","ssl_cipher":"","scheme":"http","request_method":"POST","server_protocol":"HTTP/1.1","pipe":"p","gzip_ratio":"","http_cf_ray":"","geoip_country_code":""}
-{"msec":"1644997920.456","connection":98765,"connection_requests":15,"pid":1234,"request_id":"63f8ad2c3e1b4090","request_length":567,"remote_addr":"2001:0db8:85a3:0000:0000:8a2e:0370:7334","remote_user":"alice","remote_port":6789,"time_local":"19/Feb/2023:14:15:20 +0000","request":"GET /page?param1=value1&param2=value2 HTTP/2.0","request_uri":"/page?param1=value1&param2=value2","args":"param1=value1&param2=value2","status":404,"body_bytes_sent":0,"bytes_sent":0,"http_referer":"","http_user_agent":"Mozilla/5.0 (Linux; Android 10; Pixel 3)","http_x_forwarded_for":"","host":"example.org","request_time":0.045,"upstream":"","upstream_connect_time":0.0,"upstream_header_time":0.0,"upstream_response_time":0.0,"upstream_response_length":0,"upstream_cache_status":"","ssl_protocol":"","ssl_cipher":"","scheme":"https","request_method":"GET","server_protocol":"HTTP/2.0","pipe":".","gzip_ratio":"","http_cf_ray":"","geoip_country_code":"GB"}
-{"msec":"1644997930.987","connection":123,"connection_requests":3,"pid":5678,"request_id":"9e632a5b24c18f76","request_length":234,"remote_addr":"192.168.0.1","remote_user":"jane_doe","remote_port":9876,"time_local":"19/Feb/2023:14:15:30 +0000","request":"PUT /api/update HTTP/1.1","request_uri":"/api/update","args":"","status":500,"body_bytes_sent":543,"bytes_sent":876,"http_referer":"https://example.com/page","http_user_agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64)","http_x_forwarded_for":"","host":"api.example.com","request_time":0.123,"upstream":"backend-server-2:8080","upstream_connect_time":0.045,"upstream_header_time":0.020,"upstream_response_time":0.058,"upstream_response_length":7890,"upstream_cache_status":"HIT","ssl_protocol":"TLSv1.2","ssl_cipher":"AES256-SHA256","scheme":"https","request_method":"PUT","server_protocol":"HTTP/1.1","pipe":"p","gzip_ratio":"1.8","http_cf_ray":"xyz789abc123","geoip_country_code":"CA"}
-{"msec":"1644997940.234","connection":9876,"connection_requests":8,"pid":4321,"request_id":"1b6c59c8aef7d24a","request_length":456,"remote_addr":"203.0.113.1","remote_user":"","remote_port":5432,"time_local":"19/Feb/2023:14:15:40 +0000","request":"DELETE /api/resource HTTP/2.0","request_uri":"/api/resource","args":"","status":204,"body_bytes_sent":0,"bytes_sent":123,"http_referer":"","http_user_agent":"curl/7.68.0","http_x_forwarded_for":"","host":"api.example.com","request_time":0.032,"upstream":"backend-server-1:8080","upstream_connect_time":0.012,"upstream_header_time":0.020,"upstream_response_time":0.010,"upstream_response_length":0,"upstream_cache_status":"","ssl_protocol":"","ssl_cipher":"","scheme":"http","request_method":"DELETE","server_protocol":"HTTP/2.0","pipe":".","gzip_ratio":"","http_cf_ray":"","geoip_country_code":""}
-{"msec":"1644997950.789","connection":5432,"connection_requests":12,"pid":6543,"request_id":"72692d781d0b8a4f","request_length":789,"remote_addr":"198.51.100.2","remote_user":"bob","remote_port":8765,"time_local":"19/Feb/2023:14:15:50 +0000","request":"GET /profile?user=bob HTTP/1.1","request_uri":"/profile?user=bob","args":"user=bob","status":200,"body_bytes_sent":1234,"bytes_sent":2345,"http_referer":"","http_user_agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64)","http_x_forwarded_for":"","host":"example.com","request_time":0.065,"upstream":"10.0.0.2:8080","upstream_connect_time":0.045,"upstream_header_time":0.020,"upstream_response_time":0.058,"upstream_response_length":7890,"upstream_cache_status":"MISS","ssl_protocol":"TLSv1.3","ssl_cipher":"AES128-GCM-SHA256","scheme":"https","request_method":"GET","server_protocol":"HTTP/1.1","pipe":"p","gzip_ratio":"","http_cf_ray":"","geoip_country_code":"US"}
-{"msec":"1644997960.321","connection":65432,"connection_requests":7,"pid":7890,"request_id":"c3e158d41e75a9d7","request_length":321,"remote_addr":"203.0.113.2","remote_user":"","remote_port":9876,"time_local":"19/Feb/2023:14:15:60 +0000","request":"GET /dashboard HTTP/2.0","request_uri":"/dashboard","args":"","status":301,"body_bytes_sent":0,"bytes_sent":123,"http_referer":"","http_user_agent":"Mozilla/5.0 (Linux; Android 10; Pixel 3)","http_x_forwarded_for":"","host":"dashboard.example.org","request_time":0.032,"upstream":"","upstream_connect_time":0.0,"upstream_header_time":0.0,"upstream_response_time":0.0,"upstream_response_length":0,"upstream_cache_status":"","ssl_protocol":"","ssl_cipher":"","scheme":"https","request_method":"GET","server_protocol":"HTTP/2.0","pipe":".","gzip_ratio":"","http_cf_ray":"","geoip_country_code":""}
-{"msec":"1644997970.555","connection":8765,"connection_requests":9,"pid":8765,"request_id":"f9f6e8235de54af4","request_length":654,"remote_addr":"10.0.0.4","remote_user":"","remote_port":12345,"time_local":"19/Feb/2023:14:15:70 +0000","request":"POST /submit-form HTTP/1.1","request_uri":"/submit-form","args":"","status":201,"body_bytes_sent":876,"bytes_sent":987,"http_referer":"","http_user_agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64)","http_x_forwarded_for":"","host":"example.com","request_time":0.045,"upstream":"backend-server-3:8080","upstream_connect_time":0.012,"upstream_header_time":0.020,"upstream_response_time":0.010,"upstream_response_length":0,"upstream_cache_status":"","ssl_protocol":"","ssl_cipher":"","scheme":"http","request_method":"POST","server_protocol":"HTTP/1.1","pipe":"p","gzip_ratio":"","http_cf_ray":"","geoip_country_code":""}
-{"msec":"1644997980.987","connection":23456,"connection_requests":6,"pid":3456,"request_id":"2ec3e8859e7a406c","request_length":432,"remote_addr":"198.51.100.3","remote_user":"mary","remote_port":5678,"time_local":"19/Feb/2023:14:15:80 +0000","request":"GET /contact HTTP/1.1","request_uri":"/contact","args":"","status":404,"body_bytes_sent":0,"bytes_sent":0,"http_referer":"","http_user_agent":"Mozilla/5.0 (Linux; Android 10; Pixel 3)","http_x_forwarded_for":"","host":"example.org","request_time":0.032,"upstream":"","upstream_connect_time":0.0,"upstream_header_time":0.0,"upstream_response_time":0.0,"upstream_response_length":0,"upstream_cache_status":"","ssl_protocol":"","ssl_cipher":"","scheme":"https","request_method":"GET","server_protocol":"HTTP/1.1","pipe":".","gzip_ratio":"","http_cf_ray":"","geoip_country_code":"FR"}
diff --git a/collectors/log2journal/tests.d/nginx-json.output b/collectors/log2journal/tests.d/nginx-json.output
deleted file mode 100644
index e7db9dcbd..000000000
--- a/collectors/log2journal/tests.d/nginx-json.output
+++ /dev/null
@@ -1,296 +0,0 @@
-MESSAGE=GET /index.html HTTP/1.1
-NGINX_BODY_BYTES_SENT=5432
-NGINX_BYTES_SENT=6543
-NGINX_CONNECTION=12345
-NGINX_CONNECTION_REQUESTS=5
-NGINX_GEOIP_COUNTRY_CODE=US
-NGINX_GZIP_RATIO=2.1
-NGINX_HOST=example.com
-NGINX_HTTP_CF_RAY=abc123def456
-NGINX_HTTP_REFERER=https://example.com
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Windows NT 10.0; Win64; x64)
-NGINX_HTTP_X_FORWARDED_FOR=192.168.1.50, 10.0.0.1
-NGINX_PID=9876
-NGINX_PIPELINED=no
-NGINX_QUERY_STRING=param=value
-NGINX_REMOTE_ADDR=192.168.1.100
-NGINX_REMOTE_PORT=54321
-NGINX_REMOTE_USER=john_doe
-NGINX_REQUEST_ID=8f3ebc1e38fbb92f
-NGINX_REQUEST_LENGTH=345
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_TIME=0.123
-NGINX_REQUEST_URI=/index.html?param=value
-NGINX_SCHEME=https
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_SSL_CIPHER=AES256-SHA256
-NGINX_SSL_PROTOCOL=TLSv1.2
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIMESTAMP_SEC=1644997905.123
-NGINX_TIME_LOCAL=19/Feb/2023:14:15:05 +0000
-NGINX_UPSTREAM=10.0.0.2:8080
-NGINX_UPSTREAM_CACHE_STATUS=MISS
-NGINX_UPSTREAM_CONNECT_TIME=0.045
-NGINX_UPSTREAM_HEADER_TIME=0.020
-NGINX_UPSTREAM_RESPONSE_LENGTH=7890
-NGINX_UPSTREAM_RESPONSE_TIME=0.058
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=POST /api/update HTTP/1.1
-NGINX_BODY_BYTES_SENT=0
-NGINX_BYTES_SENT=123
-NGINX_CONNECTION=54321
-NGINX_CONNECTION_REQUESTS=10
-NGINX_HOST=api.example.com
-NGINX_HTTP_USER_AGENT=curl/7.68.0
-NGINX_PID=5432
-NGINX_PIPELINED=yes
-NGINX_REMOTE_ADDR=10.0.0.3
-NGINX_REMOTE_PORT=12345
-NGINX_REQUEST_ID=4a7bca5e19d3f8e7
-NGINX_REQUEST_LENGTH=432
-NGINX_REQUEST_METHOD=POST
-NGINX_REQUEST_TIME=0.032
-NGINX_REQUEST_URI=/api/update
-NGINX_SCHEME=http
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=204
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIMESTAMP_SEC=1644997910.789
-NGINX_TIME_LOCAL=19/Feb/2023:14:15:10 +0000
-NGINX_UPSTREAM=backend-server-1:8080
-NGINX_UPSTREAM_CONNECT_TIME=0.012
-NGINX_UPSTREAM_HEADER_TIME=0.020
-NGINX_UPSTREAM_RESPONSE_LENGTH=0
-NGINX_UPSTREAM_RESPONSE_TIME=0.010
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=GET /page?param1=value1&param2=value2 HTTP/2.0
-NGINX_BODY_BYTES_SENT=0
-NGINX_BYTES_SENT=0
-NGINX_CONNECTION=98765
-NGINX_CONNECTION_REQUESTS=15
-NGINX_GEOIP_COUNTRY_CODE=GB
-NGINX_HOST=example.org
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Linux; Android 10; Pixel 3)
-NGINX_PID=1234
-NGINX_PIPELINED=no
-NGINX_QUERY_STRING=param1=value1&param2=value2
-NGINX_REMOTE_ADDR=2001:0db8:85a3:0000:0000:8a2e:0370:7334
-NGINX_REMOTE_PORT=6789
-NGINX_REMOTE_USER=alice
-NGINX_REQUEST_ID=63f8ad2c3e1b4090
-NGINX_REQUEST_LENGTH=567
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_TIME=0.045
-NGINX_REQUEST_URI=/page?param1=value1&param2=value2
-NGINX_SCHEME=https
-NGINX_SERVER_PROTOCOL=HTTP/2.0
-NGINX_STATUS=404
-NGINX_STATUS_FAMILY=4xx
-NGINX_TIMESTAMP_SEC=1644997920.456
-NGINX_TIME_LOCAL=19/Feb/2023:14:15:20 +0000
-NGINX_UPSTREAM_CONNECT_TIME=0.0
-NGINX_UPSTREAM_HEADER_TIME=0.0
-NGINX_UPSTREAM_RESPONSE_LENGTH=0
-NGINX_UPSTREAM_RESPONSE_TIME=0.0
-PRIORITY=5
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=PUT /api/update HTTP/1.1
-NGINX_BODY_BYTES_SENT=543
-NGINX_BYTES_SENT=876
-NGINX_CONNECTION=123
-NGINX_CONNECTION_REQUESTS=3
-NGINX_GEOIP_COUNTRY_CODE=CA
-NGINX_GZIP_RATIO=1.8
-NGINX_HOST=api.example.com
-NGINX_HTTP_CF_RAY=xyz789abc123
-NGINX_HTTP_REFERER=https://example.com/page
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Windows NT 10.0; Win64; x64)
-NGINX_PID=5678
-NGINX_PIPELINED=yes
-NGINX_REMOTE_ADDR=192.168.0.1
-NGINX_REMOTE_PORT=9876
-NGINX_REMOTE_USER=jane_doe
-NGINX_REQUEST_ID=9e632a5b24c18f76
-NGINX_REQUEST_LENGTH=234
-NGINX_REQUEST_METHOD=PUT
-NGINX_REQUEST_TIME=0.123
-NGINX_REQUEST_URI=/api/update
-NGINX_SCHEME=https
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_SSL_CIPHER=AES256-SHA256
-NGINX_SSL_PROTOCOL=TLSv1.2
-NGINX_STATUS=500
-NGINX_STATUS_FAMILY=5xx
-NGINX_TIMESTAMP_SEC=1644997930.987
-NGINX_TIME_LOCAL=19/Feb/2023:14:15:30 +0000
-NGINX_UPSTREAM=backend-server-2:8080
-NGINX_UPSTREAM_CACHE_STATUS=HIT
-NGINX_UPSTREAM_CONNECT_TIME=0.045
-NGINX_UPSTREAM_HEADER_TIME=0.020
-NGINX_UPSTREAM_RESPONSE_LENGTH=7890
-NGINX_UPSTREAM_RESPONSE_TIME=0.058
-PRIORITY=3
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=DELETE /api/resource HTTP/2.0
-NGINX_BODY_BYTES_SENT=0
-NGINX_BYTES_SENT=123
-NGINX_CONNECTION=9876
-NGINX_CONNECTION_REQUESTS=8
-NGINX_HOST=api.example.com
-NGINX_HTTP_USER_AGENT=curl/7.68.0
-NGINX_PID=4321
-NGINX_PIPELINED=no
-NGINX_REMOTE_ADDR=203.0.113.1
-NGINX_REMOTE_PORT=5432
-NGINX_REQUEST_ID=1b6c59c8aef7d24a
-NGINX_REQUEST_LENGTH=456
-NGINX_REQUEST_METHOD=DELETE
-NGINX_REQUEST_TIME=0.032
-NGINX_REQUEST_URI=/api/resource
-NGINX_SCHEME=http
-NGINX_SERVER_PROTOCOL=HTTP/2.0
-NGINX_STATUS=204
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIMESTAMP_SEC=1644997940.234
-NGINX_TIME_LOCAL=19/Feb/2023:14:15:40 +0000
-NGINX_UPSTREAM=backend-server-1:8080
-NGINX_UPSTREAM_CONNECT_TIME=0.012
-NGINX_UPSTREAM_HEADER_TIME=0.020
-NGINX_UPSTREAM_RESPONSE_LENGTH=0
-NGINX_UPSTREAM_RESPONSE_TIME=0.010
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=GET /profile?user=bob HTTP/1.1
-NGINX_BODY_BYTES_SENT=1234
-NGINX_BYTES_SENT=2345
-NGINX_CONNECTION=5432
-NGINX_CONNECTION_REQUESTS=12
-NGINX_GEOIP_COUNTRY_CODE=US
-NGINX_HOST=example.com
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Windows NT 10.0; Win64; x64)
-NGINX_PID=6543
-NGINX_PIPELINED=yes
-NGINX_QUERY_STRING=user=bob
-NGINX_REMOTE_ADDR=198.51.100.2
-NGINX_REMOTE_PORT=8765
-NGINX_REMOTE_USER=bob
-NGINX_REQUEST_ID=72692d781d0b8a4f
-NGINX_REQUEST_LENGTH=789
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_TIME=0.065
-NGINX_REQUEST_URI=/profile?user=bob
-NGINX_SCHEME=https
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_SSL_CIPHER=AES128-GCM-SHA256
-NGINX_SSL_PROTOCOL=TLSv1.3
-NGINX_STATUS=200
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIMESTAMP_SEC=1644997950.789
-NGINX_TIME_LOCAL=19/Feb/2023:14:15:50 +0000
-NGINX_UPSTREAM=10.0.0.2:8080
-NGINX_UPSTREAM_CACHE_STATUS=MISS
-NGINX_UPSTREAM_CONNECT_TIME=0.045
-NGINX_UPSTREAM_HEADER_TIME=0.020
-NGINX_UPSTREAM_RESPONSE_LENGTH=7890
-NGINX_UPSTREAM_RESPONSE_TIME=0.058
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=GET /dashboard HTTP/2.0
-NGINX_BODY_BYTES_SENT=0
-NGINX_BYTES_SENT=123
-NGINX_CONNECTION=65432
-NGINX_CONNECTION_REQUESTS=7
-NGINX_HOST=dashboard.example.org
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Linux; Android 10; Pixel 3)
-NGINX_PID=7890
-NGINX_PIPELINED=no
-NGINX_REMOTE_ADDR=203.0.113.2
-NGINX_REMOTE_PORT=9876
-NGINX_REQUEST_ID=c3e158d41e75a9d7
-NGINX_REQUEST_LENGTH=321
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_TIME=0.032
-NGINX_REQUEST_URI=/dashboard
-NGINX_SCHEME=https
-NGINX_SERVER_PROTOCOL=HTTP/2.0
-NGINX_STATUS=301
-NGINX_STATUS_FAMILY=3xx
-NGINX_TIMESTAMP_SEC=1644997960.321
-NGINX_TIME_LOCAL=19/Feb/2023:14:15:60 +0000
-NGINX_UPSTREAM_CONNECT_TIME=0.0
-NGINX_UPSTREAM_HEADER_TIME=0.0
-NGINX_UPSTREAM_RESPONSE_LENGTH=0
-NGINX_UPSTREAM_RESPONSE_TIME=0.0
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=POST /submit-form HTTP/1.1
-NGINX_BODY_BYTES_SENT=876
-NGINX_BYTES_SENT=987
-NGINX_CONNECTION=8765
-NGINX_CONNECTION_REQUESTS=9
-NGINX_HOST=example.com
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Windows NT 10.0; Win64; x64)
-NGINX_PID=8765
-NGINX_PIPELINED=yes
-NGINX_REMOTE_ADDR=10.0.0.4
-NGINX_REMOTE_PORT=12345
-NGINX_REQUEST_ID=f9f6e8235de54af4
-NGINX_REQUEST_LENGTH=654
-NGINX_REQUEST_METHOD=POST
-NGINX_REQUEST_TIME=0.045
-NGINX_REQUEST_URI=/submit-form
-NGINX_SCHEME=http
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=201
-NGINX_STATUS_FAMILY=2xx
-NGINX_TIMESTAMP_SEC=1644997970.555
-NGINX_TIME_LOCAL=19/Feb/2023:14:15:70 +0000
-NGINX_UPSTREAM=backend-server-3:8080
-NGINX_UPSTREAM_CONNECT_TIME=0.012
-NGINX_UPSTREAM_HEADER_TIME=0.020
-NGINX_UPSTREAM_RESPONSE_LENGTH=0
-NGINX_UPSTREAM_RESPONSE_TIME=0.010
-PRIORITY=6
-SYSLOG_IDENTIFIER=nginx-log
-
-MESSAGE=GET /contact HTTP/1.1
-NGINX_BODY_BYTES_SENT=0
-NGINX_BYTES_SENT=0
-NGINX_CONNECTION=23456
-NGINX_CONNECTION_REQUESTS=6
-NGINX_GEOIP_COUNTRY_CODE=FR
-NGINX_HOST=example.org
-NGINX_HTTP_USER_AGENT=Mozilla/5.0 (Linux; Android 10; Pixel 3)
-NGINX_PID=3456
-NGINX_PIPELINED=no
-NGINX_REMOTE_ADDR=198.51.100.3
-NGINX_REMOTE_PORT=5678
-NGINX_REMOTE_USER=mary
-NGINX_REQUEST_ID=2ec3e8859e7a406c
-NGINX_REQUEST_LENGTH=432
-NGINX_REQUEST_METHOD=GET
-NGINX_REQUEST_TIME=0.032
-NGINX_REQUEST_URI=/contact
-NGINX_SCHEME=https
-NGINX_SERVER_PROTOCOL=HTTP/1.1
-NGINX_STATUS=404
-NGINX_STATUS_FAMILY=4xx
-NGINX_TIMESTAMP_SEC=1644997980.987
-NGINX_TIME_LOCAL=19/Feb/2023:14:15:80 +0000
-NGINX_UPSTREAM_CONNECT_TIME=0.0
-NGINX_UPSTREAM_HEADER_TIME=0.0
-NGINX_UPSTREAM_RESPONSE_LENGTH=0
-NGINX_UPSTREAM_RESPONSE_TIME=0.0
-PRIORITY=5
-SYSLOG_IDENTIFIER=nginx-log
-
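Note how `PRIORITY` tracks the status family above: 6 (informational) for 2xx and 3xx, 5 (notice) for 4xx, and 3 (error) for 5xx. The shipped pipeline is `log2journal.d/nginx-json.yaml`; the standalone flags below are a sketch of the same mapping using the inject/rewrite syntax that `tests.sh` exercises:

```bash
# Seed PRIORITY with the raw status code, then collapse it to a syslog
# level; the first matching rewrite stops the chain for that key.
log2journal json < tests.d/nginx-json.log \
    --prefix=NGINX_ \
    --inject 'PRIORITY=${NGINX_STATUS}' \
    --rewrite 'PRIORITY=/^[123]/6' \
    --rewrite 'PRIORITY=/^4/5' \
    --rewrite 'PRIORITY=/^5/3'
```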
diff --git a/collectors/log2journal/tests.sh b/collectors/log2journal/tests.sh
deleted file mode 100755
index 402438866..000000000
--- a/collectors/log2journal/tests.sh
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/usr/bin/env bash
-
-if [ -f "${PWD}/log2journal" ]; then
- log2journal_bin="${PWD}/log2journal"
-else
- log2journal_bin="$(which log2journal)"
-fi
-
-[ -z "${log2journal_bin}" ] && echo >&2 "Cannot find log2journal binary" && exit 1
-echo >&2 "Using: ${log2journal_bin}"
-
-script_dir=$(dirname "$(readlink -f "$0")")
-tests="${script_dir}/tests.d"
-
-if [ ! -d "${tests}" ]; then
- echo >&2 "tests directory '${tests}' is not found."
- exit 1
-fi
-
-# Create a random directory name in /tmp
-tmp=$(mktemp -d /tmp/script_temp.XXXXXXXXXX)
-
-# Function to clean up the temporary directory on exit
-cleanup() {
- echo "Cleaning up..."
- rm -rf "$tmp"
-}
-
-# Register the cleanup function to run on script exit
-trap cleanup EXIT
-
-# Change to the temporary directory
-cd "$tmp" || exit 1
-
-# -----------------------------------------------------------------------------
-
-test_log2journal_config() {
- local in="${1}"
- local out="${2}"
- shift 2
-
- [ -f output ] && rm output
-
- printf >&2 "running: "
- printf >&2 "%q " "${log2journal_bin}" "${@}"
- printf >&2 "\n"
-
- "${log2journal_bin}" <"${in}" "${@}" >output 2>&1
- ret=$?
-
- [ $ret -ne 0 ] && echo >&2 "${log2journal_bin} exited with code: $ret" && cat output && exit 1
-
- diff --ignore-all-space "${out}" output
-	[ $? -ne 0 ] && echo >&2 "${log2journal_bin} output does not match!" && exit 1
-
- echo >&2 "OK"
- echo >&2
-
- return 0
-}
-
-# test yaml parsing
-echo >&2
-echo >&2 "Testing full yaml config parsing..."
-test_log2journal_config /dev/null "${tests}/full.output" -f "${tests}/full.yaml" --show-config || exit 1
-
-echo >&2 "Testing command line parsing..."
-test_log2journal_config /dev/null "${tests}/full.output" --show-config \
- --prefix=NGINX_ \
- --filename-key NGINX_LOG_FILENAME \
- --inject SYSLOG_IDENTIFIER=nginx-log \
- --inject=SYSLOG_IDENTIFIER2=nginx-log2 \
- --inject 'PRIORITY=${NGINX_STATUS}' \
- --inject='NGINX_STATUS_FAMILY=${NGINX_STATUS}${NGINX_METHOD}' \
- --rewrite 'PRIORITY=//${NGINX_STATUS}/inject,dont-stop' \
- --rewrite "PRIORITY=/^[123]/6" \
- --rewrite='PRIORITY=|^4|5' \
- '--rewrite=PRIORITY=-^5-3' \
- --rewrite "PRIORITY=;.*;4" \
- --rewrite 'NGINX_STATUS_FAMILY=|^(?<first_digit>[1-5])|${first_digit}xx' \
- --rewrite 'NGINX_STATUS_FAMILY=|.*|UNKNOWN' \
- --rename TEST1=TEST2 \
- --rename=TEST3=TEST4 \
- --unmatched-key MESSAGE \
- --inject-unmatched PRIORITY=1 \
- --inject-unmatched=PRIORITY2=2 \
- --include=".*" \
- --exclude ".*HELLO.*WORLD.*" \
- '(?x) # Enable PCRE2 extended mode
- ^
- (?<NGINX_REMOTE_ADDR>[^ ]+) \s - \s # NGINX_REMOTE_ADDR
- (?<NGINX_REMOTE_USER>[^ ]+) \s # NGINX_REMOTE_USER
- \[
- (?<NGINX_TIME_LOCAL>[^\]]+) # NGINX_TIME_LOCAL
- \]
- \s+ "
- (?<MESSAGE>
- (?<NGINX_METHOD>[A-Z]+) \s+ # NGINX_METHOD
- (?<NGINX_URL>[^ ]+) \s+
- HTTP/(?<NGINX_HTTP_VERSION>[^"]+)
- )
- " \s+
- (?<NGINX_STATUS>\d+) \s+ # NGINX_STATUS
- (?<NGINX_BODY_BYTES_SENT>\d+) \s+ # NGINX_BODY_BYTES_SENT
- "(?<NGINX_HTTP_REFERER>[^"]*)" \s+ # NGINX_HTTP_REFERER
- "(?<NGINX_HTTP_USER_AGENT>[^"]*)" # NGINX_HTTP_USER_AGENT' \
- || exit 1
-
-# -----------------------------------------------------------------------------
-
-test_log2journal() {
- local n="${1}"
- local in="${2}"
- local out="${3}"
- shift 3
-
-	printf >&2 "running test No. ${n}: "
- printf >&2 "%q " "${log2journal_bin}" "${@}"
- printf >&2 "\n"
- echo >&2 "using as input : ${in}"
- echo >&2 "expecting output: ${out}"
-
- [ -f output ] && rm output
-
- "${log2journal_bin}" <"${in}" "${@}" >output 2>&1
- ret=$?
-
- [ $ret -ne 0 ] && echo >&2 "${log2journal_bin} exited with code: $ret" && cat output && exit 1
-
- diff "${out}" output
-	[ $? -ne 0 ] && echo >&2 "${log2journal_bin} output does not match! - here is what we got:" && cat output && exit 1
-
- echo >&2 "OK"
- echo >&2
-
- return 0
-}
-
-echo >&2
-echo >&2 "Testing parsing and output..."
-
-test_log2journal 1 "${tests}/json.log" "${tests}/json.output" json
-test_log2journal 2 "${tests}/json.log" "${tests}/json-include.output" json --include "OBJECT"
-test_log2journal 3 "${tests}/json.log" "${tests}/json-exclude.output" json --exclude "ARRAY[^2]"
-test_log2journal 4 "${tests}/nginx-json.log" "${tests}/nginx-json.output" -f "${script_dir}/log2journal.d/nginx-json.yaml"
-test_log2journal 5 "${tests}/nginx-combined.log" "${tests}/nginx-combined.output" -f "${script_dir}/log2journal.d/nginx-combined.yaml"
-test_log2journal 6 "${tests}/logfmt.log" "${tests}/logfmt.output" -f "${tests}/logfmt.yaml"
-test_log2journal 7 "${tests}/logfmt.log" "${tests}/default.output" -f "${script_dir}/log2journal.d/default.yaml"
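The suite resolves its own directory, so the fixture paths work regardless of where it is invoked from; it only needs a `log2journal` binary either in the current working directory or on the `PATH`. A typical run from the source tree:

```bash
# Run all log2journal tests (uses ./log2journal if present, otherwise the
# binary found on PATH; exits non-zero on the first failing test).
cd collectors/log2journal
./tests.sh
```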
diff --git a/collectors/macos.plugin/Makefile.am b/collectors/macos.plugin/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/collectors/macos.plugin/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/macos.plugin/README.md b/collectors/macos.plugin/README.md
deleted file mode 120000
index 2ea6842e4..000000000
--- a/collectors/macos.plugin/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/macos.md
\ No newline at end of file
diff --git a/collectors/macos.plugin/integrations/macos.md b/collectors/macos.plugin/integrations/macos.md
deleted file mode 100644
index 5128a5a77..000000000
--- a/collectors/macos.plugin/integrations/macos.md
+++ /dev/null
@@ -1,286 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/macos.plugin/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/macos.plugin/metadata.yaml"
-sidebar_label: "macOS"
-learn_status: "Published"
-learn_rel_path: "Data Collection/macOS Systems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# macOS
-
-
-<img src="https://netdata.cloud/img/macos.svg" width="150"/>
-
-
-Plugin: macos.plugin
-Module: mach_smi
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor macOS metrics for efficient operating system performance.
-
-The plugin uses three different methods to collect data:
- - The function `sysctlbyname` is called to collect network, swap, loadavg, and boot time.
- - The function `host_statistics` is called to collect CPU and virtual memory data.
- - The function `IOServiceGetMatchingServices` is called to collect storage information.
-
-
-This collector is only supported on the following platforms:
-
-- macOS
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per macOS instance
-
-These metrics refer to hardware and network monitoring.
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.cpu | user, nice, system, idle | percentage |
-| system.ram | active, wired, throttled, compressor, inactive, purgeable, speculative, free | MiB |
-| mem.swapio | io, out | KiB/s |
-| mem.pgfaults | memory, cow, pagein, pageout, compress, decompress, zero_fill, reactivate, purge | faults/s |
-| system.load | load1, load5, load15 | load |
-| mem.swap | free, used | MiB |
-| system.ipv4 | received, sent | kilobits/s |
-| ipv4.tcppackets | received, sent | packets/s |
-| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |
-| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |
-| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout | connections/s |
-| ipv4.tcpofo | inqueue | packets/s |
-| ipv4.tcpsyncookies | received, sent, failed | packets/s |
-| ipv4.ecnpkts | CEP, NoECTP | packets/s |
-| ipv4.udppackets | received, sent | packets/s |
-| ipv4.udperrors | RcvbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |
-| ipv4.icmp | received, sent | packets/s |
-| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |
-| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |
-| ipv4.packets | received, sent, forwarded, delivered | packets/s |
-| ipv4.fragsout | ok, failed, created | packets/s |
-| ipv4.fragsin | ok, failed, all | packets/s |
-| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |
-| ipv6.packets | received, sent, forwarded, delivers | packets/s |
-| ipv6.fragsout | ok, failed, all | packets/s |
-| ipv6.fragsin | ok, failed, timeout, all | packets/s |
-| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |
-| ipv6.icmp | received, sent | messages/s |
-| ipv6.icmpredir | received, sent | redirects/s |
-| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |
-| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |
-| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |
-| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |
-| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |
-| system.uptime | uptime | seconds |
-| system.io | in, out | KiB/s |
-
-### Per disk
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| disk.io | read, writes | KiB/s |
-| disk.ops | read, writes | operations/s |
-| disk.util | utilization | % of time working |
-| disk.iotime | reads, writes | milliseconds/s |
-| disk.await | reads, writes | milliseconds/operation |
-| disk.avgsz | reads, writes | KiB/operation |
-| disk.svctm | svctm | milliseconds/operation |
-
-### Per mount point
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| disk.space | avail, used, reserved_for_root | GiB |
-| disk.inodes | avail, used, reserved_for_root | inodes |
-
-### Per network device
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| net.net | received, sent | kilobits/s |
-| net.packets | received, sent, multicast_received, multicast_sent | packets/s |
-| net.errors | inbound, outbound | errors/s |
-| net.drops | inbound | drops/s |
-| net.events | frames, collisions, carrier | events/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ interface_speed ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-There are three sections in the file which you can configure:
-
-- `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time.
-- `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and Virtual memory.
-- `[plugin:macos:iokit]` - Enable or disable monitoring for storage devices.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| enable load average | Enable or disable monitoring of load average metrics (load1, load5, load15). | yes | no |
-| system swap | Enable or disable monitoring of system swap metrics (free, used). | yes | no |
-| bandwidth | Enable or disable monitoring of network bandwidth metrics (received, sent). | yes | no |
-| ipv4 TCP packets | Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent). | yes | no |
-| ipv4 TCP errors | Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments). | yes | no |
-| ipv4 TCP handshake issues | Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails). | yes | no |
-| ECN packets | Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts). | auto | no |
-| TCP SYN cookies | Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed). | auto | no |
-| TCP out-of-order queue | Enable or disable monitoring of TCP out-of-order queue metrics (inqueue). | auto | no |
-| TCP connection aborts | Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout). | auto | no |
-| ipv4 UDP packets | Enable or disable monitoring of ipv4 UDP packets metrics (sent, received.). | yes | no |
-| ipv4 UDP errors | Enable or disable monitoring of ipv4 UDP errors metrics (Received Buffer error, Input Errors, No Ports, In Checksum Errors, Ignored Multi). | yes | no |
-| ipv4 icmp packets | Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error). | yes | no |
-| ipv4 icmp messages | Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum). | yes | no |
-| ipv4 packets | Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered). | yes | no |
-| ipv4 fragments sent | Enable or disable monitoring of IPv4 fragments sent metrics (ok, failed, created). | yes | no |
-| ipv4 fragments assembly | Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all). | yes | no |
-| ipv4 errors | Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes). | yes | no |
-| ipv6 packets | Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered). | auto | no |
-| ipv6 fragments sent | Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all). | auto | no |
-| ipv6 fragments assembly | Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all). | auto | no |
-| ipv6 errors | Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncated Packets, I/O No Routes). | auto | no |
-| icmp | Enable or disable monitoring of ICMP metrics (sent, received). | auto | no |
-| icmp redirects | Enable or disable monitoring of ICMP redirects metrics (received, sent). | auto | no |
-| icmp errors | Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Time Exceeds, Out Parm Problems). | auto | no |
-| icmp echos | Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply). | auto | no |
-| icmp router | Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements). | auto | no |
-| icmp neighbor | Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements). | auto | no |
-| icmp types | Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type143). | auto | no |
-| space usage for all disks | Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root). | yes | no |
-| inodes usage for all disks | Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root). | yes | no |
-| bandwidth | Enable or disable monitoring of bandwidth metrics (received, sent). | yes | no |
-| system uptime | Enable or disable monitoring of system uptime metrics (uptime). | yes | no |
-| cpu utilization | Enable or disable monitoring of CPU utilization metrics (user, nice, system, idle). | yes | no |
-| system ram | Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free). | yes | no |
-| swap i/o | Enable or disable monitoring of SWAP I/O metrics (I/O Swap). | yes | no |
-| memory page faults | Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge). | yes | no |
-| disk i/o | Enable or disable monitoring of disk I/O metrics (In, Out). | yes | no |
-
-</details>
-
-#### Examples
-
-##### Disable swap monitoring.
-
-A basic example that disables swap monitoring
-
-<details><summary>Config</summary>
-
-```yaml
-[plugin:macos:sysctl]
- system swap = no
-[plugin:macos:mach_smi]
- swap i/o = no
-
-```
-</details>
-
-##### Disable complete Machine SMI section.
-
-A basic example that disables the complete mach_smi section
-
-<details><summary>Config</summary>
-
-```yaml
-[plugin:macos:mach_smi]
- cpu utilization = no
- system ram = no
- swap i/o = no
- memory page faults = no
- disk i/o = no
-
-```
-</details>
-
-
diff --git a/collectors/macos.plugin/macos_fw.c b/collectors/macos.plugin/macos_fw.c
deleted file mode 100644
index 75ef386b9..000000000
--- a/collectors/macos.plugin/macos_fw.c
+++ /dev/null
@@ -1,648 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_macos.h"
-
-#include <CoreFoundation/CoreFoundation.h>
-#include <IOKit/IOKitLib.h>
-#include <IOKit/storage/IOBlockStorageDriver.h>
-#include <IOKit/IOBSD.h>
-// NEEDED BY: do_space, do_inodes
-#include <sys/mount.h>
-// NEEDED BY: struct ifaddrs, getifaddrs()
-#include <net/if.h>
-#include <ifaddrs.h>
-
-// NEEDED BY: do_bandwidth
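-// IFA_DATA(s) token-pastes the field name: it expands to the ifi_<s> member of the
-// per-interface struct if_data that ifa->ifa_data points to for AF_LINK addresses.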
-#define IFA_DATA(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s)
-
-#define MAXDRIVENAME 31
-
-#define KILO_FACTOR 1024
-#define MEGA_FACTOR 1048576 // 1024 * 1024
-#define GIGA_FACTOR 1073741824 // 1024 * 1024 * 1024
-
-int do_macos_iokit(int update_every, usec_t dt) {
- (void)dt;
-
- static int do_io = -1, do_space = -1, do_inodes = -1, do_bandwidth = -1;
-
- if (unlikely(do_io == -1)) {
- do_io = config_get_boolean("plugin:macos:iokit", "disk i/o", 1);
- do_space = config_get_boolean("plugin:macos:sysctl", "space usage for all disks", 1);
- do_inodes = config_get_boolean("plugin:macos:sysctl", "inodes usage for all disks", 1);
- do_bandwidth = config_get_boolean("plugin:macos:sysctl", "bandwidth", 1);
- }
-
- RRDSET *st;
-
- mach_port_t main_port;
- io_registry_entry_t drive, drive_media;
- io_iterator_t drive_list;
- CFDictionaryRef properties, statistics;
- CFStringRef name;
- CFNumberRef number;
- kern_return_t status;
- collected_number total_disk_reads = 0;
- collected_number total_disk_writes = 0;
- struct diskstat {
- char name[MAXDRIVENAME];
- collected_number bytes_read;
- collected_number bytes_write;
- collected_number reads;
- collected_number writes;
- collected_number time_read;
- collected_number time_write;
- collected_number latency_read;
- collected_number latency_write;
- } diskstat;
- struct cur_diskstat {
- collected_number duration_read_ns;
- collected_number duration_write_ns;
- collected_number busy_time_ns;
- } cur_diskstat;
- struct prev_diskstat {
- collected_number bytes_read;
- collected_number bytes_write;
- collected_number operations_read;
- collected_number operations_write;
- collected_number duration_read_ns;
- collected_number duration_write_ns;
- collected_number busy_time_ns;
- } prev_diskstat;
-
- // NEEDED BY: do_space, do_inodes
- struct statfs *mntbuf;
- int mntsize, i;
- char title[4096 + 1];
-
- // NEEDED BY: do_bandwidth
- struct ifaddrs *ifa, *ifap;
-
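-/* IOMasterPort() was renamed to IOMainPort() in macOS 12; when building against an
-   older SDK, map the new name back to the old one so the call below compiles. */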
-#if !defined(MAC_OS_VERSION_12_0) || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0)
-#define IOMainPort IOMasterPort
-#endif
-
- /* Get ports and services for drive statistics. */
- if (unlikely(IOMainPort(bootstrap_port, &main_port))) {
- collector_error("MACOS: IOMasterPort() failed");
- do_io = 0;
- collector_error("DISABLED: system.io");
- /* Get the list of all drive objects. */
- } else if (unlikely(IOServiceGetMatchingServices(main_port, IOServiceMatching("IOBlockStorageDriver"), &drive_list))) {
- collector_error("MACOS: IOServiceGetMatchingServices() failed");
- do_io = 0;
- collector_error("DISABLED: system.io");
- } else {
- while ((drive = IOIteratorNext(drive_list)) != 0) {
- properties = 0;
- statistics = 0;
- number = 0;
- bzero(&diskstat, sizeof(diskstat));
-
- /* Get drive media object. */
- status = IORegistryEntryGetChildEntry(drive, kIOServicePlane, &drive_media);
- if (unlikely(status != KERN_SUCCESS)) {
- IOObjectRelease(drive);
- continue;
- }
-
- /* Get drive media properties. */
- if (likely(!IORegistryEntryCreateCFProperties(drive_media, (CFMutableDictionaryRef *)&properties, kCFAllocatorDefault, 0))) {
- /* Get disk name. */
- if (likely(name = (CFStringRef)CFDictionaryGetValue(properties, CFSTR(kIOBSDNameKey)))) {
- CFStringGetCString(name, diskstat.name, MAXDRIVENAME, kCFStringEncodingUTF8);
- }
- }
-
- /* Release. */
- CFRelease(properties);
- IOObjectRelease(drive_media);
-
- if(unlikely(!*diskstat.name)) {
- IOObjectRelease(drive);
- continue;
- }
-
- /* Obtain the properties for this drive object. */
- if (unlikely(IORegistryEntryCreateCFProperties(drive, (CFMutableDictionaryRef *)&properties, kCFAllocatorDefault, 0))) {
- IOObjectRelease(drive);
- collector_error("MACOS: IORegistryEntryCreateCFProperties() failed");
- do_io = 0;
- collector_error("DISABLED: system.io");
- break;
- } else if (likely(properties)) {
- /* Obtain the statistics from the drive properties. */
- if (likely(statistics = (CFDictionaryRef)CFDictionaryGetValue(properties, CFSTR(kIOBlockStorageDriverStatisticsKey)))) {
-
- // --------------------------------------------------------------------
-
- /* Get bytes read. */
- if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsBytesReadKey)))) {
- CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.bytes_read);
- total_disk_reads += diskstat.bytes_read;
- }
-
- /* Get bytes written. */
- if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsBytesWrittenKey)))) {
- CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.bytes_write);
- total_disk_writes += diskstat.bytes_write;
- }
-
- st = rrdset_find_active_bytype_localhost("disk", diskstat.name);
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "disk"
- , diskstat.name
- , NULL
- , diskstat.name
- , "disk.io"
- , "Disk I/O Bandwidth"
- , "KiB/s"
- , "macos.plugin"
- , "iokit"
- , 2000
- , update_every
- , RRDSET_TYPE_AREA
- );
-
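-                                // The 1024 divisor converts bytes to KiB; the negative
-                                // multiplier plots writes below the zero line, mirroring reads.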
- rrddim_add(st, "reads", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "writes", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- prev_diskstat.bytes_read = rrddim_set(st, "reads", diskstat.bytes_read);
- prev_diskstat.bytes_write = rrddim_set(st, "writes", diskstat.bytes_write);
- rrdset_done(st);
-
- /* Get number of reads. */
- if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsReadsKey)))) {
- CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.reads);
- }
-
- /* Get number of writes. */
- if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsWritesKey)))) {
- CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.writes);
- }
-
- st = rrdset_find_active_bytype_localhost("disk_ops", diskstat.name);
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "disk_ops"
- , diskstat.name
- , NULL
- , diskstat.name
- , "disk.ops"
- , "Disk Completed I/O Operations"
- , "operations/s"
- , "macos.plugin"
- , "iokit"
- , 2001
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- prev_diskstat.operations_read = rrddim_set(st, "reads", diskstat.reads);
- prev_diskstat.operations_write = rrddim_set(st, "writes", diskstat.writes);
- rrdset_done(st);
-
- /* Get reads time. */
- if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsTotalReadTimeKey)))) {
- CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.time_read);
- }
-
- /* Get writes time. */
- if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsTotalWriteTimeKey)))) {
- CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.time_write);
- }
-
- st = rrdset_find_active_bytype_localhost("disk_util", diskstat.name);
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "disk_util"
- , diskstat.name
- , NULL
- , diskstat.name
- , "disk.util"
- , "Disk Utilization Time"
- , "% of time working"
- , "macos.plugin"
- , "iokit"
- , 2004
- , update_every
- , RRDSET_TYPE_AREA
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "utilization", NULL, 1, 10000000, RRD_ALGORITHM_INCREMENTAL);
- }
-
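-                            // Busy time is reported in nanoseconds; with the INCREMENTAL
-                            // algorithm the per-second rate divided by 10,000,000 (1e9 ns/s / 100)
-                            // yields the percentage of each interval the disk spent working.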
- cur_diskstat.busy_time_ns = (diskstat.time_read + diskstat.time_write);
- prev_diskstat.busy_time_ns = rrddim_set(st, "utilization", cur_diskstat.busy_time_ns);
- rrdset_done(st);
-
- /* Get reads latency. */
- if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsLatentReadTimeKey)))) {
- CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.latency_read);
- }
-
- /* Get writes latency. */
- if (likely(number = (CFNumberRef)CFDictionaryGetValue(statistics, CFSTR(kIOBlockStorageDriverStatisticsLatentWriteTimeKey)))) {
- CFNumberGetValue(number, kCFNumberSInt64Type, &diskstat.latency_write);
- }
-
- st = rrdset_find_active_bytype_localhost("disk_iotime", diskstat.name);
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "disk_iotime"
- , diskstat.name
- , NULL
- , diskstat.name
- , "disk.iotime"
- , "Disk Total I/O Time"
- , "milliseconds/s"
- , "macos.plugin"
- , "iokit"
- , 2022
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_INCREMENTAL);
- }
-
- cur_diskstat.duration_read_ns = diskstat.time_read + diskstat.latency_read;
- cur_diskstat.duration_write_ns = diskstat.time_write + diskstat.latency_write;
- prev_diskstat.duration_read_ns = rrddim_set(st, "reads", cur_diskstat.duration_read_ns);
- prev_diskstat.duration_write_ns = rrddim_set(st, "writes", cur_diskstat.duration_write_ns);
- rrdset_done(st);
-
- // calculate differential charts
- // only if this is not the first time we run
-
- if (likely(dt)) {
- st = rrdset_find_active_bytype_localhost("disk_await", diskstat.name);
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "disk_await"
- , diskstat.name
- , NULL
- , diskstat.name
- , "disk.await"
- , "Average Completed I/O Operation Time"
- , "milliseconds/operation"
- , "macos.plugin"
- , "iokit"
- , 2005
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_ABSOLUTE);
- }
-
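-                                // await = delta(total I/O duration) / delta(completed operations)
-                                // over this interval, in ns/op; the 1,000,000 divisor converts to ms/op.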
- rrddim_set(st, "reads", (diskstat.reads - prev_diskstat.operations_read) ?
- (cur_diskstat.duration_read_ns - prev_diskstat.duration_read_ns) / (diskstat.reads - prev_diskstat.operations_read) : 0);
- rrddim_set(st, "writes", (diskstat.writes - prev_diskstat.operations_write) ?
- (cur_diskstat.duration_write_ns - prev_diskstat.duration_write_ns) / (diskstat.writes - prev_diskstat.operations_write) : 0);
- rrdset_done(st);
-
- st = rrdset_find_active_bytype_localhost("disk_avgsz", diskstat.name);
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "disk_avgsz"
- , diskstat.name
- , NULL
- , diskstat.name
- , "disk.avgsz"
- , "Average Completed I/O Operation Bandwidth"
- , "KiB/operation"
- , "macos.plugin"
- , "iokit"
- , 2006
- , update_every
- , RRDSET_TYPE_AREA
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "reads", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "writes", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(st, "reads", (diskstat.reads - prev_diskstat.operations_read) ?
- (diskstat.bytes_read - prev_diskstat.bytes_read) / (diskstat.reads - prev_diskstat.operations_read) : 0);
- rrddim_set(st, "writes", (diskstat.writes - prev_diskstat.operations_write) ?
- (diskstat.bytes_write - prev_diskstat.bytes_write) / (diskstat.writes - prev_diskstat.operations_write) : 0);
- rrdset_done(st);
-
- st = rrdset_find_active_bytype_localhost("disk_svctm", diskstat.name);
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "disk_svctm"
- , diskstat.name
- , NULL
- , diskstat.name
- , "disk.svctm"
- , "Average Service Time"
- , "milliseconds/operation"
- , "macos.plugin"
- , "iokit"
- , 2007
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "svctm", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
- }
-
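-                            // svctm = delta(busy time) / delta(completed reads + writes) over this
-                            // interval; guarded against a zero operation count to avoid dividing by zero.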
- rrddim_set(st, "svctm", ((diskstat.reads - prev_diskstat.operations_read) + (diskstat.writes - prev_diskstat.operations_write)) ?
- (cur_diskstat.busy_time_ns - prev_diskstat.busy_time_ns) / ((diskstat.reads - prev_diskstat.operations_read) + (diskstat.writes - prev_diskstat.operations_write)) : 0);
- rrdset_done(st);
- }
- }
-
- /* Release. */
- CFRelease(properties);
- }
-
- /* Release. */
- IOObjectRelease(drive);
- }
- IOIteratorReset(drive_list);
-
- /* Release. */
- IOObjectRelease(drive_list);
- }
-
- if (likely(do_io)) {
- st = rrdset_find_active_bytype_localhost("system", "io");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system"
- , "io"
- , NULL
- , "disk"
- , NULL
- , "Disk I/O"
- , "KiB/s"
- , "macos.plugin"
- , "iokit"
- , 150
- , update_every
- , RRDSET_TYPE_AREA
- );
- rrddim_add(st, "in", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "out", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "in", total_disk_reads);
- rrddim_set(st, "out", total_disk_writes);
- rrdset_done(st);
- }
-
- // Can be merged with FreeBSD plugin
-
- if (likely(do_space || do_inodes)) {
- // there is no mount info in sysctl MIBs
- if (unlikely(!(mntsize = getmntinfo(&mntbuf, MNT_NOWAIT)))) {
- collector_error("MACOS: getmntinfo() failed");
- do_space = 0;
- collector_error("DISABLED: disk_space.X");
- do_inodes = 0;
- collector_error("DISABLED: disk_inodes.X");
- } else {
- for (i = 0; i < mntsize; i++) {
- if (mntbuf[i].f_flags == MNT_RDONLY ||
- mntbuf[i].f_blocks == 0 ||
- // taken from gnulib/mountlist.c and shortened to FreeBSD related fstypes
- strcmp(mntbuf[i].f_fstypename, "autofs") == 0 ||
- strcmp(mntbuf[i].f_fstypename, "procfs") == 0 ||
- strcmp(mntbuf[i].f_fstypename, "subfs") == 0 ||
- strcmp(mntbuf[i].f_fstypename, "devfs") == 0 ||
- strcmp(mntbuf[i].f_fstypename, "none") == 0)
- continue;
-
- // --------------------------------------------------------------------------
-
- if (likely(do_space)) {
- st = rrdset_find_active_bytype_localhost("disk_space", mntbuf[i].f_mntonname);
- if (unlikely(!st)) {
- snprintfz(title, sizeof(title) - 1, "Disk Space Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
- st = rrdset_create_localhost(
- "disk_space"
- , mntbuf[i].f_mntonname
- , NULL
- , mntbuf[i].f_mntonname
- , "disk.space"
- , title
- , "GiB"
- , "macos.plugin"
- , "iokit"
- , 2023
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrddim_add(st, "avail", NULL, mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "used", NULL, mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "reserved_for_root", "reserved for root", mntbuf[i].f_bsize, GIGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(st, "avail", (collected_number) mntbuf[i].f_bavail);
- rrddim_set(st, "used", (collected_number) (mntbuf[i].f_blocks - mntbuf[i].f_bfree));
- rrddim_set(st, "reserved_for_root", (collected_number) (mntbuf[i].f_bfree - mntbuf[i].f_bavail));
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------------
-
- if (likely(do_inodes)) {
- st = rrdset_find_active_bytype_localhost("disk_inodes", mntbuf[i].f_mntonname);
- if (unlikely(!st)) {
- snprintfz(title, sizeof(title) - 1, "Disk Files (inodes) Usage for %s [%s]", mntbuf[i].f_mntonname, mntbuf[i].f_mntfromname);
- st = rrdset_create_localhost(
- "disk_inodes"
- , mntbuf[i].f_mntonname
- , NULL
- , mntbuf[i].f_mntonname
- , "disk.inodes"
- , title
- , "inodes"
- , "macos.plugin"
- , "iokit"
- , 2024
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrddim_add(st, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "reserved_for_root", "reserved for root", 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(st, "avail", (collected_number) mntbuf[i].f_ffree);
- rrddim_set(st, "used", (collected_number) (mntbuf[i].f_files - mntbuf[i].f_ffree));
- rrdset_done(st);
- }
- }
- }
- }
-
- // Can be merged with FreeBSD plugin
-
- if (likely(do_bandwidth)) {
- if (unlikely(getifaddrs(&ifap))) {
- collector_error("MACOS: getifaddrs()");
- do_bandwidth = 0;
- collector_error("DISABLED: system.ipv4");
- } else {
- for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr->sa_family != AF_LINK)
- continue;
-
- st = rrdset_find_active_bytype_localhost("net", ifa->ifa_name);
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "net"
- , ifa->ifa_name
- , NULL
- , ifa->ifa_name
- , "net.net"
- , "Bandwidth"
- , "kilobits/s"
- , "macos.plugin"
- , "iokit"
- , 7000
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "received", IFA_DATA(ibytes));
- rrddim_set(st, "sent", IFA_DATA(obytes));
- rrdset_done(st);
-
- st = rrdset_find_active_bytype_localhost("net_packets", ifa->ifa_name);
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "net_packets"
- , ifa->ifa_name
- , NULL
- , ifa->ifa_name
- , "net.packets"
- , "Packets"
- , "packets/s"
- , "macos.plugin"
- , "iokit"
- , 7001
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "multicast_received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "multicast_sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "received", IFA_DATA(ipackets));
- rrddim_set(st, "sent", IFA_DATA(opackets));
- rrddim_set(st, "multicast_received", IFA_DATA(imcasts));
- rrddim_set(st, "multicast_sent", IFA_DATA(omcasts));
- rrdset_done(st);
-
- st = rrdset_find_active_bytype_localhost("net_errors", ifa->ifa_name);
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "net_errors"
- , ifa->ifa_name
- , NULL
- , ifa->ifa_name
- , "net.errors"
- , "Interface Errors"
- , "errors/s"
- , "macos.plugin"
- , "iokit"
- , 7002
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "inbound", IFA_DATA(ierrors));
- rrddim_set(st, "outbound", IFA_DATA(oerrors));
- rrdset_done(st);
-
- st = rrdset_find_active_bytype_localhost("net_drops", ifa->ifa_name);
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "net_drops"
- , ifa->ifa_name
- , NULL
- , ifa->ifa_name
- , "net.drops"
- , "Interface Drops"
- , "drops/s"
- , "macos.plugin"
- , "iokit"
- , 7003
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "inbound", IFA_DATA(iqdrops));
- rrdset_done(st);
-
- st = rrdset_find_active_bytype_localhost("net_events", ifa->ifa_name);
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "net_events"
- , ifa->ifa_name
- , NULL
- , ifa->ifa_name
- , "net.events"
- , "Network Interface Events"
- , "events/s"
- , "macos.plugin"
- , "iokit"
- , 7006
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "collisions", IFA_DATA(collisions));
- rrdset_done(st);
- }
-
- freeifaddrs(ifap);
- }
- }
-
- return 0;
-}
diff --git a/collectors/macos.plugin/macos_mach_smi.c b/collectors/macos.plugin/macos_mach_smi.c
deleted file mode 100644
index 30c957187..000000000
--- a/collectors/macos.plugin/macos_mach_smi.c
+++ /dev/null
@@ -1,227 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_macos.h"
-
-#include <mach/mach.h>
-
-int do_macos_mach_smi(int update_every, usec_t dt) {
- (void)dt;
-
-    static int do_cpu = -1, do_ram = -1, do_swapio = -1, do_pgfaults = -1;
-
- if (unlikely(do_cpu == -1)) {
- do_cpu = config_get_boolean("plugin:macos:mach_smi", "cpu utilization", 1);
- do_ram = config_get_boolean("plugin:macos:mach_smi", "system ram", 1);
- do_swapio = config_get_boolean("plugin:macos:mach_smi", "swap i/o", 1);
- do_pgfaults = config_get_boolean("plugin:macos:mach_smi", "memory page faults", 1);
- }
-
- RRDSET *st;
-
- kern_return_t kr;
- mach_msg_type_number_t count;
- host_t host;
- vm_size_t system_pagesize;
-
-
- // NEEDED BY: do_cpu
- natural_t cp_time[CPU_STATE_MAX];
-
- // NEEDED BY: do_ram, do_swapio, do_pgfaults
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
- vm_statistics64_data_t vm_statistics;
-#else
- vm_statistics_data_t vm_statistics;
-#endif
-
- host = mach_host_self();
- kr = host_page_size(host, &system_pagesize);
- if (unlikely(kr != KERN_SUCCESS))
- return -1;
-
- if (likely(do_cpu)) {
- if (unlikely(HOST_CPU_LOAD_INFO_COUNT != 4)) {
- collector_error("MACOS: There are %d CPU states (4 was expected)", HOST_CPU_LOAD_INFO_COUNT);
- do_cpu = 0;
- collector_error("DISABLED: system.cpu");
- } else {
- count = HOST_CPU_LOAD_INFO_COUNT;
- kr = host_statistics(host, HOST_CPU_LOAD_INFO, (host_info_t)cp_time, &count);
- if (unlikely(kr != KERN_SUCCESS)) {
- collector_error("MACOS: host_statistics() failed: %s", mach_error_string(kr));
- do_cpu = 0;
- collector_error("DISABLED: system.cpu");
- } else {
-
- st = rrdset_find_active_bytype_localhost("system", "cpu");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system"
- , "cpu"
- , NULL
- , "cpu"
- , "system.cpu"
- , "Total CPU utilization"
- , "percentage"
- , "macos.plugin"
- , "mach_smi"
- , 100
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrddim_add(st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_add(st, "nice", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_add(st, "system", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_add(st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_hide(st, "idle");
- }
-
- rrddim_set(st, "user", cp_time[CPU_STATE_USER]);
- rrddim_set(st, "nice", cp_time[CPU_STATE_NICE]);
- rrddim_set(st, "system", cp_time[CPU_STATE_SYSTEM]);
- rrddim_set(st, "idle", cp_time[CPU_STATE_IDLE]);
- rrdset_done(st);
- }
- }
- }
-
- if (likely(do_ram || do_swapio || do_pgfaults)) {
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1060)
- count = sizeof(vm_statistics64_data_t);
- kr = host_statistics64(host, HOST_VM_INFO64, (host_info64_t)&vm_statistics, &count);
-#else
- count = sizeof(vm_statistics_data_t);
- kr = host_statistics(host, HOST_VM_INFO, (host_info_t)&vm_statistics, &count);
-#endif
- if (unlikely(kr != KERN_SUCCESS)) {
- collector_error("MACOS: host_statistics64() failed: %s", mach_error_string(kr));
- do_ram = 0;
- collector_error("DISABLED: system.ram");
- do_swapio = 0;
- collector_error("DISABLED: mem.swapio");
- do_pgfaults = 0;
- collector_error("DISABLED: mem.pgfaults");
- } else {
- if (likely(do_ram)) {
- st = rrdset_find_active_localhost("system.ram");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system"
- , "ram"
- , NULL
- , "ram"
- , NULL
- , "System RAM"
- , "MiB"
- , "macos.plugin"
- , "mach_smi"
- , 200
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
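-                    // vm_statistics counts are in pages: the system_pagesize multiplier
-                    // converts pages to bytes, and the 1048576 divisor converts bytes to MiB.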
- rrddim_add(st, "active", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "wired", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
- rrddim_add(st, "throttled", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "compressor", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
-#endif
- rrddim_add(st, "inactive", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "purgeable", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "speculative", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "free", NULL, system_pagesize, 1048576, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(st, "active", vm_statistics.active_count);
- rrddim_set(st, "wired", vm_statistics.wire_count);
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
- rrddim_set(st, "throttled", vm_statistics.throttled_count);
- rrddim_set(st, "compressor", vm_statistics.compressor_page_count);
-#endif
- rrddim_set(st, "inactive", vm_statistics.inactive_count);
- rrddim_set(st, "purgeable", vm_statistics.purgeable_count);
- rrddim_set(st, "speculative", vm_statistics.speculative_count);
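-            // free_count includes speculative pages, so subtract them here to avoid
-            // counting them twice (they are already charted as "speculative").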
- rrddim_set(st, "free", (vm_statistics.free_count - vm_statistics.speculative_count));
- rrdset_done(st);
- }
-
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
- if (likely(do_swapio)) {
- st = rrdset_find_active_localhost("mem.swapio");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "mem"
- , "swapio"
- , NULL
- , "swap"
- , NULL
- , "Swap I/O"
- , "KiB/s"
- , "macos.plugin"
- , "mach_smi"
- , 250
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrddim_add(st, "in", NULL, system_pagesize, 1024, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "out", NULL, -system_pagesize, 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "in", vm_statistics.swapins);
- rrddim_set(st, "out", vm_statistics.swapouts);
- rrdset_done(st);
- }
-#endif
-
- if (likely(do_pgfaults)) {
- st = rrdset_find_active_localhost("mem.pgfaults");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "mem"
- , "pgfaults"
- , NULL
- , "system"
- , NULL
- , "Memory Page Faults"
- , "faults/s"
- , "macos.plugin"
- , "mach_smi"
- , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "pagein", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "pageout", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
- rrddim_add(st, "compress", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "decompress", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#endif
- rrddim_add(st, "zero_fill", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "reactivate", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "purge", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "memory", vm_statistics.faults);
- rrddim_set(st, "cow", vm_statistics.cow_faults);
- rrddim_set(st, "pagein", vm_statistics.pageins);
- rrddim_set(st, "pageout", vm_statistics.pageouts);
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
- rrddim_set(st, "compress", vm_statistics.compressions);
- rrddim_set(st, "decompress", vm_statistics.decompressions);
-#endif
- rrddim_set(st, "zero_fill", vm_statistics.zero_fill_count);
- rrddim_set(st, "reactivate", vm_statistics.reactivations);
- rrddim_set(st, "purge", vm_statistics.purges);
- rrdset_done(st);
- }
- }
- }
-
- return 0;
-}
diff --git a/collectors/macos.plugin/macos_sysctl.c b/collectors/macos.plugin/macos_sysctl.c
deleted file mode 100644
index 520d2f938..000000000
--- a/collectors/macos.plugin/macos_sysctl.c
+++ /dev/null
@@ -1,1424 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_macos.h"
-
-#include <Availability.h>
-// NEEDED BY: do_bandwidth
-#include <net/route.h>
-// NEEDED BY: do_tcp...
-#include <sys/socketvar.h>
-#include <netinet/tcp_var.h>
-#include <netinet/tcp_fsm.h>
-// NEEDED BY: do_udp..., do_ip...
-#include <netinet/ip_var.h>
-// NEEDED BY do_udp...
-#include <netinet/udp.h>
-#include <netinet/udp_var.h>
-// NEEDED BY do_icmp...
-#include <netinet/ip.h>
-#include <netinet/ip_icmp.h>
-#include <netinet/icmp_var.h>
-// NEEDED BY do_icmp6...
-#include <netinet/icmp6.h>
-// NEEDED BY do_uptime
-#include <time.h>
-
-// MacOS calculates load averages once every 5 seconds
-#define MIN_LOADAVG_UPDATE_EVERY 5
-
-int do_macos_sysctl(int update_every, usec_t dt) {
- static int do_loadavg = -1, do_swap = -1, do_bandwidth = -1,
- do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_ecn = -1,
- do_tcpext_syscookies = -1, do_tcpext_ofo = -1, do_tcpext_connaborts = -1,
- do_udp_packets = -1, do_udp_errors = -1, do_icmp_packets = -1, do_icmpmsg = -1,
- do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1,
- do_ip6_packets = -1, do_ip6_fragsout = -1, do_ip6_fragsin = -1, do_ip6_errors = -1,
- do_icmp6 = -1, do_icmp6_redir = -1, do_icmp6_errors = -1, do_icmp6_echos = -1,
- do_icmp6_router = -1, do_icmp6_neighbor = -1, do_icmp6_types = -1, do_uptime = -1;
-
-
- if (unlikely(do_loadavg == -1)) {
- do_loadavg = config_get_boolean("plugin:macos:sysctl", "enable load average", 1);
- do_swap = config_get_boolean("plugin:macos:sysctl", "system swap", 1);
- do_bandwidth = config_get_boolean("plugin:macos:sysctl", "bandwidth", 1);
- do_tcp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP packets", 1);
- do_tcp_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP errors", 1);
- do_tcp_handshake = config_get_boolean("plugin:macos:sysctl", "ipv4 TCP handshake issues", 1);
- do_ecn = config_get_boolean_ondemand("plugin:macos:sysctl", "ECN packets", CONFIG_BOOLEAN_AUTO);
- do_tcpext_syscookies = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP SYN cookies", CONFIG_BOOLEAN_AUTO);
- do_tcpext_ofo = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP out-of-order queue", CONFIG_BOOLEAN_AUTO);
- do_tcpext_connaborts = config_get_boolean_ondemand("plugin:macos:sysctl", "TCP connection aborts", CONFIG_BOOLEAN_AUTO);
- do_udp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 UDP packets", 1);
- do_udp_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 UDP errors", 1);
- do_icmp_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 ICMP packets", 1);
- do_icmpmsg = config_get_boolean("plugin:macos:sysctl", "ipv4 ICMP messages", 1);
- do_ip_packets = config_get_boolean("plugin:macos:sysctl", "ipv4 packets", 1);
- do_ip_fragsout = config_get_boolean("plugin:macos:sysctl", "ipv4 fragments sent", 1);
- do_ip_fragsin = config_get_boolean("plugin:macos:sysctl", "ipv4 fragments assembly", 1);
- do_ip_errors = config_get_boolean("plugin:macos:sysctl", "ipv4 errors", 1);
- do_ip6_packets = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 packets", CONFIG_BOOLEAN_AUTO);
- do_ip6_fragsout = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 fragments sent", CONFIG_BOOLEAN_AUTO);
- do_ip6_fragsin = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 fragments assembly", CONFIG_BOOLEAN_AUTO);
- do_ip6_errors = config_get_boolean_ondemand("plugin:macos:sysctl", "ipv6 errors", CONFIG_BOOLEAN_AUTO);
- do_icmp6 = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp", CONFIG_BOOLEAN_AUTO);
- do_icmp6_redir = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp redirects", CONFIG_BOOLEAN_AUTO);
- do_icmp6_errors = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp errors", CONFIG_BOOLEAN_AUTO);
- do_icmp6_echos = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp echos", CONFIG_BOOLEAN_AUTO);
- do_icmp6_router = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp router", CONFIG_BOOLEAN_AUTO);
- do_icmp6_neighbor = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp neighbor", CONFIG_BOOLEAN_AUTO);
- do_icmp6_types = config_get_boolean_ondemand("plugin:macos:sysctl", "icmp types", CONFIG_BOOLEAN_AUTO);
- do_uptime = config_get_boolean("plugin:macos:sysctl", "system uptime", 1);
- }
-
- RRDSET *st = NULL;
-
- int i;
- size_t size;
-
- // NEEDED BY: do_loadavg
- static usec_t next_loadavg_dt = 0;
- struct loadavg sysload;
-
- // NEEDED BY: do_swap
- struct xsw_usage swap_usage;
-
- // NEEDED BY: do_bandwidth
- int mib[6];
- static char *ifstatdata = NULL;
- char *lim, *next;
- struct if_msghdr *ifm;
- struct iftot {
- u_long ift_ibytes;
- u_long ift_obytes;
- } iftot = {0, 0};
-
- // NEEDED BY: do_tcp...
- struct tcpstat tcpstat;
-
- // NEEDED BY: do_udp...
- struct udpstat udpstat;
-
- // NEEDED BY: do_icmp...
- struct icmpstat icmpstat;
- struct icmp_total {
- u_long msgs_in;
- u_long msgs_out;
- } icmp_total = {0, 0};
-
- // NEEDED BY: do_ip...
- struct ipstat ipstat;
-
- // NEEDED BY: do_ip6...
- /*
- * Dirty workaround for /usr/include/netinet6/ip6_var.h absence.
- * Struct ip6stat was copied from bsd/netinet6/ip6_var.h from xnu sources.
- * Do the same for previously missing scope6_var.h on OS X < 10.11.
- */
-#define IP6S_SRCRULE_COUNT 16
-
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100)
-#ifndef _NETINET6_SCOPE6_VAR_H_
-#define _NETINET6_SCOPE6_VAR_H_
-#include <sys/appleapiopts.h>
-
-#define SCOPE6_ID_MAX 16
-#endif
-#else
-#include <netinet6/scope6_var.h>
-#endif
-
- struct ip6stat {
- u_quad_t ip6s_total; /* total packets received */
- u_quad_t ip6s_tooshort; /* packet too short */
- u_quad_t ip6s_toosmall; /* not enough data */
- u_quad_t ip6s_fragments; /* fragments received */
- u_quad_t ip6s_fragdropped; /* frags dropped(dups, out of space) */
- u_quad_t ip6s_fragtimeout; /* fragments timed out */
- u_quad_t ip6s_fragoverflow; /* fragments that exceeded limit */
- u_quad_t ip6s_forward; /* packets forwarded */
- u_quad_t ip6s_cantforward; /* packets rcvd for unreachable dest */
- u_quad_t ip6s_redirectsent; /* packets forwarded on same net */
- u_quad_t ip6s_delivered; /* datagrams delivered to upper level */
- u_quad_t ip6s_localout; /* total ip packets generated here */
- u_quad_t ip6s_odropped; /* lost packets due to nobufs, etc. */
- u_quad_t ip6s_reassembled; /* total packets reassembled ok */
- u_quad_t ip6s_atmfrag_rcvd; /* atomic fragments received */
- u_quad_t ip6s_fragmented; /* datagrams successfully fragmented */
- u_quad_t ip6s_ofragments; /* output fragments created */
- u_quad_t ip6s_cantfrag; /* don't fragment flag was set, etc. */
- u_quad_t ip6s_badoptions; /* error in option processing */
- u_quad_t ip6s_noroute; /* packets discarded due to no route */
- u_quad_t ip6s_badvers; /* ip6 version != 6 */
- u_quad_t ip6s_rawout; /* total raw ip packets generated */
- u_quad_t ip6s_badscope; /* scope error */
- u_quad_t ip6s_notmember; /* don't join this multicast group */
- u_quad_t ip6s_nxthist[256]; /* next header history */
- u_quad_t ip6s_m1; /* one mbuf */
- u_quad_t ip6s_m2m[32]; /* two or more mbuf */
- u_quad_t ip6s_mext1; /* one ext mbuf */
- u_quad_t ip6s_mext2m; /* two or more ext mbuf */
- u_quad_t ip6s_exthdrtoolong; /* ext hdr are not continuous */
- u_quad_t ip6s_nogif; /* no match gif found */
- u_quad_t ip6s_toomanyhdr; /* discarded due to too many headers */
-
- /*
- * statistics for improvement of the source address selection
- * algorithm:
- */
- /* number of times that address selection fails */
- u_quad_t ip6s_sources_none;
- /* number of times that an address on the outgoing I/F is chosen */
- u_quad_t ip6s_sources_sameif[SCOPE6_ID_MAX];
- /* number of times that an address on a non-outgoing I/F is chosen */
- u_quad_t ip6s_sources_otherif[SCOPE6_ID_MAX];
- /*
- * number of times that an address that has the same scope
- * from the destination is chosen.
- */
- u_quad_t ip6s_sources_samescope[SCOPE6_ID_MAX];
- /*
- * number of times that an address that has a different scope
- * from the destination is chosen.
- */
- u_quad_t ip6s_sources_otherscope[SCOPE6_ID_MAX];
- /* number of times that a deprecated address is chosen */
- u_quad_t ip6s_sources_deprecated[SCOPE6_ID_MAX];
-
- u_quad_t ip6s_forward_cachehit;
- u_quad_t ip6s_forward_cachemiss;
-
- /* number of times that each rule of source selection is applied. */
- u_quad_t ip6s_sources_rule[IP6S_SRCRULE_COUNT];
-
- /* number of times we ignored address on expensive secondary interfaces */
- u_quad_t ip6s_sources_skip_expensive_secondary_if;
-
- /* pkt dropped, no mbufs for control data */
- u_quad_t ip6s_pktdropcntrl;
-
- /* total packets trimmed/adjusted */
- u_quad_t ip6s_adj;
- /* hwcksum info discarded during adjustment */
- u_quad_t ip6s_adj_hwcsum_clr;
-
- /* duplicate address detection collisions */
- u_quad_t ip6s_dad_collide;
-
- /* DAD NS looped back */
- u_quad_t ip6s_dad_loopcount;
- } ip6stat;
-
- // NEEDED BY: do_icmp6...
- struct icmp6stat icmp6stat;
- struct icmp6_total {
- u_long msgs_in;
- u_long msgs_out;
- } icmp6_total = {0, 0};
-
- // NEEDED BY: do_uptime
- struct timespec boot_time, cur_time;
-
- if (next_loadavg_dt <= dt) {
- if (likely(do_loadavg)) {
- if (unlikely(GETSYSCTL_BY_NAME("vm.loadavg", sysload))) {
- do_loadavg = 0;
- collector_error("DISABLED: system.load");
- } else {
-
- st = rrdset_find_active_bytype_localhost("system", "load");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system"
- , "load"
- , NULL
- , "load"
- , NULL
- , "System Load Average"
- , "load"
- , "macos.plugin"
- , "sysctl"
- , 100
- , (update_every < MIN_LOADAVG_UPDATE_EVERY) ? MIN_LOADAVG_UPDATE_EVERY : update_every
- , RRDSET_TYPE_LINE
- );
- rrddim_add(st, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- }
-
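-                    // vm.loadavg returns fixed-point values: ldavg / fscale is the real load
-                    // average, stored x1000 against a matching 1000 divisor to keep three decimals.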
- rrddim_set(st, "load1", (collected_number) ((double)sysload.ldavg[0] / sysload.fscale * 1000));
- rrddim_set(st, "load5", (collected_number) ((double)sysload.ldavg[1] / sysload.fscale * 1000));
- rrddim_set(st, "load15", (collected_number) ((double)sysload.ldavg[2] / sysload.fscale * 1000));
- rrdset_done(st);
- }
- }
-
- next_loadavg_dt = st->update_every * USEC_PER_SEC;
- }
- else next_loadavg_dt -= dt;
-
- if (likely(do_swap)) {
- if (unlikely(GETSYSCTL_BY_NAME("vm.swapusage", swap_usage))) {
- do_swap = 0;
- collector_error("DISABLED: mem.swap");
- } else {
- st = rrdset_find_active_localhost("mem.swap");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "mem"
- , "swap"
- , NULL
- , "swap"
- , NULL
- , "System Swap"
- , "MiB"
- , "macos.plugin"
- , "sysctl"
- , 201
- , update_every
- , RRDSET_TYPE_STACKED
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "free", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE);
- rrddim_add(st, "used", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(st, "free", swap_usage.xsu_avail);
- rrddim_set(st, "used", swap_usage.xsu_used);
- rrdset_done(st);
- }
- }
-
- if (likely(do_bandwidth)) {
- mib[0] = CTL_NET;
- mib[1] = PF_ROUTE;
- mib[2] = 0;
- mib[3] = AF_INET;
- mib[4] = NET_RT_IFLIST2;
- mib[5] = 0;
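-        // Two-phase sysctl: the first call with a NULL buffer only reports the size
-        // needed for the NET_RT_IFLIST2 interface list; the buffer is then grown and
-        // the same MIB is queried again for the actual records.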
- if (unlikely(sysctl(mib, 6, NULL, &size, NULL, 0))) {
- collector_error("MACOS: sysctl(%s...) failed: %s", "net interfaces", strerror(errno));
- do_bandwidth = 0;
- collector_error("DISABLED: system.ipv4");
- } else {
- ifstatdata = reallocz(ifstatdata, size);
- if (unlikely(sysctl(mib, 6, ifstatdata, &size, NULL, 0) < 0)) {
- collector_error("MACOS: sysctl(%s...) failed: %s", "net interfaces", strerror(errno));
- do_bandwidth = 0;
- collector_error("DISABLED: system.ipv4");
- } else {
- lim = ifstatdata + size;
- iftot.ift_ibytes = iftot.ift_obytes = 0;
- for (next = ifstatdata; next < lim; ) {
- ifm = (struct if_msghdr *)next;
- next += ifm->ifm_msglen;
-
- if (ifm->ifm_type == RTM_IFINFO2) {
- struct if_msghdr2 *if2m = (struct if_msghdr2 *)ifm;
-
- iftot.ift_ibytes += if2m->ifm_data.ifi_ibytes;
- iftot.ift_obytes += if2m->ifm_data.ifi_obytes;
- }
- }
- st = rrdset_find_active_localhost("system.ipv4");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system"
- , "ipv4"
- , NULL
- , "network"
- , NULL
- , "IPv4 Bandwidth"
- , "kilobits/s"
- , "macos.plugin"
- , "sysctl"
- , 500
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InOctets", iftot.ift_ibytes);
- rrddim_set(st, "OutOctets", iftot.ift_obytes);
- rrdset_done(st);
- }
- }
- }
-
- // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html
- if (likely(do_tcp_packets || do_tcp_errors || do_tcp_handshake || do_tcpext_connaborts || do_tcpext_ofo || do_tcpext_syscookies || do_ecn)) {
- if (unlikely(GETSYSCTL_BY_NAME("net.inet.tcp.stats", tcpstat))){
- do_tcp_packets = 0;
- collector_error("DISABLED: ipv4.tcppackets");
- do_tcp_errors = 0;
- collector_error("DISABLED: ipv4.tcperrors");
- do_tcp_handshake = 0;
- collector_error("DISABLED: ipv4.tcphandshake");
- do_tcpext_connaborts = 0;
- collector_error("DISABLED: ipv4.tcpconnaborts");
- do_tcpext_ofo = 0;
- collector_error("DISABLED: ipv4.tcpofo");
- do_tcpext_syscookies = 0;
- collector_error("DISABLED: ipv4.tcpsyncookies");
- do_ecn = 0;
- collector_error("DISABLED: ipv4.ecnpkts");
- } else {
- if (likely(do_tcp_packets)) {
- st = rrdset_find_active_localhost("ipv4.tcppackets");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "tcppackets"
- , NULL
- , "tcp"
- , NULL
- , "IPv4 TCP Packets"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 2600
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InSegs", tcpstat.tcps_rcvtotal);
- rrddim_set(st, "OutSegs", tcpstat.tcps_sndtotal);
- rrdset_done(st);
- }
-
- if (likely(do_tcp_errors)) {
- st = rrdset_find_active_localhost("ipv4.tcperrors");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "tcperrors"
- , NULL
- , "tcp"
- , NULL
- , "IPv4 TCP Errors"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 2700
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InErrs", tcpstat.tcps_rcvbadoff + tcpstat.tcps_rcvshort);
- rrddim_set(st, "InCsumErrors", tcpstat.tcps_rcvbadsum);
- rrddim_set(st, "RetransSegs", tcpstat.tcps_sndrexmitpack);
- rrdset_done(st);
- }
-
- if (likely(do_tcp_handshake)) {
- st = rrdset_find_active_localhost("ipv4.tcphandshake");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "tcphandshake"
- , NULL
- , "tcp"
- , NULL
- , "IPv4 TCP Handshake Issues"
- , "events/s"
- , "macos.plugin"
- , "sysctl"
- , 2900
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "EstabResets", tcpstat.tcps_drops);
- rrddim_set(st, "ActiveOpens", tcpstat.tcps_connattempt);
- rrddim_set(st, "PassiveOpens", tcpstat.tcps_accepts);
- rrddim_set(st, "AttemptFails", tcpstat.tcps_conndrops);
- rrdset_done(st);
- }
-
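-            // Charts configured as CONFIG_BOOLEAN_AUTO are created on demand: the first
-            // time any of their counters is non-zero (or zero metrics are explicitly
-            // enabled) the flag latches to CONFIG_BOOLEAN_YES.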
- if (do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO &&
- (tcpstat.tcps_rcvpackafterwin ||
- tcpstat.tcps_rcvafterclose ||
- tcpstat.tcps_rcvmemdrop ||
- tcpstat.tcps_persistdrop ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_connaborts = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv4.tcpconnaborts");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "tcpconnaborts"
- , NULL
- , "tcp"
- , NULL
- , "TCP Connection Aborts"
- , "connections/s"
- , "macos.plugin"
- , "sysctl"
- , 3010
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "TCPAbortOnData", tcpstat.tcps_rcvpackafterwin);
- rrddim_set(st, "TCPAbortOnClose", tcpstat.tcps_rcvafterclose);
- rrddim_set(st, "TCPAbortOnMemory", tcpstat.tcps_rcvmemdrop);
- rrddim_set(st, "TCPAbortOnTimeout", tcpstat.tcps_persistdrop);
- rrdset_done(st);
- }
-
- if (do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO &&
- (tcpstat.tcps_rcvoopack ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_ofo = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv4.tcpofo");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "tcpofo"
- , NULL
- , "tcp"
- , NULL
- , "TCP Out-Of-Order Queue"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 3050
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "TCPOFOQueue", tcpstat.tcps_rcvoopack);
- rrdset_done(st);
- }
-
- if (do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO &&
- (tcpstat.tcps_sc_sendcookie ||
- tcpstat.tcps_sc_recvcookie ||
- tcpstat.tcps_sc_zonefail ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_syscookies = CONFIG_BOOLEAN_YES;
-
- st = rrdset_find_active_localhost("ipv4.tcpsyncookies");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "tcpsyncookies"
- , NULL
- , "tcp"
- , NULL
- , "TCP SYN Cookies"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 3100
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "SyncookiesRecv", tcpstat.tcps_sc_recvcookie);
- rrddim_set(st, "SyncookiesSent", tcpstat.tcps_sc_sendcookie);
- rrddim_set(st, "SyncookiesFailed", tcpstat.tcps_sc_zonefail);
- rrdset_done(st);
- }
-
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
- if (do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO &&
- (tcpstat.tcps_ecn_recv_ce ||
- tcpstat.tcps_ecn_not_supported ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ecn = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv4.ecnpkts");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "ecnpkts"
- , NULL
- , "ecn"
- , NULL
- , "IPv4 ECN Statistics"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 8700
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InCEPkts", tcpstat.tcps_ecn_recv_ce);
- rrddim_set(st, "InNoECTPkts", tcpstat.tcps_ecn_not_supported);
- rrdset_done(st);
- }
-#endif
-
- }
- }
-
- // see http://net-snmp.sourceforge.net/docs/mibs/udp.html
- if (likely(do_udp_packets || do_udp_errors)) {
- if (unlikely(GETSYSCTL_BY_NAME("net.inet.udp.stats", udpstat))) {
- do_udp_packets = 0;
- collector_error("DISABLED: ipv4.udppackets");
- do_udp_errors = 0;
- collector_error("DISABLED: ipv4.udperrors");
- } else {
- if (likely(do_udp_packets)) {
- st = rrdset_find_active_localhost("ipv4.udppackets");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "udppackets"
- , NULL
- , "udp"
- , NULL
- , "IPv4 UDP Packets"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 2601
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InDatagrams", udpstat.udps_ipackets);
- rrddim_set(st, "OutDatagrams", udpstat.udps_opackets);
- rrdset_done(st);
- }
-
- if (likely(do_udp_errors)) {
- st = rrdset_find_active_localhost("ipv4.udperrors");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "udperrors"
- , NULL
- , "udp"
- , NULL
- , "IPv4 UDP Errors"
- , "events/s"
- , "macos.plugin"
- , "sysctl"
- , 2701
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
- rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#endif
- }
-
- rrddim_set(st, "InErrors", udpstat.udps_hdrops + udpstat.udps_badlen);
- rrddim_set(st, "NoPorts", udpstat.udps_noport);
- rrddim_set(st, "RcvbufErrors", udpstat.udps_fullsock);
-#if (defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090)
- rrddim_set(st, "InCsumErrors", udpstat.udps_badsum + udpstat.udps_nosum);
- rrddim_set(st, "IgnoredMulti", udpstat.udps_filtermcast);
-#else
- rrddim_set(st, "InCsumErrors", udpstat.udps_badsum);
-#endif
- rrdset_done(st);
- }
- }
- }
-
- if (likely(do_icmp_packets || do_icmpmsg)) {
- if (unlikely(GETSYSCTL_BY_NAME("net.inet.icmp.stats", icmpstat))) {
- do_icmp_packets = 0;
- collector_error("DISABLED: ipv4.icmp");
- collector_error("DISABLED: ipv4.icmp_errors");
- do_icmpmsg = 0;
- collector_error("DISABLED: ipv4.icmpmsg");
- } else {
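-            // Sum the per-type input/output histograms to get total ICMP message counts;
-            // malformed messages (bad code/length/checksum/too short) also count as received.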
- for (i = 0; i <= ICMP_MAXTYPE; i++) {
- icmp_total.msgs_in += icmpstat.icps_inhist[i];
- icmp_total.msgs_out += icmpstat.icps_outhist[i];
- }
- icmp_total.msgs_in += icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort;
-
- // --------------------------------------------------------------------
-
- if (likely(do_icmp_packets)) {
- st = rrdset_find_active_localhost("ipv4.icmp");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "icmp"
- , NULL
- , "icmp"
- , NULL
- , "IPv4 ICMP Packets"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 2602
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InMsgs", icmp_total.msgs_in);
- rrddim_set(st, "OutMsgs", icmp_total.msgs_out);
- rrdset_done(st);
-
- st = rrdset_find_active_localhost("ipv4.icmp_errors");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "icmp_errors"
- , NULL
- , "icmp"
- , NULL
- , "IPv4 ICMP Errors"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 2603
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InErrors", icmpstat.icps_badcode + icmpstat.icps_badlen + icmpstat.icps_checksum + icmpstat.icps_tooshort);
- rrddim_set(st, "OutErrors", icmpstat.icps_error);
- rrddim_set(st, "InCsumErrors", icmpstat.icps_checksum);
- rrdset_done(st);
- }
-
- if (likely(do_icmpmsg)) {
- st = rrdset_find_active_localhost("ipv4.icmpmsg");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "icmpmsg"
- , NULL
- , "icmp"
- , NULL
- , "IPv4 ICMP Messages"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 2604
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "InEchoReps", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutEchoReps", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InEchoReps", icmpstat.icps_inhist[ICMP_ECHOREPLY]);
- rrddim_set(st, "OutEchoReps", icmpstat.icps_outhist[ICMP_ECHOREPLY]);
- rrddim_set(st, "InEchos", icmpstat.icps_inhist[ICMP_ECHO]);
- rrddim_set(st, "OutEchos", icmpstat.icps_outhist[ICMP_ECHO]);
- rrdset_done(st);
- }
- }
- }
-
- // see also http://net-snmp.sourceforge.net/docs/mibs/ip.html
- if (likely(do_ip_packets || do_ip_fragsout || do_ip_fragsin || do_ip_errors)) {
- if (unlikely(GETSYSCTL_BY_NAME("net.inet.ip.stats", ipstat))) {
- do_ip_packets = 0;
- collector_error("DISABLED: ipv4.packets");
- do_ip_fragsout = 0;
- collector_error("DISABLED: ipv4.fragsout");
- do_ip_fragsin = 0;
- collector_error("DISABLED: ipv4.fragsin");
- do_ip_errors = 0;
- collector_error("DISABLED: ipv4.errors");
- } else {
- if (likely(do_ip_packets)) {
- st = rrdset_find_active_localhost("ipv4.packets");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "packets"
- , NULL
- , "packets"
- , NULL
- , "IPv4 Packets"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 3000
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "OutRequests", ipstat.ips_localout);
- rrddim_set(st, "InReceives", ipstat.ips_total);
- rrddim_set(st, "ForwDatagrams", ipstat.ips_forward);
- rrddim_set(st, "InDelivers", ipstat.ips_delivered);
- rrdset_done(st);
- }
-
- if (likely(do_ip_fragsout)) {
- st = rrdset_find_active_localhost("ipv4.fragsout");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "fragsout"
- , NULL
- , "fragments"
- , NULL
- , "IPv4 Fragments Sent"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 3010
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "FragOKs", ipstat.ips_fragmented);
- rrddim_set(st, "FragFails", ipstat.ips_cantfrag);
- rrddim_set(st, "FragCreates", ipstat.ips_ofragments);
- rrdset_done(st);
- }
-
- if (likely(do_ip_fragsin)) {
- st = rrdset_find_active_localhost("ipv4.fragsin");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "fragsin"
- , NULL
- , "fragments"
- , NULL
- , "IPv4 Fragments Reassembly"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 3011
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "ReasmOKs", ipstat.ips_fragments);
- rrddim_set(st, "ReasmFails", ipstat.ips_fragdropped);
- rrddim_set(st, "ReasmReqds", ipstat.ips_reassembled);
- rrdset_done(st);
- }
-
- if (likely(do_ip_errors)) {
- st = rrdset_find_active_localhost("ipv4.errors");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "errors"
- , NULL
- , "errors"
- , NULL
- , "IPv4 Errors"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 3002
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InDiscards", ipstat.ips_badsum + ipstat.ips_tooshort + ipstat.ips_toosmall + ipstat.ips_toolong);
- rrddim_set(st, "OutDiscards", ipstat.ips_odropped);
- rrddim_set(st, "InHdrErrors", ipstat.ips_badhlen + ipstat.ips_badlen + ipstat.ips_badoptions + ipstat.ips_badvers);
- rrddim_set(st, "InAddrErrors", ipstat.ips_badaddr);
- rrddim_set(st, "InUnknownProtos", ipstat.ips_noproto);
- rrddim_set(st, "OutNoRoutes", ipstat.ips_noroute);
- rrdset_done(st);
- }
- }
- }
-
- if (likely(do_ip6_packets || do_ip6_fragsout || do_ip6_fragsin || do_ip6_errors)) {
- if (unlikely(GETSYSCTL_BY_NAME("net.inet6.ip6.stats", ip6stat))) {
- do_ip6_packets = 0;
- collector_error("DISABLED: ipv6.packets");
- do_ip6_fragsout = 0;
- collector_error("DISABLED: ipv6.fragsout");
- do_ip6_fragsin = 0;
- collector_error("DISABLED: ipv6.fragsin");
- do_ip6_errors = 0;
- collector_error("DISABLED: ipv6.errors");
- } else {
- if (do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO &&
- (ip6stat.ip6s_localout ||
- ip6stat.ip6s_total ||
- ip6stat.ip6s_forward ||
- ip6stat.ip6s_delivered ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_packets = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv6.packets");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "packets"
- , NULL
- , "packets"
- , NULL
- , "IPv6 Packets"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 3000
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "forwarded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "delivers", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "sent", ip6stat.ip6s_localout);
- rrddim_set(st, "received", ip6stat.ip6s_total);
- rrddim_set(st, "forwarded", ip6stat.ip6s_forward);
- rrddim_set(st, "delivers", ip6stat.ip6s_delivered);
- rrdset_done(st);
- }
-
- if (do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO &&
- (ip6stat.ip6s_fragmented ||
- ip6stat.ip6s_cantfrag ||
- ip6stat.ip6s_ofragments ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_fragsout = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv6.fragsout");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "fragsout"
- , NULL
- , "fragments"
- , NULL
- , "IPv6 Fragments Sent"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 3010
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "ok", ip6stat.ip6s_fragmented);
- rrddim_set(st, "failed", ip6stat.ip6s_cantfrag);
- rrddim_set(st, "all", ip6stat.ip6s_ofragments);
- rrdset_done(st);
- }
-
- if (do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO &&
- (ip6stat.ip6s_reassembled ||
- ip6stat.ip6s_fragdropped ||
- ip6stat.ip6s_fragtimeout ||
- ip6stat.ip6s_fragments ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_fragsin = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv6.fragsin");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "fragsin"
- , NULL
- , "fragments"
- , NULL
- , "IPv6 Fragments Reassembly"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 3011
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "timeout", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "ok", ip6stat.ip6s_reassembled);
- rrddim_set(st, "failed", ip6stat.ip6s_fragdropped);
- rrddim_set(st, "timeout", ip6stat.ip6s_fragtimeout);
- rrddim_set(st, "all", ip6stat.ip6s_fragments);
- rrdset_done(st);
- }
-
- if (do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO &&
- (ip6stat.ip6s_toosmall ||
- ip6stat.ip6s_odropped ||
- ip6stat.ip6s_badoptions ||
- ip6stat.ip6s_badvers ||
- ip6stat.ip6s_exthdrtoolong ||
- ip6stat.ip6s_sources_none ||
- ip6stat.ip6s_tooshort ||
- ip6stat.ip6s_cantforward ||
- ip6stat.ip6s_noroute ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_errors = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv6.errors");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "errors"
- , NULL
- , "errors"
- , NULL
- , "IPv6 Errors"
- , "packets/s"
- , "macos.plugin"
- , "sysctl"
- , 3002
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InDiscards", ip6stat.ip6s_toosmall);
- rrddim_set(st, "OutDiscards", ip6stat.ip6s_odropped);
-
- rrddim_set(st, "InHdrErrors",
- ip6stat.ip6s_badoptions + ip6stat.ip6s_badvers + ip6stat.ip6s_exthdrtoolong);
- rrddim_set(st, "InAddrErrors", ip6stat.ip6s_sources_none);
- rrddim_set(st, "InTruncatedPkts", ip6stat.ip6s_tooshort);
- rrddim_set(st, "InNoRoutes", ip6stat.ip6s_cantforward);
-
- rrddim_set(st, "OutNoRoutes", ip6stat.ip6s_noroute);
- rrdset_done(st);
- }
- }
- }
-
- if (likely(do_icmp6 || do_icmp6_redir || do_icmp6_errors || do_icmp6_echos || do_icmp6_router || do_icmp6_neighbor || do_icmp6_types)) {
- if (unlikely(GETSYSCTL_BY_NAME("net.inet6.icmp6.stats", icmp6stat))) {
- do_icmp6 = 0;
- collector_error("DISABLED: ipv6.icmp");
- } else {
- for (i = 0; i <= ICMP6_MAXTYPE; i++) {
- icmp6_total.msgs_in += icmp6stat.icp6s_inhist[i];
- icmp6_total.msgs_out += icmp6stat.icp6s_outhist[i];
- }
- icmp6_total.msgs_in += icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort;
- if (do_icmp6 == CONFIG_BOOLEAN_YES || (do_icmp6 == CONFIG_BOOLEAN_AUTO &&
- (icmp6_total.msgs_in ||
- icmp6_total.msgs_out ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6 = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv6.icmp");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "icmp"
- , NULL
- , "icmp"
- , NULL
- , "IPv6 ICMP Messages"
- , "messages/s"
- , "macos.plugin"
- , "sysctl"
- , 10000
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
-                rrddim_set(st, "received", icmp6_total.msgs_in);
-                rrddim_set(st, "sent", icmp6_total.msgs_out);
- rrdset_done(st);
- }
-
- if (do_icmp6_redir == CONFIG_BOOLEAN_YES || (do_icmp6_redir == CONFIG_BOOLEAN_AUTO &&
- (icmp6stat.icp6s_inhist[ND_REDIRECT] ||
- icmp6stat.icp6s_outhist[ND_REDIRECT] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6_redir = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv6.icmpredir");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "icmpredir"
- , NULL
- , "icmp"
- , NULL
- , "IPv6 ICMP Redirects"
- , "redirects/s"
- , "macos.plugin"
- , "sysctl"
- , 10050
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
-                rrddim_set(st, "received", icmp6stat.icp6s_inhist[ND_REDIRECT]);
-                rrddim_set(st, "sent", icmp6stat.icp6s_outhist[ND_REDIRECT]);
- rrdset_done(st);
- }
-
- if (do_icmp6_errors == CONFIG_BOOLEAN_YES || (do_icmp6_errors == CONFIG_BOOLEAN_AUTO &&
- (icmp6stat.icp6s_badcode ||
- icmp6stat.icp6s_badlen ||
- icmp6stat.icp6s_checksum ||
- icmp6stat.icp6s_tooshort ||
- icmp6stat.icp6s_error ||
- icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH] ||
- icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED] ||
- icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB] ||
- icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH] ||
- icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED] ||
- icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6_errors = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv6.icmperrors");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "icmperrors"
- , NULL
- , "icmp"
- , NULL
- , "IPv6 ICMP Errors"
- , "errors/s"
- , "macos.plugin"
- , "sysctl"
- , 10100
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InErrors", icmp6stat.icp6s_badcode + icmp6stat.icp6s_badlen + icmp6stat.icp6s_checksum + icmp6stat.icp6s_tooshort);
- rrddim_set(st, "OutErrors", icmp6stat.icp6s_error);
- rrddim_set(st, "InCsumErrors", icmp6stat.icp6s_checksum);
- rrddim_set(st, "InDestUnreachs", icmp6stat.icp6s_inhist[ICMP6_DST_UNREACH]);
- rrddim_set(st, "InPktTooBigs", icmp6stat.icp6s_badlen);
- rrddim_set(st, "InTimeExcds", icmp6stat.icp6s_inhist[ICMP6_TIME_EXCEEDED]);
- rrddim_set(st, "InParmProblems", icmp6stat.icp6s_inhist[ICMP6_PARAM_PROB]);
- rrddim_set(st, "OutDestUnreachs", icmp6stat.icp6s_outhist[ICMP6_DST_UNREACH]);
- rrddim_set(st, "OutTimeExcds", icmp6stat.icp6s_outhist[ICMP6_TIME_EXCEEDED]);
- rrddim_set(st, "OutParmProblems", icmp6stat.icp6s_outhist[ICMP6_PARAM_PROB]);
- rrdset_done(st);
- }
-
- if (do_icmp6_echos == CONFIG_BOOLEAN_YES || (do_icmp6_echos == CONFIG_BOOLEAN_AUTO &&
- (icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST] ||
- icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST] ||
- icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY] ||
- icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6_echos = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv6.icmpechos");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "icmpechos"
- , NULL
- , "icmp"
- , NULL
- , "IPv6 ICMP Echo"
- , "messages/s"
- , "macos.plugin"
- , "sysctl"
- , 10200
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InEchos", icmp6stat.icp6s_inhist[ICMP6_ECHO_REQUEST]);
- rrddim_set(st, "OutEchos", icmp6stat.icp6s_outhist[ICMP6_ECHO_REQUEST]);
- rrddim_set(st, "InEchoReplies", icmp6stat.icp6s_inhist[ICMP6_ECHO_REPLY]);
- rrddim_set(st, "OutEchoReplies", icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]);
- rrdset_done(st);
- }
-
- if (do_icmp6_router == CONFIG_BOOLEAN_YES || (do_icmp6_router == CONFIG_BOOLEAN_AUTO &&
- (icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT] ||
- icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT] ||
- icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT] ||
- icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6_router = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv6.icmprouter");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "icmprouter"
- , NULL
- , "icmp"
- , NULL
- , "IPv6 Router Messages"
- , "messages/s"
- , "macos.plugin"
- , "sysctl"
- , 10400
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InSolicits", icmp6stat.icp6s_inhist[ND_ROUTER_SOLICIT]);
- rrddim_set(st, "OutSolicits", icmp6stat.icp6s_outhist[ND_ROUTER_SOLICIT]);
- rrddim_set(st, "InAdvertisements", icmp6stat.icp6s_inhist[ND_ROUTER_ADVERT]);
- rrddim_set(st, "OutAdvertisements", icmp6stat.icp6s_outhist[ND_ROUTER_ADVERT]);
- rrdset_done(st);
- }
-
- if (do_icmp6_neighbor == CONFIG_BOOLEAN_YES || (do_icmp6_neighbor == CONFIG_BOOLEAN_AUTO &&
- (icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT] ||
- icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT] ||
- icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT] ||
- icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6_neighbor = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv6.icmpneighbor");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "icmpneighbor"
- , NULL
- , "icmp"
- , NULL
- , "IPv6 Neighbor Messages"
- , "messages/s"
- , "macos.plugin"
- , "sysctl"
- , 10500
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InSolicits", icmp6stat.icp6s_inhist[ND_NEIGHBOR_SOLICIT]);
- rrddim_set(st, "OutSolicits", icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT]);
- rrddim_set(st, "InAdvertisements", icmp6stat.icp6s_inhist[ND_NEIGHBOR_ADVERT]);
- rrddim_set(st, "OutAdvertisements", icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]);
-                rrdset_done(st);
-            }
-
- if (do_icmp6_types == CONFIG_BOOLEAN_YES || (do_icmp6_types == CONFIG_BOOLEAN_AUTO &&
- (icmp6stat.icp6s_inhist[1] ||
- icmp6stat.icp6s_inhist[128] ||
- icmp6stat.icp6s_inhist[129] ||
- icmp6stat.icp6s_inhist[136] ||
- icmp6stat.icp6s_outhist[1] ||
- icmp6stat.icp6s_outhist[128] ||
- icmp6stat.icp6s_outhist[129] ||
- icmp6stat.icp6s_outhist[133] ||
- icmp6stat.icp6s_outhist[135] ||
- icmp6stat.icp6s_outhist[136] ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp6_types = CONFIG_BOOLEAN_YES;
- st = rrdset_find_active_localhost("ipv6.icmptypes");
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "icmptypes"
- , NULL
- , "icmp"
- , NULL
- , "IPv6 ICMP Types"
- , "messages/s"
- , "macos.plugin"
- , "sysctl"
- , 10700
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "InType1", icmp6stat.icp6s_inhist[1]);
- rrddim_set(st, "InType128", icmp6stat.icp6s_inhist[128]);
- rrddim_set(st, "InType129", icmp6stat.icp6s_inhist[129]);
- rrddim_set(st, "InType136", icmp6stat.icp6s_inhist[136]);
- rrddim_set(st, "OutType1", icmp6stat.icp6s_outhist[1]);
- rrddim_set(st, "OutType128", icmp6stat.icp6s_outhist[128]);
- rrddim_set(st, "OutType129", icmp6stat.icp6s_outhist[129]);
- rrddim_set(st, "OutType133", icmp6stat.icp6s_outhist[133]);
- rrddim_set(st, "OutType135", icmp6stat.icp6s_outhist[135]);
- rrddim_set(st, "OutType143", icmp6stat.icp6s_outhist[143]);
- rrdset_done(st);
- }
- }
- }
-
- if (likely(do_uptime)) {
- if (unlikely(GETSYSCTL_BY_NAME("kern.boottime", boot_time))) {
- do_uptime = 0;
- collector_error("DISABLED: system.uptime");
- } else {
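-            // uptime = current wall-clock time minus the kernel boot time (kern.boottime)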
- clock_gettime(CLOCK_REALTIME, &cur_time);
- st = rrdset_find_active_localhost("system.uptime");
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "system"
- , "uptime"
- , NULL
- , "uptime"
- , NULL
- , "System Uptime"
- , "seconds"
- , "macos.plugin"
- , "sysctl"
- , 1000
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrddim_add(st, "uptime", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set(st, "uptime", cur_time.tv_sec - boot_time.tv_sec);
- rrdset_done(st);
- }
- }
-
- return 0;
-}
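The sysctl collector above leans on a `GETSYSCTL_BY_NAME` macro whose definition falls outside this hunk. A minimal sketch of the likely shape, assuming it wraps `sysctlbyname(3)` and checks that the kernel returned exactly the expected number of bytes; the helper name and error messages below are illustrative, not the upstream definition:

```c
#include <sys/types.h>
#include <sys/sysctl.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Fetch a fixed-size kernel structure by sysctl name; returns 0 on success. */
static int getsysctl_by_name(const char *name, void *ptr, size_t len) {
    size_t nlen = len;
    if (sysctlbyname(name, ptr, &nlen, NULL, 0) == -1) {
        fprintf(stderr, "sysctl(%s) failed: %s\n", name, strerror(errno));
        return 1;
    }
    if (nlen != len) {
        /* the kernel returned a different size than the caller expected */
        fprintf(stderr, "sysctl(%s): expected %zu bytes, got %zu\n", name, len, nlen);
        return 1;
    }
    return 0;
}

#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var))
```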
diff --git a/collectors/macos.plugin/metadata.yaml b/collectors/macos.plugin/metadata.yaml
deleted file mode 100644
index cc159ad1f..000000000
--- a/collectors/macos.plugin/metadata.yaml
+++ /dev/null
@@ -1,727 +0,0 @@
-plugin_name: macos.plugin
-modules:
- - meta:
- plugin_name: macos.plugin
- module_name: mach_smi
- monitored_instance:
- name: macOS
- link: "https://www.apple.com/macos"
- categories:
- - data-collection.macos-systems
- icon_filename: "macos.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - macos
- - apple
- - darwin
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor macOS metrics for efficient operating system performance."
- method_description: |
- The plugin uses three different methods to collect data:
- - The function `sysctlbyname` is called to collect network, swap, loadavg, and boot time.
-        - The function `host_statistic` is called to collect CPU and virtual memory data.
-        - The function `IOServiceGetMatchingServices` is called to collect storage information.
- supported_platforms:
- include:
- - macOS
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- description: "The netdata main configuration file."
- options:
- description: |
- There are three sections in the file which you can configure:
-
- - `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time.
- - `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and Virtual memory.
-          - `[plugin:macos:iokit]` - Enable or disable monitoring for storage devices.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: enable load average
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of load average metrics (load1, load5, load15).
- default_value: yes
- required: false
- - name: system swap
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of system swap metrics (free, used).
- default_value: yes
- required: false
- - name: bandwidth
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of network bandwidth metrics (received, sent).
- default_value: yes
- required: false
- - name: ipv4 TCP packets
- description: Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent).
- section_name: plugin:macos:sysctl
- default_value: yes
- required: false
- - name: ipv4 TCP errors
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments).
- default_value: yes
- required: false
- - name: ipv4 TCP handshake issues
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails).
- default_value: yes
- required: false
- - name: ECN packets
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts).
- default_value: auto
- required: false
- - name: TCP SYN cookies
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed).
- default_value: auto
- required: false
- - name: TCP out-of-order queue
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of TCP out-of-order queue metrics (inqueue).
- default_value: auto
- required: false
- - name: TCP connection aborts
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout).
- default_value: auto
- required: false
- - name: ipv4 UDP packets
- section_name: plugin:macos:sysctl
-              description: Enable or disable monitoring of ipv4 UDP packets metrics (sent, received).
- default_value: yes
- required: false
- - name: ipv4 UDP errors
- section_name: plugin:macos:sysctl
-              description: Enable or disable monitoring of ipv4 UDP errors metrics (Received Buffer errors, Input Errors, No Ports, IN Checksum Errors, Ignore Multi).
- default_value: yes
- required: false
- - name: ipv4 icmp packets
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error).
- default_value: yes
- required: false
- - name: ipv4 icmp messages
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum).
- default_value: yes
- required: false
- - name: ipv4 packets
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered).
- default_value: yes
- required: false
- - name: ipv4 fragments sent
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of IPv4 fragments sent metrics (ok, fails, creates).
- default_value: yes
- required: false
- - name: ipv4 fragments assembly
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all).
- default_value: yes
- required: false
- - name: ipv4 errors
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes).
- default_value: yes
- required: false
- - name: ipv6 packets
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered).
- default_value: auto
- required: false
- - name: ipv6 fragments sent
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all).
- default_value: auto
- required: false
- - name: ipv6 fragments assembly
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all).
- default_value: auto
- required: false
- - name: ipv6 errors
- section_name: plugin:macos:sysctl
-              description: Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncated Packets, I/O No Routes).
- default_value: auto
- required: false
- - name: icmp
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of ICMP metrics (sent, received).
- default_value: auto
- required: false
- - name: icmp redirects
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of ICMP redirects metrics (received, sent).
- default_value: auto
- required: false
- - name: icmp errors
- section_name: plugin:macos:sysctl
-              description: Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Time Exceeds, Out Parm Problems).
- default_value: auto
- required: false
- - name: icmp echos
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply).
- default_value: auto
- required: false
- - name: icmp router
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements).
- default_value: auto
- required: false
- - name: icmp neighbor
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements).
- default_value: auto
- required: false
- - name: icmp types
- section_name: plugin:macos:sysctl
-              description: Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type143).
- default_value: auto
- required: false
- - name: space usage for all disks
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root).
- default_value: yes
- required: false
- - name: inodes usage for all disks
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root).
- default_value: yes
- required: false
- - name: bandwidth
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of bandwidth metrics (received, sent).
- default_value: yes
- required: false
- - name: system uptime
- section_name: plugin:macos:sysctl
- description: Enable or disable monitoring of system uptime metrics (uptime).
- default_value: yes
- required: false
- - name: cpu utilization
- section_name: plugin:macos:mach_smi
-              description: Enable or disable monitoring of CPU utilization metrics (user, nice, system, idle).
- default_value: yes
- required: false
- - name: system ram
- section_name: plugin:macos:mach_smi
- description: Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free).
- default_value: yes
- required: false
- - name: swap i/o
- section_name: plugin:macos:mach_smi
- description: Enable or disable monitoring of SWAP I/O metrics (I/O Swap).
- default_value: yes
- required: false
- - name: memory page faults
- section_name: plugin:macos:mach_smi
- description: Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge).
- default_value: yes
- required: false
- - name: disk i/o
- section_name: plugin:macos:iokit
- description: Enable or disable monitoring of disk I/O metrics (In, Out).
- default_value: yes
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Disable swap monitoring.
- folding:
- enabled: true
-            description: A basic example that disables swap monitoring
- config: |
- [plugin:macos:sysctl]
- system swap = no
- [plugin:macos:mach_smi]
- swap i/o = no
-          - name: Disable the complete Machine SMI section.
-            folding:
-              enabled: true
-            description: A basic example that disables the entire mach_smi section
- config: |
- [plugin:macos:mach_smi]
- cpu utilization = no
- system ram = no
- swap i/o = no
- memory page faults = no
- disk i/o = no
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: interface_speed
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.net
- info: network interface ${label:device} current speed
- os: "*"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: |
- These metrics refer to hardware and network monitoring.
- labels: []
- metrics:
- - name: system.cpu
- description: Total CPU utilization
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: user
- - name: nice
- - name: system
- - name: idle
- - name: system.ram
- description: System RAM
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: active
- - name: wired
- - name: throttled
- - name: compressor
- - name: inactive
- - name: purgeable
- - name: speculative
- - name: free
- - name: mem.swapio
- description: Swap I/O
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: io
- - name: out
- - name: mem.pgfaults
- description: Memory Page Faults
- unit: "faults/s"
- chart_type: line
- dimensions:
- - name: memory
- - name: cow
- - name: pagein
- - name: pageout
- - name: compress
- - name: decompress
- - name: zero_fill
- - name: reactivate
- - name: purge
- - name: system.load
- description: System Load Average
- unit: "load"
- chart_type: line
- dimensions:
- - name: load1
- - name: load5
- - name: load15
- - name: mem.swap
- description: System Swap
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: used
- - name: system.ipv4
- description: IPv4 Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.tcppackets
- description: IPv4 TCP Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.tcperrors
- description: IPv4 TCP Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InErrs
- - name: InCsumErrors
- - name: RetransSegs
- - name: ipv4.tcphandshake
- description: IPv4 TCP Handshake Issues
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: EstabResets
- - name: ActiveOpens
- - name: PassiveOpens
- - name: AttemptFails
- - name: ipv4.tcpconnaborts
- description: TCP Connection Aborts
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: baddata
- - name: userclosed
- - name: nomemory
- - name: timeout
- - name: ipv4.tcpofo
- description: TCP Out-Of-Order Queue
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: inqueue
- - name: ipv4.tcpsyncookies
- description: TCP SYN Cookies
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: failed
- - name: ipv4.ecnpkts
- description: IPv4 ECN Statistics
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: CEP
- - name: NoECTP
- - name: ipv4.udppackets
- description: IPv4 UDP Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.udperrors
- description: IPv4 UDP Errors
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: RcvbufErrors
- - name: InErrors
- - name: NoPorts
- - name: InCsumErrors
- - name: IgnoredMulti
- - name: ipv4.icmp
- description: IPv4 ICMP Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.icmp_errors
- description: IPv4 ICMP Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InErrors
- - name: OutErrors
- - name: InCsumErrors
- - name: ipv4.icmpmsg
- description: IPv4 ICMP Messages
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InEchoReps
- - name: OutEchoReps
- - name: InEchos
- - name: OutEchos
- - name: ipv4.packets
- description: IPv4 Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: forwarded
- - name: delivered
- - name: ipv4.fragsout
- description: IPv4 Fragments Sent
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: created
- - name: ipv4.fragsin
- description: IPv4 Fragments Reassembly
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: all
- - name: ipv4.errors
- description: IPv4 Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InDiscards
- - name: OutDiscards
- - name: InHdrErrors
- - name: OutNoRoutes
- - name: InAddrErrors
- - name: InUnknownProtos
- - name: ipv6.packets
- description: IPv6 Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: forwarded
- - name: delivers
- - name: ipv6.fragsout
- description: IPv6 Fragments Sent
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: all
- - name: ipv6.fragsin
- description: IPv6 Fragments Reassembly
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: timeout
- - name: all
- - name: ipv6.errors
- description: IPv6 Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InDiscards
- - name: OutDiscards
- - name: InHdrErrors
- - name: InAddrErrors
- - name: InTruncatedPkts
- - name: InNoRoutes
- - name: OutNoRoutes
- - name: ipv6.icmp
- description: IPv6 ICMP Messages
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.icmpredir
- description: IPv6 ICMP Redirects
- unit: "redirects/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.icmperrors
- description: IPv6 ICMP Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: InErrors
- - name: OutErrors
- - name: InCsumErrors
- - name: InDestUnreachs
- - name: InPktTooBigs
- - name: InTimeExcds
- - name: InParmProblems
- - name: OutDestUnreachs
- - name: OutTimeExcds
- - name: OutParmProblems
- - name: ipv6.icmpechos
- description: IPv6 ICMP Echo
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InEchos
- - name: OutEchos
- - name: InEchoReplies
- - name: OutEchoReplies
- - name: ipv6.icmprouter
- description: IPv6 Router Messages
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InSolicits
- - name: OutSolicits
- - name: InAdvertisements
- - name: OutAdvertisements
- - name: ipv6.icmpneighbor
- description: IPv6 Neighbor Messages
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InSolicits
- - name: OutSolicits
- - name: InAdvertisements
- - name: OutAdvertisements
- - name: ipv6.icmptypes
- description: IPv6 ICMP Types
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InType1
- - name: InType128
- - name: InType129
- - name: InType136
- - name: OutType1
- - name: OutType128
- - name: OutType129
- - name: OutType133
- - name: OutType135
- - name: OutType143
- - name: system.uptime
- description: System Uptime
- unit: "seconds"
- chart_type: line
- dimensions:
- - name: uptime
- - name: system.io
- description: Disk I/O
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: in
- - name: out
- - name: disk
- description: ""
- labels: []
- metrics:
- - name: disk.io
- description: Disk I/O Bandwidth
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: writes
- - name: disk.ops
- description: Disk Completed I/O Operations
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: read
- - name: writes
- - name: disk.util
- description: Disk Utilization Time
- unit: "% of time working"
- chart_type: area
- dimensions:
- - name: utilization
- - name: disk.iotime
- description: Disk Total I/O Time
- unit: "milliseconds/s"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
- - name: disk.await
- description: Average Completed I/O Operation Time
- unit: "milliseconds/operation"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
- - name: disk.avgsz
- description: Average Completed I/O Operation Bandwidth
- unit: "KiB/operation"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
- - name: disk.svctm
- description: Average Service Time
- unit: "milliseconds/operation"
- chart_type: line
- dimensions:
- - name: svctm
- - name: mount point
- description: ""
- labels: []
- metrics:
- - name: disk.space
- description: Disk Space Usage for {mounted dir} [{mounted filesystem}]
- unit: "GiB"
- chart_type: stacked
- dimensions:
- - name: avail
- - name: used
- - name: reserved_for_root
- - name: disk.inodes
- description: Disk Files (inodes) Usage for {mounted dir} [{mounted filesystem}]
- unit: "inodes"
- chart_type: stacked
- dimensions:
- - name: avail
- - name: used
- - name: reserved_for_root
- - name: network device
- description: ""
- labels: []
- metrics:
- - name: net.net
- description: Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: net.packets
- description: Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: multicast_received
- - name: multicast_sent
- - name: net.errors
- description: Interface Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: inbound
- - name: outbound
- - name: net.drops
- description: Interface Drops
- unit: "drops/s"
- chart_type: line
- dimensions:
- - name: inbound
- - name: net.events
- description: Network Interface Events
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: frames
- - name: collisions
- - name: carrier
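Each option documented above corresponds to a key in a `netdata.conf` section. A hedged sketch of how a collector typically reads one of the tri-state (`yes`/`no`/`auto`) options, following the `do_ip6_*` pattern in the sysctl code earlier in this patch; treating `config_get_boolean_ondemand()` as the tri-state reader is an assumption, and the wrapper name is illustrative:

```c
#include "daemon/common.h"

static int do_ip6_packets = CONFIG_BOOLEAN_AUTO;

/* Illustrative helper: read the "ipv6 packets" option from [plugin:macos:sysctl].
 * CONFIG_BOOLEAN_AUTO means the chart is created only once a non-zero value
 * (or "netdata zero metrics enabled") is seen, as the checks above demonstrate. */
static void read_ipv6_packets_option(void) {
    do_ip6_packets = config_get_boolean_ondemand(
        "plugin:macos:sysctl", "ipv6 packets", CONFIG_BOOLEAN_AUTO);
}
```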
diff --git a/collectors/macos.plugin/plugin_macos.c b/collectors/macos.plugin/plugin_macos.c
deleted file mode 100644
index 3aaa46c72..000000000
--- a/collectors/macos.plugin/plugin_macos.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_macos.h"
-
-static struct macos_module {
- const char *name;
- const char *dim;
-
- int enabled;
-
- int (*func)(int update_every, usec_t dt);
-
- RRDDIM *rd;
-
-} macos_modules[] = {
- {.name = "sysctl", .dim = "sysctl", .enabled = 1, .func = do_macos_sysctl},
- {.name = "mach system management interface", .dim = "mach_smi", .enabled = 1, .func = do_macos_mach_smi},
- {.name = "iokit", .dim = "iokit", .enabled = 1, .func = do_macos_iokit},
-
- // the terminator of this array
- {.name = NULL, .dim = NULL, .enabled = 0, .func = NULL}
-};
-
-#if WORKER_UTILIZATION_MAX_JOB_TYPES < 3
-#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 3
-#endif
-
-static void macos_main_cleanup(void *ptr)
-{
- worker_unregister();
-
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- collector_info("cleaning up...");
-
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-void *macos_main(void *ptr)
-{
- worker_register("MACOS");
-
- netdata_thread_cleanup_push(macos_main_cleanup, ptr);
-
- // check the enabled status for each module
- for (int i = 0; macos_modules[i].name; i++) {
- struct macos_module *pm = &macos_modules[i];
-
- pm->enabled = config_get_boolean("plugin:macos", pm->name, pm->enabled);
- pm->rd = NULL;
-
- worker_register_job_name(i, macos_modules[i].dim);
- }
-
- usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- while (!netdata_exit) {
- worker_is_idle();
- usec_t hb_dt = heartbeat_next(&hb, step);
-
- for (int i = 0; macos_modules[i].name; i++) {
- struct macos_module *pm = &macos_modules[i];
- if (unlikely(!pm->enabled))
- continue;
-
- netdata_log_debug(D_PROCNETDEV_LOOP, "macos calling %s.", pm->name);
-
- worker_is_busy(i);
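-            // a module that returns non-zero disables itself for the rest of the run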
- pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt);
-
- if (unlikely(netdata_exit))
- break;
- }
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
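Because `macos_modules[]` is a NULL-terminated table, extending the plugin amounts to adding an entry with a matching `int (*)(int update_every, usec_t dt)` callback before the terminator. A hypothetical sketch (`do_macos_example` is not an upstream module):

```c
// Hypothetical fourth module; the callback must return 0 on success, since
// macos_main() disables a module permanently when its function returns non-zero.
int do_macos_example(int update_every, usec_t dt);

static struct macos_module example_modules[] = {
    {.name = "sysctl", .dim = "sysctl", .enabled = 1, .func = do_macos_sysctl},
    {.name = "mach system management interface", .dim = "mach_smi", .enabled = 1, .func = do_macos_mach_smi},
    {.name = "iokit", .dim = "iokit", .enabled = 1, .func = do_macos_iokit},
    {.name = "example", .dim = "example", .enabled = 1, .func = do_macos_example},

    // the terminator of this array
    {.name = NULL, .dim = NULL, .enabled = 0, .func = NULL}
};
```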
diff --git a/collectors/macos.plugin/plugin_macos.h b/collectors/macos.plugin/plugin_macos.h
deleted file mode 100644
index 2c673a224..000000000
--- a/collectors/macos.plugin/plugin_macos.h
+++ /dev/null
@@ -1,12 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGIN_MACOS_H
-#define NETDATA_PLUGIN_MACOS_H 1
-
-#include "daemon/common.h"
-
-int do_macos_sysctl(int update_every, usec_t dt);
-int do_macos_mach_smi(int update_every, usec_t dt);
-int do_macos_iokit(int update_every, usec_t dt);
-
-#endif /* NETDATA_PLUGIN_MACOS_H */
diff --git a/collectors/nfacct.plugin/Makefile.am b/collectors/nfacct.plugin/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/collectors/nfacct.plugin/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md
deleted file mode 120000
index ea320d139..000000000
--- a/collectors/nfacct.plugin/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/netfilter.md \ No newline at end of file
diff --git a/collectors/nfacct.plugin/integrations/netfilter.md b/collectors/nfacct.plugin/integrations/netfilter.md
deleted file mode 100644
index 831b6fb5b..000000000
--- a/collectors/nfacct.plugin/integrations/netfilter.md
+++ /dev/null
@@ -1,132 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/nfacct.plugin/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/nfacct.plugin/metadata.yaml"
-sidebar_label: "Netfilter"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Firewall"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Netfilter
-
-
-<img src="https://netdata.cloud/img/netfilter.png" width="150"/>
-
-
-Plugin: nfacct.plugin
-Module: nfacct.plugin
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Netfilter metrics for optimal packet filtering and manipulation. Keep tabs on packet counts, dropped packets, and error rates to secure network operations.
-
-Netdata uses libmnl (https://www.netfilter.org/projects/libmnl/index.html) to collect information.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-This plugin needs setuid.
-
-### Default Behavior
-
-#### Auto-Detection
-
-This plugin uses a netlink socket to connect to netfilter and collect data.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Netfilter instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| netfilter.netlink_new | new, ignore, invalid | connections/s |
-| netfilter.netlink_changes | insert, delete, delete_list | changes/s |
-| netfilter.netlink_search | searched, search_restart, found | searches/s |
-| netfilter.netlink_errors | icmp_error, insert_failed, drop, early_drop | events/s |
-| netfilter.netlink_expect | created, deleted, new | expectations/s |
-| netfilter.nfacct_packets | a dimension per nfacct object | packets/s |
-| netfilter.nfacct_bytes | a dimension per nfacct object | kilobytes/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Install required packages
-
-Install `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:nfacct]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 1 | no |
-| command options | Additional parameters for the collector | | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
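The Overview above states that the collector talks to netfilter over a socket via libmnl. A reduced sketch of that connection step, mirroring `nfstat_init()` in `plugin_nfacct.c` (deleted later in this patch), with error handling trimmed to the essentials:

```c
#include <libmnl/libmnl.h>
#include <linux/netlink.h>
#include <stdio.h>

/* Open and bind a netlink socket to the netfilter subsystem; returns 0 on success. */
int open_netfilter_socket(struct mnl_socket **out) {
    struct mnl_socket *mnl = mnl_socket_open(NETLINK_NETFILTER);
    if (!mnl) {
        perror("mnl_socket_open");
        return 1;
    }
    if (mnl_socket_bind(mnl, 0, MNL_SOCKET_AUTOPID) < 0) {
        perror("mnl_socket_bind");
        mnl_socket_close(mnl);
        return 1;
    }
    *out = mnl;
    return 0;
}
```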
diff --git a/collectors/nfacct.plugin/metadata.yaml b/collectors/nfacct.plugin/metadata.yaml
deleted file mode 100644
index 943471a37..000000000
--- a/collectors/nfacct.plugin/metadata.yaml
+++ /dev/null
@@ -1,133 +0,0 @@
-plugin_name: nfacct.plugin
-modules:
- - meta:
- plugin_name: nfacct.plugin
- module_name: nfacct.plugin
- monitored_instance:
- name: Netfilter
- link: 'https://www.netfilter.org/'
- categories:
- - data-collection.linux-systems.firewall-metrics
- icon_filename: 'netfilter.png'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: 'Monitor Netfilter metrics for optimal packet filtering and manipulation. Keep tabs on packet counts, dropped packets, and error rates to secure network operations.'
- method_description: 'Netdata uses libmnl (https://www.netfilter.org/projects/libmnl/index.html) to collect information.'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: 'This plugin needs setuid.'
- default_behavior:
- auto_detection:
-          description: 'This plugin uses a netlink socket to connect to netfilter and collect data.'
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: Install required packages
- description: |
- Install `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.
- configuration:
- file:
- name: 'netdata.conf'
- section_name: '[plugin:nfacct]'
-        description: 'This is the netdata main configuration file'
- options:
- description: ''
- folding:
- title: 'Config options'
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 1
- required: false
- - name: command options
-              description: Additional parameters for the collector
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: netfilter.netlink_new
- description: Connection Tracker New Connections
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: new
- - name: ignore
- - name: invalid
- - name: netfilter.netlink_changes
- description: Connection Tracker Changes
- unit: "changes/s"
- chart_type: line
- dimensions:
- - name: insert
- - name: delete
- - name: delete_list
- - name: netfilter.netlink_search
- description: Connection Tracker Searches
- unit: "searches/s"
- chart_type: line
- dimensions:
- - name: searched
- - name: search_restart
- - name: found
- - name: netfilter.netlink_errors
- description: Connection Tracker Errors
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: icmp_error
- - name: insert_failed
- - name: drop
- - name: early_drop
- - name: netfilter.netlink_expect
- description: Connection Tracker Expectations
- unit: "expectations/s"
- chart_type: line
- dimensions:
- - name: created
- - name: deleted
- - name: new
- - name: netfilter.nfacct_packets
- description: Netfilter Accounting Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: a dimension per nfacct object
- - name: netfilter.nfacct_bytes
- description: Netfilter Accounting Bandwidth
- unit: "kilobytes/s"
- chart_type: line
- dimensions:
- - name: a dimension per nfacct object
diff --git a/collectors/nfacct.plugin/plugin_nfacct.c b/collectors/nfacct.plugin/plugin_nfacct.c
deleted file mode 100644
index 2863cd7eb..000000000
--- a/collectors/nfacct.plugin/plugin_nfacct.c
+++ /dev/null
@@ -1,879 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata/libnetdata.h"
-#include "libnetdata/required_dummies.h"
-
-#include <linux/netfilter/nfnetlink_conntrack.h>
-#include <libmnl/libmnl.h>
-#include <libnetfilter_acct/libnetfilter_acct.h>
-
-#define PLUGIN_NFACCT_NAME "nfacct.plugin"
-
-#define NETDATA_CHART_PRIO_NETFILTER_NEW 8701
-#define NETDATA_CHART_PRIO_NETFILTER_CHANGES 8702
-#define NETDATA_CHART_PRIO_NETFILTER_EXPECT 8703
-#define NETDATA_CHART_PRIO_NETFILTER_ERRORS 8705
-#define NETDATA_CHART_PRIO_NETFILTER_SEARCH 8710
-
-#define NETDATA_CHART_PRIO_NETFILTER_PACKETS 8906
-#define NETDATA_CHART_PRIO_NETFILTER_BYTES 8907
-
-#define NFACCT_RESTART_EVERY_SECONDS 86400 // how often (in seconds) the plugin restarts itself
-
-static inline size_t mnl_buffer_size() {
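-    // MNL_SOCKET_BUFFER_SIZE may expand to a sysconf(3) call, so guard against a non-positive result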
- long s = MNL_SOCKET_BUFFER_SIZE;
- if(s <= 0) return 8192;
- return (size_t)s;
-}
-
-// variables
-static int debug = 0;
-static int netdata_update_every = 1;
-
-#define RRD_TYPE_NET_STAT_NETFILTER "netfilter"
-#define RRD_TYPE_NET_STAT_CONNTRACK "netlink"
-
-static struct {
- int update_every;
- char *buf;
- size_t buf_size;
- struct mnl_socket *mnl;
- struct nlmsghdr *nlh;
- struct nfgenmsg *nfh;
- unsigned int seq;
- uint32_t portid;
-
- struct nlattr *tb[CTA_STATS_MAX+1];
- const char *attr2name[CTA_STATS_MAX+1];
- kernel_uint_t metrics[CTA_STATS_MAX+1];
-
- struct nlattr *tb_exp[CTA_STATS_EXP_MAX+1];
- const char *attr2name_exp[CTA_STATS_EXP_MAX+1];
- kernel_uint_t metrics_exp[CTA_STATS_EXP_MAX+1];
-} nfstat_root = {
- .update_every = 1,
- .buf = NULL,
- .buf_size = 0,
- .mnl = NULL,
- .nlh = NULL,
- .nfh = NULL,
- .seq = 0,
- .portid = 0,
- .tb = {},
- .attr2name = {
- [CTA_STATS_SEARCHED] = "searched",
- [CTA_STATS_FOUND] = "found",
- [CTA_STATS_NEW] = "new",
- [CTA_STATS_INVALID] = "invalid",
- [CTA_STATS_IGNORE] = "ignore",
- [CTA_STATS_DELETE] = "delete",
- [CTA_STATS_DELETE_LIST] = "delete_list",
- [CTA_STATS_INSERT] = "insert",
- [CTA_STATS_INSERT_FAILED] = "insert_failed",
- [CTA_STATS_DROP] = "drop",
- [CTA_STATS_EARLY_DROP] = "early_drop",
- [CTA_STATS_ERROR] = "icmp_error",
- [CTA_STATS_SEARCH_RESTART] = "search_restart",
- },
- .metrics = {},
- .tb_exp = {},
- .attr2name_exp = {
- [CTA_STATS_EXP_NEW] = "new",
- [CTA_STATS_EXP_CREATE] = "created",
- [CTA_STATS_EXP_DELETE] = "deleted",
- },
- .metrics_exp = {}
-};
-
-
-static int nfstat_init(int update_every) {
- nfstat_root.update_every = update_every;
-
- nfstat_root.buf_size = mnl_buffer_size();
- nfstat_root.buf = mallocz(nfstat_root.buf_size);
-
- nfstat_root.mnl = mnl_socket_open(NETLINK_NETFILTER);
- if(!nfstat_root.mnl) {
- collector_error("NFSTAT: mnl_socket_open() failed");
- return 1;
- }
-
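-    // seed the netlink sequence number; nfstat_collect() increments it before every request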
- nfstat_root.seq = (unsigned int)now_realtime_sec() - 1;
-
- if(mnl_socket_bind(nfstat_root.mnl, 0, MNL_SOCKET_AUTOPID) < 0) {
- collector_error("NFSTAT: mnl_socket_bind() failed");
- return 1;
- }
- nfstat_root.portid = mnl_socket_get_portid(nfstat_root.mnl);
-
- return 0;
-}
-
-static struct nlmsghdr * nfct_mnl_nlmsghdr_put(char *buf, uint16_t subsys, uint16_t type, uint8_t family, uint32_t seq) {
- struct nlmsghdr *nlh;
- struct nfgenmsg *nfh;
-
- nlh = mnl_nlmsg_put_header(buf);
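-    // nfnetlink packs the subsystem id into the high byte of nlmsg_type and the message type into the low byte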
- nlh->nlmsg_type = (subsys << 8) | type;
- nlh->nlmsg_flags = NLM_F_REQUEST|NLM_F_DUMP;
- nlh->nlmsg_seq = seq;
-
- nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(struct nfgenmsg));
- nfh->nfgen_family = family;
- nfh->version = NFNETLINK_V0;
- nfh->res_id = 0;
-
- return nlh;
-}
-
-static int nfct_stats_attr_cb(const struct nlattr *attr, void *data) {
- const struct nlattr **tb = data;
- int type = mnl_attr_get_type(attr);
-
- if (mnl_attr_type_valid(attr, CTA_STATS_MAX) < 0)
- return MNL_CB_OK;
-
- if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) {
- collector_error("NFSTAT: mnl_attr_validate() failed");
- return MNL_CB_ERROR;
- }
-
- tb[type] = attr;
- return MNL_CB_OK;
-}
-
-static int nfstat_callback(const struct nlmsghdr *nlh, void *data) {
- (void)data;
-
- struct nfgenmsg *nfg = mnl_nlmsg_get_payload(nlh);
-
- mnl_attr_parse(nlh, sizeof(*nfg), nfct_stats_attr_cb, nfstat_root.tb);
-
- // printf("cpu=%-4u\t", ntohs(nfg->res_id));
-
- int i;
-    // add this CPU's metrics into the running totals
- for (i = 0; i < CTA_STATS_MAX+1; i++) {
- if (nfstat_root.tb[i]) {
- // printf("%s=%u ", nfstat_root.attr2name[i], ntohl(mnl_attr_get_u32(nfstat_root.tb[i])));
- nfstat_root.metrics[i] += ntohl(mnl_attr_get_u32(nfstat_root.tb[i]));
- }
- }
- // printf("\n");
-
- return MNL_CB_OK;
-}
-
-static int nfstat_collect_conntrack() {
- // zero all metrics - we will sum the metrics of all CPUs later
- int i;
- for (i = 0; i < CTA_STATS_MAX+1; i++)
- nfstat_root.metrics[i] = 0;
-
- // prepare the request
- nfstat_root.nlh = nfct_mnl_nlmsghdr_put(nfstat_root.buf, NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS_CPU, AF_UNSPEC, nfstat_root.seq);
-
- // send the request
- if(mnl_socket_sendto(nfstat_root.mnl, nfstat_root.nlh, nfstat_root.nlh->nlmsg_len) < 0) {
- collector_error("NFSTAT: mnl_socket_sendto() failed");
- return 1;
- }
-
- // get the reply
- ssize_t ret;
- while ((ret = mnl_socket_recvfrom(nfstat_root.mnl, nfstat_root.buf, nfstat_root.buf_size)) > 0) {
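-        // mnl_cb_run() returns MNL_CB_STOP when the dump completes (NLMSG_DONE) and MNL_CB_ERROR on failure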
- if(mnl_cb_run(
- nfstat_root.buf
- , (size_t)ret
- , nfstat_root.nlh->nlmsg_seq
- , nfstat_root.portid
- , nfstat_callback
- , NULL
- ) <= MNL_CB_STOP)
- break;
- }
-
-    // verify we ran without issues
- if (ret == -1) {
- collector_error("NFSTAT: error communicating with kernel. This plugin can only work when netdata runs as root.");
- return 1;
- }
-
- return 0;
-}
-
-static int nfexp_stats_attr_cb(const struct nlattr *attr, void *data)
-{
- const struct nlattr **tb = data;
- int type = mnl_attr_get_type(attr);
-
- if (mnl_attr_type_valid(attr, CTA_STATS_EXP_MAX) < 0)
- return MNL_CB_OK;
-
- if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) {
- collector_error("NFSTAT EXP: mnl_attr_validate() failed");
- return MNL_CB_ERROR;
- }
-
- tb[type] = attr;
- return MNL_CB_OK;
-}
-
-static int nfstat_callback_exp(const struct nlmsghdr *nlh, void *data) {
- (void)data;
-
- struct nfgenmsg *nfg = mnl_nlmsg_get_payload(nlh);
-
- mnl_attr_parse(nlh, sizeof(*nfg), nfexp_stats_attr_cb, nfstat_root.tb_exp);
-
- int i;
- for (i = 0; i < CTA_STATS_EXP_MAX+1; i++) {
- if (nfstat_root.tb_exp[i]) {
- nfstat_root.metrics_exp[i] += ntohl(mnl_attr_get_u32(nfstat_root.tb_exp[i]));
- }
- }
-
- return MNL_CB_OK;
-}
-
-static int nfstat_collect_conntrack_expectations() {
- // zero all metrics - we will sum the metrics of all CPUs later
- int i;
- for (i = 0; i < CTA_STATS_EXP_MAX+1; i++)
- nfstat_root.metrics_exp[i] = 0;
-
- // prepare the request
- nfstat_root.nlh = nfct_mnl_nlmsghdr_put(nfstat_root.buf, NFNL_SUBSYS_CTNETLINK_EXP, IPCTNL_MSG_EXP_GET_STATS_CPU, AF_UNSPEC, nfstat_root.seq);
-
- // send the request
- if(mnl_socket_sendto(nfstat_root.mnl, nfstat_root.nlh, nfstat_root.nlh->nlmsg_len) < 0) {
- collector_error("NFSTAT: mnl_socket_sendto() failed");
- return 1;
- }
-
- // get the reply
- ssize_t ret;
- while ((ret = mnl_socket_recvfrom(nfstat_root.mnl, nfstat_root.buf, nfstat_root.buf_size)) > 0) {
- if(mnl_cb_run(
- nfstat_root.buf
- , (size_t)ret
- , nfstat_root.nlh->nlmsg_seq
- , nfstat_root.portid
- , nfstat_callback_exp
- , NULL
- ) <= MNL_CB_STOP)
- break;
- }
-
-    // verify we ran without issues
- if (ret == -1) {
- collector_error("NFSTAT: error communicating with kernel. This plugin can only work when netdata runs as root.");
- return 1;
- }
-
- return 0;
-}
-
-static int nfstat_collect() {
- nfstat_root.seq++;
-
- if(nfstat_collect_conntrack())
- return 1;
-
- if(nfstat_collect_conntrack_expectations())
- return 1;
-
- return 0;
-}
-
-static void nfstat_send_metrics() {
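-    // speaks the plugins.d text protocol on stdout: CHART/DIMENSION once per chart, then BEGIN/SET/END every iteration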
- static int new_chart_generated = 0, changes_chart_generated = 0, search_chart_generated = 0, errors_chart_generated = 0, expect_chart_generated = 0;
-
- if(!new_chart_generated) {
- new_chart_generated = 1;
-
- printf("CHART %s.%s '' 'Connection Tracker New Connections' 'connections/s' %s '' line %d %d %s\n"
- , RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_new"
- , RRD_TYPE_NET_STAT_CONNTRACK
- , NETDATA_CHART_PRIO_NETFILTER_NEW
- , nfstat_root.update_every
- , PLUGIN_NFACCT_NAME
- );
- printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_NEW]);
- printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_IGNORE]);
- printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_INVALID]);
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_new"
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_NEW]
- , (collected_number) nfstat_root.metrics[CTA_STATS_NEW]
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_IGNORE]
- , (collected_number) nfstat_root.metrics[CTA_STATS_IGNORE]
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_INVALID]
- , (collected_number) nfstat_root.metrics[CTA_STATS_INVALID]
- );
- printf("END\n");
-
- // ----------------------------------------------------------------
-
- if(!changes_chart_generated) {
- changes_chart_generated = 1;
-
- printf("CHART %s.%s '' 'Connection Tracker Changes' 'changes/s' %s '' line %d %d detail %s\n"
- , RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_changes"
- , RRD_TYPE_NET_STAT_CONNTRACK
- , NETDATA_CHART_PRIO_NETFILTER_CHANGES
- , nfstat_root.update_every
- , PLUGIN_NFACCT_NAME
- );
- printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_INSERT]);
- printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_DELETE]);
- printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_DELETE_LIST]);
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_changes"
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_INSERT]
- , (collected_number) nfstat_root.metrics[CTA_STATS_INSERT]
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_DELETE]
- , (collected_number) nfstat_root.metrics[CTA_STATS_DELETE]
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_DELETE_LIST]
- , (collected_number) nfstat_root.metrics[CTA_STATS_DELETE_LIST]
- );
- printf("END\n");
-
- // ----------------------------------------------------------------
-
- if(!search_chart_generated) {
- search_chart_generated = 1;
-
- printf("CHART %s.%s '' 'Connection Tracker Searches' 'searches/s' %s '' line %d %d detail %s\n"
- , RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_search"
- , RRD_TYPE_NET_STAT_CONNTRACK
- , NETDATA_CHART_PRIO_NETFILTER_SEARCH
- , nfstat_root.update_every
- , PLUGIN_NFACCT_NAME
- );
- printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_SEARCHED]);
- printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_SEARCH_RESTART]);
- printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_FOUND]);
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_search"
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_SEARCHED]
- , (collected_number) nfstat_root.metrics[CTA_STATS_SEARCHED]
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_SEARCH_RESTART]
- , (collected_number) nfstat_root.metrics[CTA_STATS_SEARCH_RESTART]
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_FOUND]
- , (collected_number) nfstat_root.metrics[CTA_STATS_FOUND]
- );
- printf("END\n");
-
- // ----------------------------------------------------------------
-
- if(!errors_chart_generated) {
- errors_chart_generated = 1;
-
- printf("CHART %s.%s '' 'Connection Tracker Errors' 'events/s' %s '' line %d %d detail %s\n"
- , RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_errors"
- , RRD_TYPE_NET_STAT_CONNTRACK
- , NETDATA_CHART_PRIO_NETFILTER_ERRORS
- , nfstat_root.update_every
- , PLUGIN_NFACCT_NAME
- );
- printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_ERROR]);
- printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_INSERT_FAILED]);
- printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_DROP]);
- printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_EARLY_DROP]);
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_errors"
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_ERROR]
- , (collected_number) nfstat_root.metrics[CTA_STATS_ERROR]
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_INSERT_FAILED]
- , (collected_number) nfstat_root.metrics[CTA_STATS_INSERT_FAILED]
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_DROP]
- , (collected_number) nfstat_root.metrics[CTA_STATS_DROP]
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_EARLY_DROP]
- , (collected_number) nfstat_root.metrics[CTA_STATS_EARLY_DROP]
- );
- printf("END\n");
-
- // ----------------------------------------------------------------
-
- if(!expect_chart_generated) {
- expect_chart_generated = 1;
-
- printf("CHART %s.%s '' 'Connection Tracker Expectations' 'expectations/s' %s '' line %d %d detail %s\n"
- , RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_expect"
- , RRD_TYPE_NET_STAT_CONNTRACK
- , NETDATA_CHART_PRIO_NETFILTER_EXPECT
- , nfstat_root.update_every
- , PLUGIN_NFACCT_NAME
- );
- printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_EXP_CREATE]);
- printf("DIMENSION %s '' incremental -1 1\n", nfstat_root.attr2name[CTA_STATS_EXP_DELETE]);
- printf("DIMENSION %s '' incremental 1 1\n", nfstat_root.attr2name[CTA_STATS_EXP_NEW]);
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_expect"
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_EXP_CREATE]
- , (collected_number) nfstat_root.metrics[CTA_STATS_EXP_CREATE]
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_EXP_DELETE]
- , (collected_number) nfstat_root.metrics[CTA_STATS_EXP_DELETE]
- );
- printf(
- "SET %s = %lld\n"
- , nfstat_root.attr2name[CTA_STATS_EXP_NEW]
- , (collected_number) nfstat_root.metrics[CTA_STATS_EXP_NEW]
- );
- printf("END\n");
-}
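-
-// For reference, each iteration of the function above emits the netdata
-// external-plugin text protocol on stdout. Assuming the conventional macro
-// values ("netfilter"/"conntrack") and illustrative dimension names and
-// numbers, the first chart comes out roughly as:
-//
-//   CHART netfilter.conntrack_new '' 'Connection Tracker New Connections' 'connections/s' conntrack '' line <prio> <update_every> nfacct.plugin
-//   DIMENSION new '' incremental 1 1
-//   DIMENSION ignore '' incremental -1 1
-//   DIMENSION invalid '' incremental -1 1
-//   BEGIN netfilter.conntrack_new
-//   SET new = 123
-//   SET ignore = 4
-//   SET invalid = 5
-//   END
-//
-// The CHART/DIMENSION lines are printed once; BEGIN/SET/END repeat on every
-// iteration.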
-
-
-struct nfacct_data {
- char *name;
- uint32_t hash;
-
- uint64_t pkts;
- uint64_t bytes;
-
- int packets_dimension_added;
- int bytes_dimension_added;
-
- int updated;
-
- struct nfacct_data *next;
-};
-
-static struct {
- int update_every;
- char *buf;
- size_t buf_size;
- struct mnl_socket *mnl;
- struct nlmsghdr *nlh;
- unsigned int seq;
- uint32_t portid;
- struct nfacct *nfacct_buffer;
- struct nfacct_data *nfacct_metrics;
-} nfacct_root = {
- .update_every = 1,
- .buf = NULL,
- .buf_size = 0,
- .mnl = NULL,
- .nlh = NULL,
- .seq = 0,
- .portid = 0,
- .nfacct_buffer = NULL,
- .nfacct_metrics = NULL
-};
-
-static inline struct nfacct_data *nfacct_data_get(const char *name, uint32_t hash) {
- struct nfacct_data *d = NULL, *last = NULL;
- for(d = nfacct_root.nfacct_metrics; d ; last = d, d = d->next) {
- if(unlikely(d->hash == hash && !strcmp(d->name, name)))
- return d;
- }
-
- d = callocz(1, sizeof(struct nfacct_data));
- d->name = strdupz(name);
- d->hash = hash;
-
- if(!last) {
- d->next = nfacct_root.nfacct_metrics;
- nfacct_root.nfacct_metrics = d;
- }
- else {
- d->next = last->next;
- last->next = d;
- }
-
- return d;
-}
-
-static int nfacct_init(int update_every) {
- nfacct_root.update_every = update_every;
-
- nfacct_root.buf_size = mnl_buffer_size();
- nfacct_root.buf = mallocz(nfacct_root.buf_size);
-
- nfacct_root.nfacct_buffer = nfacct_alloc();
- if(!nfacct_root.nfacct_buffer) {
- collector_error("nfacct.plugin: nfacct_alloc() failed.");
- return 1; // allocation failed - report the error instead of success
- }
-
- nfacct_root.seq = (unsigned int)now_realtime_sec() - 1;
-
- nfacct_root.mnl = mnl_socket_open(NETLINK_NETFILTER);
- if(!nfacct_root.mnl) {
- collector_error("nfacct.plugin: mnl_socket_open() failed");
- return 1;
- }
-
- if(mnl_socket_bind(nfacct_root.mnl, 0, MNL_SOCKET_AUTOPID) < 0) {
- collector_error("nfacct.plugin: mnl_socket_bind() failed");
- return 1;
- }
- nfacct_root.portid = mnl_socket_get_portid(nfacct_root.mnl);
-
- return 0;
-}
-
-static int nfacct_callback(const struct nlmsghdr *nlh, void *data) {
- (void)data;
-
- if(nfacct_nlmsg_parse_payload(nlh, nfacct_root.nfacct_buffer) < 0) {
- collector_error("NFACCT: nfacct_nlmsg_parse_payload() failed.");
- return MNL_CB_OK;
- }
-
- const char *name = nfacct_attr_get_str(nfacct_root.nfacct_buffer, NFACCT_ATTR_NAME);
- uint32_t hash = simple_hash(name);
-
- struct nfacct_data *d = nfacct_data_get(name, hash);
-
- d->pkts = nfacct_attr_get_u64(nfacct_root.nfacct_buffer, NFACCT_ATTR_PKTS);
- d->bytes = nfacct_attr_get_u64(nfacct_root.nfacct_buffer, NFACCT_ATTR_BYTES);
- d->updated = 1;
-
- return MNL_CB_OK;
-}
-
-static int nfacct_collect() {
- // mark all old metrics as not-updated
- struct nfacct_data *d;
- for(d = nfacct_root.nfacct_metrics; d ; d = d->next)
- d->updated = 0;
-
- // prepare the request
- nfacct_root.seq++;
- nfacct_root.nlh = nfacct_nlmsg_build_hdr(nfacct_root.buf, NFNL_MSG_ACCT_GET, NLM_F_DUMP, (uint32_t)nfacct_root.seq);
- if(!nfacct_root.nlh) {
- collector_error("NFACCT: nfacct_nlmsg_build_hdr() failed");
- return 1;
- }
-
- // send the request
- if(mnl_socket_sendto(nfacct_root.mnl, nfacct_root.nlh, nfacct_root.nlh->nlmsg_len) < 0) {
- collector_error("NFACCT: mnl_socket_sendto() failed");
- return 1;
- }
-
- // get the reply
- ssize_t ret;
- while((ret = mnl_socket_recvfrom(nfacct_root.mnl, nfacct_root.buf, nfacct_root.buf_size)) > 0) {
- if(mnl_cb_run(
- nfacct_root.buf
- , (size_t)ret
- , nfacct_root.seq
- , nfacct_root.portid
- , nfacct_callback
- , NULL
- ) <= MNL_CB_STOP)
- break;
- }
-
- // verify we ran without issues
- if (ret == -1) {
- collector_error("NFACCT: error communicating with kernel. This plugin can only work when netdata runs as root.");
- return 1;
- }
-
- return 0;
-}
-
-static void nfacct_send_metrics() {
- static int bytes_chart_generated = 0, packets_chart_generated = 0;
-
- if(!nfacct_root.nfacct_metrics) return;
- struct nfacct_data *d;
-
- if(!packets_chart_generated) {
- packets_chart_generated = 1;
- printf("CHART netfilter.nfacct_packets '' 'Netfilter Accounting Packets' 'packets/s' 'nfacct' '' stacked %d %d %s\n"
- , NETDATA_CHART_PRIO_NETFILTER_PACKETS
- , nfacct_root.update_every
- , PLUGIN_NFACCT_NAME
- );
- }
-
- for(d = nfacct_root.nfacct_metrics; d ; d = d->next) {
- if(likely(d->updated)) {
- if(unlikely(!d->packets_dimension_added)) {
- d->packets_dimension_added = 1;
- printf(
- "CHART netfilter.nfacct_packets '' 'Netfilter Accounting Packets' 'packets/s' 'nfacct' '' stacked %d %d %s\n",
- NETDATA_CHART_PRIO_NETFILTER_PACKETS,
- nfacct_root.update_every,
- PLUGIN_NFACCT_NAME);
- printf("DIMENSION %s '' incremental 1 %d\n", d->name, nfacct_root.update_every);
- }
- }
- }
- printf("BEGIN netfilter.nfacct_packets\n");
- for(d = nfacct_root.nfacct_metrics; d ; d = d->next) {
- if(likely(d->updated)) {
- printf("SET %s = %lld\n"
- , d->name
- , (collected_number)d->pkts
- );
- }
- }
- printf("END\n");
-
- // ----------------------------------------------------------------
-
- if(!bytes_chart_generated) {
- bytes_chart_generated = 1;
- printf("CHART netfilter.nfacct_bytes '' 'Netfilter Accounting Bandwidth' 'kilobytes/s' 'nfacct' '' stacked %d %d %s\n"
- , NETDATA_CHART_PRIO_NETFILTER_BYTES
- , nfacct_root.update_every
- , PLUGIN_NFACCT_NAME
- );
- }
-
- for(d = nfacct_root.nfacct_metrics; d ; d = d->next) {
- if(likely(d->updated)) {
- if(unlikely(!d->bytes_dimension_added)) {
- d->bytes_dimension_added = 1;
- printf(
- "CHART netfilter.nfacct_bytes '' 'Netfilter Accounting Bandwidth' 'kilobytes/s' 'nfacct' '' stacked %d %d %s\n",
- NETDATA_CHART_PRIO_NETFILTER_BYTES,
- nfacct_root.update_every,
- PLUGIN_NFACCT_NAME);
- printf("DIMENSION %s '' incremental 1 %d\n", d->name, 1000 * nfacct_root.update_every);
- }
- }
- }
- printf("BEGIN netfilter.nfacct_bytes\n");
- for(d = nfacct_root.nfacct_metrics; d ; d = d->next) {
- if(likely(d->updated)) {
- printf("SET %s = %lld\n"
- , d->name
- , (collected_number)d->bytes
- );
- }
- }
- printf("END\n");
-}
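-
-// Note the pattern above: when a new nfacct object appears at runtime, the
-// CHART line is re-sent before the new DIMENSION line, because the external
-// plugin protocol attaches DIMENSION lines to the most recently declared
-// chart. Illustratively (dimension name hypothetical):
-//
-//   CHART netfilter.nfacct_packets '' 'Netfilter Accounting Packets' 'packets/s' 'nfacct' '' stacked <prio> <update_every> nfacct.plugin
-//   DIMENSION my_new_counter '' incremental 1 <update_every>
-//   BEGIN netfilter.nfacct_packets
-//   SET my_new_counter = 42
-//   END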
-
-static void nfacct_signal_handler(int signo)
-{
- exit((signo == SIGPIPE)?1:0);
-}
-
-// When Netdata crashes, this plugin would be left behind as a zombie.
-// This handler was added so the plugin exits when SIGPIPE and other signals are received.
-void nfacct_signals()
-{
- int signals[] = { SIGPIPE, SIGINT, SIGTERM, 0};
- int i;
- struct sigaction sa;
- sa.sa_flags = 0;
- sa.sa_handler = nfacct_signal_handler;
-
- // ignore all signals while we run in a signal handler
- sigfillset(&sa.sa_mask);
-
- for (i = 0; signals[i]; i++) {
- if(sigaction(signals[i], &sa, NULL) == -1)
- collector_error("Cannot add the handler to signal %d", signals[i]);
- }
-}
-
-int main(int argc, char **argv) {
- clocks_init();
- nd_log_initialize_for_external_plugins("nfacct.plugin");
-
- // ------------------------------------------------------------------------
- // parse command line parameters
-
- int i, freq = 0;
- for(i = 1; i < argc ; i++) {
- if(isdigit(*argv[i]) && !freq) {
- int n = str2i(argv[i]);
- if(n > 0 && n < 86400) {
- freq = n;
- continue;
- }
- }
- else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
- printf("nfacct.plugin %s\n", VERSION);
- exit(0);
- }
- else if(strcmp("debug", argv[i]) == 0) {
- debug = 1;
- continue;
- }
- else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
- fprintf(stderr,
- "\n"
- " netdata nfacct.plugin %s\n"
- " Copyright (C) 2015-2017 Costa Tsaousis <costa@tsaousis.gr>\n"
- " Released under GNU General Public License v3 or later.\n"
- " All rights reserved.\n"
- "\n"
- " This program is a data collector plugin for netdata.\n"
- "\n"
- " Available command line options:\n"
- "\n"
- " COLLECTION_FREQUENCY data collection frequency in seconds\n"
- " minimum: %d\n"
- "\n"
- " debug enable verbose output\n"
- " default: disabled\n"
- "\n"
- " -v\n"
- " -V\n"
- " --version print version and exit\n"
- "\n"
- " -h\n"
- " --help print this message and exit\n"
- "\n"
- " For more information:\n"
- " https://github.com/netdata/netdata/tree/master/collectors/nfacct.plugin\n"
- "\n"
- , VERSION
- , netdata_update_every
- );
- exit(1);
- }
-
- collector_error("nfacct.plugin: ignoring parameter '%s'", argv[i]);
- }
-
- nfacct_signals();
-
- errno = 0;
-
- if(freq >= netdata_update_every)
- netdata_update_every = freq;
- else if(freq)
- collector_error("update frequency %d seconds is too small for NFACCT. Using %d.", freq, netdata_update_every);
-
- if (debug)
- fprintf(stderr, "nfacct.plugin: calling nfacct_init()\n");
- int nfacct = !nfacct_init(netdata_update_every);
-
- if (debug)
- fprintf(stderr, "nfacct.plugin: calling nfstat_init()\n");
- int nfstat = !nfstat_init(netdata_update_every);
-
- // ------------------------------------------------------------------------
- // the main loop
-
- if(debug) fprintf(stderr, "nfacct.plugin: starting data collection\n");
-
- time_t started_t = now_monotonic_sec();
-
- size_t iteration;
- usec_t step = netdata_update_every * USEC_PER_SEC;
-
- heartbeat_t hb;
- heartbeat_init(&hb);
- for(iteration = 0; 1; iteration++) {
- usec_t dt = heartbeat_next(&hb, step);
-
- if(unlikely(netdata_exit)) break;
-
- if(debug && iteration)
- fprintf(stderr, "nfacct.plugin: iteration %zu, dt %"PRIu64" usec\n"
- , iteration
- , dt
- );
-
- if(likely(nfacct)) {
- if(debug) fprintf(stderr, "nfacct.plugin: calling nfacct_collect()\n");
- nfacct = !nfacct_collect();
-
- if(likely(nfacct)) {
- if(debug) fprintf(stderr, "nfacct.plugin: calling nfacct_send_metrics()\n");
- nfacct_send_metrics();
- }
- }
-
- if(likely(nfstat)) {
- if(debug) fprintf(stderr, "nfacct.plugin: calling nfstat_collect()\n");
- nfstat = !nfstat_collect();
-
- if(likely(nfstat)) {
- if(debug) fprintf(stderr, "nfacct.plugin: calling nfstat_send_metrics()\n");
- nfstat_send_metrics();
- }
- }
-
- fflush(stdout);
-
- if (now_monotonic_sec() - started_t > NFACCT_RESTART_EVERY_SECONDS) {
- collector_info("NFACCT reached my lifetime expectancy. Exiting to restart.");
- fprintf(stdout, "EXIT\n");
- fflush(stdout);
- exit(0);
- }
- }
-}
diff --git a/collectors/perf.plugin/Makefile.am b/collectors/perf.plugin/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/collectors/perf.plugin/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/perf.plugin/README.md b/collectors/perf.plugin/README.md
deleted file mode 120000
index fb8a0cd69..000000000
--- a/collectors/perf.plugin/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/cpu_performance.md \ No newline at end of file
diff --git a/collectors/perf.plugin/integrations/cpu_performance.md b/collectors/perf.plugin/integrations/cpu_performance.md
deleted file mode 100644
index d3c316d2e..000000000
--- a/collectors/perf.plugin/integrations/cpu_performance.md
+++ /dev/null
@@ -1,192 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/perf.plugin/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/perf.plugin/metadata.yaml"
-sidebar_label: "CPU performance"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# CPU performance
-
-
-<img src="https://netdata.cloud/img/bolt.svg" width="150"/>
-
-
-Plugin: perf.plugin
-Module: perf.plugin
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors CPU performance metrics about cycles, instructions, migrations, cache operations and more.
-
-It uses `syscall(2)` to open file descriptors that monitor the perf events.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-It needs setuid to use the necessary syscall to collect perf events. Netdata sets this permission at installation time.
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per CPU performance instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| perf.cpu_cycles | cpu, ref_cpu | cycles/s |
-| perf.instructions | instructions | instructions/s |
-| perf.instructions_per_cycle | ipc | instructions/cycle |
-| perf.branch_instructions | instructions, misses | instructions/s |
-| perf.cache | references, misses | operations/s |
-| perf.bus_cycles | bus | cycles/s |
-| perf.stalled_cycles | frontend, backend | cycles/s |
-| perf.migrations | migrations | migrations |
-| perf.alignment_faults | faults | faults |
-| perf.emulation_faults | faults | faults |
-| perf.l1d_cache | read_access, read_misses, write_access, write_misses | events/s |
-| perf.l1d_cache_prefetch | prefetches | prefetches/s |
-| perf.l1i_cache | read_access, read_misses | events/s |
-| perf.ll_cache | read_access, read_misses, write_access, write_misses | events/s |
-| perf.dtlb_cache | read_access, read_misses, write_access, write_misses | events/s |
-| perf.itlb_cache | read_access, read_misses | events/s |
-| perf.pbu_cache | read_access | events/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Install perf plugin
-
-If you are [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.
-
-
-#### Enable the perf plugin
-
-The plugin is disabled by default because the number of PMUs is usually quite limited, and it is undesirable to let Netdata silently contend for them, interfering with other performance monitoring software.
-
-To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config netdata.conf
-```
-
-Change the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
-
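-For example, the section would end up looking like this (a minimal sketch; any other entries under `[plugins]` stay as they are):
-
-```ini
-[plugins]
-    perf = yes
-```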
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:perf]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-You can get the available options by running:
-
-```bash
-/usr/libexec/netdata/plugins.d/perf.plugin --help
-```
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 1 | no |
-| command options | Command options that specify charts shown by plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. | 1 | yes |
-
-</details>
-
-#### Examples
-
-##### All metrics
-
-Monitor all metrics available.
-
-```yaml
-[plugin:perf]
- command options = all
-
-```
-##### CPU cycles
-
-Monitor CPU cycles.
-
-<details><summary>Config</summary>
-
-```yaml
-[plugin:perf]
- command options = cycles
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
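-You can run `perf.plugin` with the debug option enabled, to troubleshoot issues with it. The output should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
-  ```bash
-  cd /usr/libexec/netdata/plugins.d/
-  ```
-
-- Switch to the `netdata` user.
-
-  ```bash
-  sudo -u netdata -s
-  ```
-
-- Run the `perf.plugin` in debug mode:
-
-  ```bash
-  ./perf.plugin 1 all debug
-  ```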
-
-
-
diff --git a/collectors/perf.plugin/metadata.yaml b/collectors/perf.plugin/metadata.yaml
deleted file mode 100644
index eada3351d..000000000
--- a/collectors/perf.plugin/metadata.yaml
+++ /dev/null
@@ -1,252 +0,0 @@
-plugin_name: perf.plugin
-modules:
- - meta:
- plugin_name: perf.plugin
- module_name: perf.plugin
- monitored_instance:
- name: CPU performance
- link: "https://kernel.org/"
- categories:
- - data-collection.linux-systems
- icon_filename: "bolt.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - linux
- - cpu performance
- - cpu cache
- - perf.plugin
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors CPU performance metrics about cycles, instructions, migrations, cache operations and more."
- method_description: "It uses syscall(2) to open file descriptors that monitor the perf events."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "It needs setuid to use the necessary syscall to collect perf events. Netdata sets this permission at installation time."
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Install perf plugin
- description: |
- If you are [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.
- - title: Enable the perf plugin
- description: |
- The plugin is disabled by default because the number of PMUs is usually quite limited, and it is undesirable to let Netdata silently contend for them, interfering with other performance monitoring software.
-
- To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.
-
- ```bash
- cd /etc/netdata # Replace this path with your Netdata config directory, if different
- sudo ./edit-config netdata.conf
- ```
-
- Change the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
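-
- For example, the section would end up looking like this (a minimal sketch; any other entries under `[plugins]` stay as they are):
-
- ```ini
- [plugins]
- perf = yes
- ```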
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:perf]"
- description: "The main netdata configuration file."
- options:
- description: |
- You can get the available options by running:
-
- ```bash
- /usr/libexec/netdata/plugins.d/perf.plugin --help
- ```
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 1
- required: false
- - name: command options
- description: Command options that specify charts shown by plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`.
- default_value: 1
- required: true
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: All metrics
- folding:
- enabled: false
- description: Monitor all metrics available.
- config: |
- [plugin:perf]
- command options = all
- - name: CPU cycles
- description: Monitor CPU cycles.
- config: |
- [plugin:perf]
- command options = cycles
- troubleshooting:
- problems:
- list:
- - name: Debug Mode
- description: |
- You can run `perf.plugin` with the debug option enabled, to troubleshoot issues with it. The output should give you clues as to why the collector isn't working.
-
- - Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
- - Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
- - Run the `perf.plugin` in debug mode:
-
- ```bash
- ./perf.plugin 1 all debug
- ```
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: perf.cpu_cycles
- description: CPU cycles
- unit: "cycles/s"
- chart_type: line
- dimensions:
- - name: cpu
- - name: ref_cpu
- - name: perf.instructions
- description: Instructions
- unit: "instructions/s"
- chart_type: line
- dimensions:
- - name: instructions
- - name: perf.instructions_per_cycle
- description: Instructions per Cycle(IPC)
- unit: "instructions/cycle"
- chart_type: line
- dimensions:
- - name: ipc
- - name: perf.branch_instructions
- description: Branch instructions
- unit: "instructions/s"
- chart_type: line
- dimensions:
- - name: instructions
- - name: misses
- - name: perf.cache
- description: Cache operations
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: references
- - name: misses
- - name: perf.bus_cycles
- description: Bus cycles
- unit: "cycles/s"
- chart_type: line
- dimensions:
- - name: bus
- - name: perf.stalled_cycles
- description: Stalled frontend and backend cycles
- unit: "cycles/s"
- chart_type: line
- dimensions:
- - name: frontend
- - name: backend
- - name: perf.migrations
- description: CPU migrations
- unit: "migrations"
- chart_type: line
- dimensions:
- - name: migrations
- - name: perf.alignment_faults
- description: Alignment faults
- unit: "faults"
- chart_type: line
- dimensions:
- - name: faults
- - name: perf.emulation_faults
- description: Emulation faults
- unit: "faults"
- chart_type: line
- dimensions:
- - name: faults
- - name: perf.l1d_cache
- description: L1D cache operations
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: read_access
- - name: read_misses
- - name: write_access
- - name: write_misses
- - name: perf.l1d_cache_prefetch
- description: L1D prefetch cache operations
- unit: "prefetches/s"
- chart_type: line
- dimensions:
- - name: prefetches
- - name: perf.l1i_cache
- description: L1I cache operations
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: read_access
- - name: read_misses
- - name: perf.ll_cache
- description: LL cache operations
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: read_access
- - name: read_misses
- - name: write_access
- - name: write_misses
- - name: perf.dtlb_cache
- description: DTLB cache operations
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: read_access
- - name: read_misses
- - name: write_access
- - name: write_misses
- - name: perf.itlb_cache
- description: ITLB cache operations
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: read_access
- - name: read_misses
- - name: perf.pbu_cache
- description: PBU cache operations
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: read_access
diff --git a/collectors/perf.plugin/perf_plugin.c b/collectors/perf.plugin/perf_plugin.c
deleted file mode 100644
index fe3b04daa..000000000
--- a/collectors/perf.plugin/perf_plugin.c
+++ /dev/null
@@ -1,1342 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata/libnetdata.h"
-#include "libnetdata/required_dummies.h"
-
-#include <linux/perf_event.h>
-
-#define PLUGIN_PERF_NAME "perf.plugin"
-
-// Hardware counters
-#define NETDATA_CHART_PRIO_PERF_CPU_CYCLES 8800
-#define NETDATA_CHART_PRIO_PERF_INSTRUCTIONS 8801
-#define NETDATA_CHART_PRIO_PERF_IPC 8802
-#define NETDATA_CHART_PRIO_PERF_BRANCH_INSTRUCTIONS 8803
-#define NETDATA_CHART_PRIO_PERF_CACHE 8804
-#define NETDATA_CHART_PRIO_PERF_BUS_CYCLES 8805
-#define NETDATA_CHART_PRIO_PERF_FRONT_BACK_CYCLES 8806
-
-// Software counters
-#define NETDATA_CHART_PRIO_PERF_MIGRATIONS 8810
-#define NETDATA_CHART_PRIO_PERF_ALIGNMENT 8811
-#define NETDATA_CHART_PRIO_PERF_EMULATION 8812
-
-// Hardware cache counters
-#define NETDATA_CHART_PRIO_PERF_L1D 8820
-#define NETDATA_CHART_PRIO_PERF_L1D_PREFETCH 8821
-#define NETDATA_CHART_PRIO_PERF_L1I 8822
-#define NETDATA_CHART_PRIO_PERF_LL 8823
-#define NETDATA_CHART_PRIO_PERF_DTLB 8824
-#define NETDATA_CHART_PRIO_PERF_ITLB 8825
-#define NETDATA_CHART_PRIO_PERF_PBU 8826
-
-#define RRD_TYPE_PERF "perf"
-#define RRD_FAMILY_HW "hardware"
-#define RRD_FAMILY_SW "software"
-#define RRD_FAMILY_CACHE "cache"
-
-#define NO_FD -1
-#define ALL_PIDS -1
-#define RUNNING_THRESHOLD 100
-
-static int debug = 0;
-
-static int update_every = 1;
-static int freq = 0;
-
-typedef enum perf_event_id {
- // Hardware counters
- EV_ID_CPU_CYCLES,
- EV_ID_INSTRUCTIONS,
- EV_ID_CACHE_REFERENCES,
- EV_ID_CACHE_MISSES,
- EV_ID_BRANCH_INSTRUCTIONS,
- EV_ID_BRANCH_MISSES,
- EV_ID_BUS_CYCLES,
- EV_ID_STALLED_CYCLES_FRONTEND,
- EV_ID_STALLED_CYCLES_BACKEND,
- EV_ID_REF_CPU_CYCLES,
-
- // Software counters
- // EV_ID_CPU_CLOCK,
- // EV_ID_TASK_CLOCK,
- // EV_ID_PAGE_FAULTS,
- // EV_ID_CONTEXT_SWITCHES,
- EV_ID_CPU_MIGRATIONS,
- // EV_ID_PAGE_FAULTS_MIN,
- // EV_ID_PAGE_FAULTS_MAJ,
- EV_ID_ALIGNMENT_FAULTS,
- EV_ID_EMULATION_FAULTS,
-
- // Hardware cache counters
- EV_ID_L1D_READ_ACCESS,
- EV_ID_L1D_READ_MISS,
- EV_ID_L1D_WRITE_ACCESS,
- EV_ID_L1D_WRITE_MISS,
- EV_ID_L1D_PREFETCH_ACCESS,
-
- EV_ID_L1I_READ_ACCESS,
- EV_ID_L1I_READ_MISS,
-
- EV_ID_LL_READ_ACCESS,
- EV_ID_LL_READ_MISS,
- EV_ID_LL_WRITE_ACCESS,
- EV_ID_LL_WRITE_MISS,
-
- EV_ID_DTLB_READ_ACCESS,
- EV_ID_DTLB_READ_MISS,
- EV_ID_DTLB_WRITE_ACCESS,
- EV_ID_DTLB_WRITE_MISS,
-
- EV_ID_ITLB_READ_ACCESS,
- EV_ID_ITLB_READ_MISS,
-
- EV_ID_PBU_READ_ACCESS,
-
- EV_ID_END
-} perf_event_id_t;
-
-enum perf_event_group {
- EV_GROUP_CYCLES,
- EV_GROUP_INSTRUCTIONS_AND_CACHE,
- EV_GROUP_SOFTWARE,
- EV_GROUP_CACHE_L1D,
- EV_GROUP_CACHE_L1I_LL_DTLB,
- EV_GROUP_CACHE_ITLB_BPU,
-
- EV_GROUP_NUM
-};
-
-static int number_of_cpus;
-
-static int *group_leader_fds[EV_GROUP_NUM];
-
-static struct perf_event {
- perf_event_id_t id;
-
- int type;
- int config;
-
- int **group_leader_fd;
- int *fd;
-
- int disabled;
- int updated;
-
- uint64_t value;
-
- uint64_t *prev_value;
- uint64_t *prev_time_enabled;
- uint64_t *prev_time_running;
-} perf_events[] = {
- // Hardware counters
- {EV_ID_CPU_CYCLES, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, &group_leader_fds[EV_GROUP_CYCLES], NULL, 1, 0, 0, NULL, NULL, NULL},
- {EV_ID_INSTRUCTIONS, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS, &group_leader_fds[EV_GROUP_INSTRUCTIONS_AND_CACHE], NULL, 1, 0, 0, NULL, NULL, NULL},
- {EV_ID_CACHE_REFERENCES, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES, &group_leader_fds[EV_GROUP_INSTRUCTIONS_AND_CACHE], NULL, 1, 0, 0, NULL, NULL, NULL},
- {EV_ID_CACHE_MISSES, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES, &group_leader_fds[EV_GROUP_INSTRUCTIONS_AND_CACHE], NULL, 1, 0, 0, NULL, NULL, NULL},
- {EV_ID_BRANCH_INSTRUCTIONS, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS, &group_leader_fds[EV_GROUP_INSTRUCTIONS_AND_CACHE], NULL, 1, 0, 0, NULL, NULL, NULL},
- {EV_ID_BRANCH_MISSES, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES, &group_leader_fds[EV_GROUP_INSTRUCTIONS_AND_CACHE], NULL, 1, 0, 0, NULL, NULL, NULL},
- {EV_ID_BUS_CYCLES, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES, &group_leader_fds[EV_GROUP_CYCLES], NULL, 1, 0, 0, NULL, NULL, NULL},
- {EV_ID_STALLED_CYCLES_FRONTEND, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND, &group_leader_fds[EV_GROUP_CYCLES], NULL, 1, 0, 0, NULL, NULL, NULL},
- {EV_ID_STALLED_CYCLES_BACKEND, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND, &group_leader_fds[EV_GROUP_CYCLES], NULL, 1, 0, 0, NULL, NULL, NULL},
- {EV_ID_REF_CPU_CYCLES, PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES, &group_leader_fds[EV_GROUP_CYCLES], NULL, 1, 0, 0, NULL, NULL, NULL},
-
- // Software counters
- // {EV_ID_CPU_CLOCK, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
- // {EV_ID_TASK_CLOCK, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
- // {EV_ID_PAGE_FAULTS, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
- // {EV_ID_CONTEXT_SWITCHES, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
- {EV_ID_CPU_MIGRATIONS, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
- // {EV_ID_PAGE_FAULTS_MIN, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
- // {EV_ID_PAGE_FAULTS_MAJ, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
- {EV_ID_ALIGNMENT_FAULTS, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
- {EV_ID_EMULATION_FAULTS, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS, &group_leader_fds[EV_GROUP_SOFTWARE], NULL, 1, 0, 0, NULL, NULL, NULL},
-
- // Hardware cache counters
- {
- EV_ID_L1D_READ_ACCESS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_L1D) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1D], NULL, 1, 0, 0, NULL, NULL, NULL
- }, {
- EV_ID_L1D_READ_MISS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_L1D) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1D], NULL, 1, 0, 0, NULL, NULL, NULL
- }, {
- EV_ID_L1D_WRITE_ACCESS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_L1D) | (PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1D], NULL, 1, 0, 0, NULL, NULL, NULL
- }, {
- EV_ID_L1D_WRITE_MISS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_L1D) | (PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1D], NULL, 1, 0, 0, NULL, NULL, NULL
- }, {
- EV_ID_L1D_PREFETCH_ACCESS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_L1D) | (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1D], NULL, 1, 0, 0, NULL, NULL, NULL
- },
-
- {
- EV_ID_L1I_READ_ACCESS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_L1I) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
- }, {
- EV_ID_L1I_READ_MISS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_L1I) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
- },
-
- {
- EV_ID_LL_READ_ACCESS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_LL) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
- }, {
- EV_ID_LL_READ_MISS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_LL) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
- }, {
- EV_ID_LL_WRITE_ACCESS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_LL) | (PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
- }, {
- EV_ID_LL_WRITE_MISS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_LL) | (PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
- },
-
- {
- EV_ID_DTLB_READ_ACCESS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_DTLB) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
- }, {
- EV_ID_DTLB_READ_MISS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_DTLB) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
- }, {
- EV_ID_DTLB_WRITE_ACCESS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_DTLB) | (PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
- &group_leader_fds[EV_GROUP_CACHE_L1I_LL_DTLB], NULL, 1, 0, 0, NULL, NULL, NULL
- }, {
- EV_ID_DTLB_WRITE_MISS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_DTLB) | (PERF_COUNT_HW_CACHE_OP_WRITE << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
- &group_leader_fds[EV_GROUP_CACHE_ITLB_BPU], NULL, 1, 0, 0, NULL, NULL, NULL
- },
-
- {
- EV_ID_ITLB_READ_ACCESS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_ITLB) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
- &group_leader_fds[EV_GROUP_CACHE_ITLB_BPU], NULL, 1, 0, 0, NULL, NULL, NULL
- }, {
- EV_ID_ITLB_READ_MISS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_ITLB) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
- &group_leader_fds[EV_GROUP_CACHE_ITLB_BPU], NULL, 1, 0, 0, NULL, NULL, NULL
- },
-
- {
- EV_ID_PBU_READ_ACCESS, PERF_TYPE_HW_CACHE,
- (PERF_COUNT_HW_CACHE_BPU) | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
- &group_leader_fds[EV_GROUP_CACHE_ITLB_BPU], NULL, 1, 0, 0, NULL, NULL, NULL
- },
-
- {EV_ID_END, 0, 0, NULL, NULL, 0, 0, 0, NULL, NULL, NULL}
-};
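-
-// The PERF_TYPE_HW_CACHE config values above follow the encoding documented
-// in perf_event_open(2):
-//
-//   config = (perf_hw_cache_id)
-//          | (perf_hw_cache_op_id << 8)
-//          | (perf_hw_cache_op_result_id << 16)
-//
-// so, for example, "L1D read misses" is
-// PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).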
-
-static int perf_init() {
- int cpu, group;
- struct perf_event_attr perf_event_attr;
- struct perf_event *current_event = NULL;
- unsigned long flags = 0;
-
- number_of_cpus = (int)get_system_cpus();
-
- // initialize all perf event file descriptors
- for(current_event = &perf_events[0]; current_event->id != EV_ID_END; current_event++) {
- current_event->fd = mallocz(number_of_cpus * sizeof(int));
- memset(current_event->fd, NO_FD, number_of_cpus * sizeof(int));
-
- current_event->prev_value = mallocz(number_of_cpus * sizeof(uint64_t));
- memset(current_event->prev_value, 0, number_of_cpus * sizeof(uint64_t));
-
- current_event->prev_time_enabled = mallocz(number_of_cpus * sizeof(uint64_t));
- memset(current_event->prev_time_enabled, 0, number_of_cpus * sizeof(uint64_t));
-
- current_event->prev_time_running = mallocz(number_of_cpus * sizeof(uint64_t));
- memset(current_event->prev_time_running, 0, number_of_cpus * sizeof(uint64_t));
- }
-
- for(group = 0; group < EV_GROUP_NUM; group++) {
- group_leader_fds[group] = mallocz(number_of_cpus * sizeof(int));
- memset(group_leader_fds[group], NO_FD, number_of_cpus * sizeof(int));
- }
-
- memset(&perf_event_attr, 0, sizeof(perf_event_attr));
-
- for(cpu = 0; cpu < number_of_cpus; cpu++) {
- for(current_event = &perf_events[0]; current_event->id != EV_ID_END; current_event++) {
- if(unlikely(current_event->disabled)) continue;
-
- perf_event_attr.type = current_event->type;
- perf_event_attr.config = current_event->config;
- perf_event_attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
-
- int fd, group_leader_fd = *(*current_event->group_leader_fd + cpu);
-
- fd = syscall(
- __NR_perf_event_open,
- &perf_event_attr,
- ALL_PIDS,
- cpu,
- group_leader_fd,
- flags
- );
-
- if(unlikely(group_leader_fd == NO_FD)) group_leader_fd = fd;
-
- if(unlikely(fd < 0)) {
- switch(errno) {
- case EACCES:
- collector_error("Cannot access the PMU: Permission denied");
- break;
- case EBUSY:
- collector_error("Another event already has exclusive access to the PMU");
- break;
- default:
- collector_error("Cannot open perf event");
- }
- collector_error("Disabling event %u", current_event->id);
- current_event->disabled = 1;
- }
-
- *(current_event->fd + cpu) = fd;
- *(*current_event->group_leader_fd + cpu) = group_leader_fd;
-
- if(unlikely(debug)) fprintf(stderr, "perf.plugin: event id = %u, cpu = %d, fd = %d, leader_fd = %d\n", current_event->id, cpu, fd, group_leader_fd);
- }
- }
-
- return 0;
-}
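-
-// A minimal, standalone sketch of the syscall pattern used above - open one
-// hardware counter, let it run, read it back. Illustrative only; compile it
-// separately; needs root or CAP_PERFMON, like the plugin:
-//
-//   #include <linux/perf_event.h>
-//   #include <sys/syscall.h>
-//   #include <sys/ioctl.h>
-//   #include <unistd.h>
-//   #include <string.h>
-//   #include <stdint.h>
-//   #include <stdio.h>
-//
-//   int main(void) {
-//       struct perf_event_attr attr;
-//       memset(&attr, 0, sizeof(attr));
-//       attr.size     = sizeof(attr);
-//       attr.type     = PERF_TYPE_HARDWARE;
-//       attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
-//       attr.disabled = 1;
-//
-//       // pid = -1, cpu = 0: every task on CPU 0; no group leader (-1)
-//       int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
-//       if(fd < 0) { perror("perf_event_open"); return 1; }
-//
-//       ioctl(fd, PERF_EVENT_IOC_RESET, 0);
-//       ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
-//       sleep(1);
-//       ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
-//
-//       uint64_t count;
-//       if(read(fd, &count, sizeof(count)) == sizeof(count))
-//           printf("instructions on cpu0: %llu\n", (unsigned long long)count);
-//
-//       close(fd);
-//       return 0;
-//   }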
-
-static void perf_free(void) {
- int cpu, group;
- struct perf_event *current_event = NULL;
-
- for(current_event = &perf_events[0]; current_event->id != EV_ID_END; current_event++) {
- for(cpu = 0; cpu < number_of_cpus; cpu++)
- if(*(current_event->fd + cpu) != NO_FD) close(*(current_event->fd + cpu));
-
- free(current_event->fd);
- free(current_event->prev_value);
- free(current_event->prev_time_enabled);
- free(current_event->prev_time_running);
- }
-
- for(group = 0; group < EV_GROUP_NUM; group++)
- free(group_leader_fds[group]);
-}
-
-static void reenable_events() {
- int group, cpu;
-
- for(group = 0; group < EV_GROUP_NUM; group++) {
- for(cpu = 0; cpu < number_of_cpus; cpu++) {
- int current_fd = *(group_leader_fds[group] + cpu);
-
- if(unlikely(current_fd == NO_FD)) continue;
-
- if(ioctl(current_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP) == -1
- || ioctl(current_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP) == -1)
- {
- collector_error("Cannot reenable event group");
- }
- }
- }
-}
-
-static int perf_collect() {
- int cpu;
- struct perf_event *current_event = NULL;
- static uint64_t prev_cpu_cycles_value = 0;
- struct {
- uint64_t value;
- uint64_t time_enabled;
- uint64_t time_running;
- } read_result;
-
- for(current_event = &perf_events[0]; current_event->id != EV_ID_END; current_event++) {
- current_event->updated = 0;
- current_event->value = 0;
-
- if(unlikely(current_event->disabled)) continue;
-
- for(cpu = 0; cpu < number_of_cpus; cpu++) {
-
- ssize_t read_size = read(current_event->fd[cpu], &read_result, sizeof(read_result));
-
- if(likely(read_size == sizeof(read_result))) {
- if (likely(read_result.time_running
- && read_result.time_running != *(current_event->prev_time_running + cpu)
- && (read_result.time_enabled / read_result.time_running < RUNNING_THRESHOLD))) {
- current_event->value += (read_result.value - *(current_event->prev_value + cpu)) \
- * (read_result.time_enabled - *(current_event->prev_time_enabled + cpu)) \
- / (read_result.time_running - *(current_event->prev_time_running + cpu));
- }
-
- *(current_event->prev_value + cpu) = read_result.value;
- *(current_event->prev_time_enabled + cpu) = read_result.time_enabled;
- *(current_event->prev_time_running + cpu) = read_result.time_running;
-
- current_event->updated = 1;
- }
- else {
- collector_error("Cannot update value for event %u", current_event->id);
- return 1;
- }
- }
-
- if(unlikely(debug)) fprintf(stderr, "perf.plugin: successfully read event id = %u, value = %"PRIu64"\n", current_event->id, current_event->value);
- }
-
- if(unlikely(perf_events[EV_ID_CPU_CYCLES].value == prev_cpu_cycles_value))
- reenable_events();
- prev_cpu_cycles_value = perf_events[EV_ID_CPU_CYCLES].value;
-
- return 0;
-}
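-
-// The scaling above compensates for counter multiplexing: when the PMU cannot
-// host every group at once, the kernel time-slices them and reports (via
-// PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING) how long each event was enabled vs.
-// actually counting, and the raw delta is extrapolated accordingly. A worked
-// example with illustrative numbers:
-//
-//   value delta        = 1,000,000 events
-//   time_enabled delta = 100 ms
-//   time_running delta =  50 ms   (scheduled on the PMU half the time)
-//   estimated delta    = 1,000,000 * 100 / 50 = 2,000,000 events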
-
-static void perf_send_metrics() {
- static int // Hardware counters
- cpu_cycles_chart_generated = 0,
- instructions_chart_generated = 0,
- ipc_chart_generated = 0,
- branch_chart_generated = 0,
- cache_chart_generated = 0,
- bus_cycles_chart_generated = 0,
- stalled_cycles_chart_generated = 0,
-
- // Software counters
- migrations_chart_generated = 0,
- alignment_chart_generated = 0,
- emulation_chart_generated = 0,
-
- // Hardware cache counters
- L1D_chart_generated = 0,
- L1D_prefetch_chart_generated = 0,
- L1I_chart_generated = 0,
- LL_chart_generated = 0,
- DTLB_chart_generated = 0,
- ITLB_chart_generated = 0,
- PBU_chart_generated = 0;
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_CPU_CYCLES].updated || perf_events[EV_ID_REF_CPU_CYCLES].updated)) {
- if(unlikely(!cpu_cycles_chart_generated)) {
- cpu_cycles_chart_generated = 1;
-
- printf("CHART %s.%s '' 'CPU cycles' 'cycles/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "cpu_cycles"
- , RRD_FAMILY_HW
- , NETDATA_CHART_PRIO_PERF_CPU_CYCLES
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "cpu");
- printf("DIMENSION %s '' absolute 1 1\n", "ref_cpu");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "cpu_cycles"
- );
- if(likely(perf_events[EV_ID_CPU_CYCLES].updated)) {
- printf(
- "SET %s = %lld\n"
- , "cpu"
- , (collected_number) perf_events[EV_ID_CPU_CYCLES].value
- );
- }
- if(likely(perf_events[EV_ID_REF_CPU_CYCLES].updated)) {
- printf(
- "SET %s = %lld\n"
- , "ref_cpu"
- , (collected_number) perf_events[EV_ID_REF_CPU_CYCLES].value
- );
- }
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_INSTRUCTIONS].updated)) {
- if(unlikely(!instructions_chart_generated)) {
- instructions_chart_generated = 1;
-
- printf("CHART %s.%s '' 'Instructions' 'instructions/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "instructions"
- , RRD_FAMILY_HW
- , NETDATA_CHART_PRIO_PERF_INSTRUCTIONS
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "instructions");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "instructions"
- );
- printf(
- "SET %s = %lld\n"
- , "instructions"
- , (collected_number) perf_events[EV_ID_INSTRUCTIONS].value
- );
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_INSTRUCTIONS].updated) && likely(perf_events[EV_ID_CPU_CYCLES].updated)) {
- if(unlikely(!ipc_chart_generated)) {
- ipc_chart_generated = 1;
-
- printf("CHART %s.%s '' '%s' 'instructions/cycle' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "instructions_per_cycle"
- , "Instructions per Cycle(IPC)"
- , RRD_FAMILY_HW
- , NETDATA_CHART_PRIO_PERF_IPC
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 100\n", "ipc");
- }
-
- printf("BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "instructions_per_cycle"
- );
-
- NETDATA_DOUBLE result = ((NETDATA_DOUBLE)perf_events[EV_ID_INSTRUCTIONS].value /
- (NETDATA_DOUBLE)perf_events[EV_ID_CPU_CYCLES].value) * 100.0;
- printf("SET %s = %lld\n"
- , "ipc"
- , (collected_number) result
- );
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_BRANCH_INSTRUCTIONS].updated || perf_events[EV_ID_BRANCH_MISSES].updated)) {
- if(unlikely(!branch_chart_generated)) {
- branch_chart_generated = 1;
-
- printf("CHART %s.%s '' 'Branch instructions' 'instructions/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "branch_instructions"
- , RRD_FAMILY_HW
- , NETDATA_CHART_PRIO_PERF_BRANCH_INSTRUCTIONS
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "instructions");
- printf("DIMENSION %s '' absolute 1 1\n", "misses");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "branch_instructions"
- );
- if(likely(perf_events[EV_ID_BRANCH_INSTRUCTIONS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "instructions"
- , (collected_number) perf_events[EV_ID_BRANCH_INSTRUCTIONS].value
- );
- }
- if(likely(perf_events[EV_ID_BRANCH_MISSES].updated)) {
- printf(
- "SET %s = %lld\n"
- , "misses"
- , (collected_number) perf_events[EV_ID_BRANCH_MISSES].value
- );
- }
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_CACHE_REFERENCES].updated || perf_events[EV_ID_CACHE_MISSES].updated)) {
- if(unlikely(!cache_chart_generated)) {
- cache_chart_generated = 1;
-
- printf("CHART %s.%s '' 'Cache operations' 'operations/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "cache"
- , RRD_FAMILY_HW
- , NETDATA_CHART_PRIO_PERF_CACHE
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "references");
- printf("DIMENSION %s '' absolute 1 1\n", "misses");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "cache"
- );
- if(likely(perf_events[EV_ID_CACHE_REFERENCES].updated)) {
- printf(
- "SET %s = %lld\n"
- , "references"
- , (collected_number) perf_events[EV_ID_CACHE_REFERENCES].value
- );
- }
- if(likely(perf_events[EV_ID_CACHE_MISSES].updated)) {
- printf(
- "SET %s = %lld\n"
- , "misses"
- , (collected_number) perf_events[EV_ID_CACHE_MISSES].value
- );
- }
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_BUS_CYCLES].updated)) {
- if(unlikely(!bus_cycles_chart_generated)) {
- bus_cycles_chart_generated = 1;
-
- printf("CHART %s.%s '' 'Bus cycles' 'cycles/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "bus_cycles"
- , RRD_FAMILY_HW
- , NETDATA_CHART_PRIO_PERF_BUS_CYCLES
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "bus");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "bus_cycles"
- );
- printf(
- "SET %s = %lld\n"
- , "bus"
- , (collected_number) perf_events[EV_ID_BUS_CYCLES].value
- );
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_STALLED_CYCLES_FRONTEND].updated || perf_events[EV_ID_STALLED_CYCLES_BACKEND].updated)) {
- if(unlikely(!stalled_cycles_chart_generated)) {
- stalled_cycles_chart_generated = 1;
-
- printf("CHART %s.%s '' 'Stalled frontend and backend cycles' 'cycles/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "stalled_cycles"
- , RRD_FAMILY_HW
- , NETDATA_CHART_PRIO_PERF_FRONT_BACK_CYCLES
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "frontend");
- printf("DIMENSION %s '' absolute 1 1\n", "backend");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "stalled_cycles"
- );
- if(likely(perf_events[EV_ID_STALLED_CYCLES_FRONTEND].updated)) {
- printf(
- "SET %s = %lld\n"
- , "frontend"
- , (collected_number) perf_events[EV_ID_STALLED_CYCLES_FRONTEND].value
- );
- }
- if(likely(perf_events[EV_ID_STALLED_CYCLES_BACKEND].updated)) {
- printf(
- "SET %s = %lld\n"
- , "backend"
- , (collected_number) perf_events[EV_ID_STALLED_CYCLES_BACKEND].value
- );
- }
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_CPU_MIGRATIONS].updated)) {
- if(unlikely(!migrations_chart_generated)) {
- migrations_chart_generated = 1;
-
- printf("CHART %s.%s '' 'CPU migrations' 'migrations' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "migrations"
- , RRD_FAMILY_SW
- , NETDATA_CHART_PRIO_PERF_MIGRATIONS
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "migrations");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "migrations"
- );
- printf(
- "SET %s = %lld\n"
- , "migrations"
- , (collected_number) perf_events[EV_ID_CPU_MIGRATIONS].value
- );
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_ALIGNMENT_FAULTS].updated)) {
- if(unlikely(!alignment_chart_generated)) {
- alignment_chart_generated = 1;
-
- printf("CHART %s.%s '' 'Alignment faults' 'faults' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "alignment_faults"
- , RRD_FAMILY_SW
- , NETDATA_CHART_PRIO_PERF_ALIGNMENT
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "faults");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "alignment_faults"
- );
- printf(
- "SET %s = %lld\n"
- , "faults"
- , (collected_number) perf_events[EV_ID_ALIGNMENT_FAULTS].value
- );
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_EMULATION_FAULTS].updated)) {
- if(unlikely(!emulation_chart_generated)) {
- emulation_chart_generated = 1;
-
- printf("CHART %s.%s '' 'Emulation faults' 'faults' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "emulation_faults"
- , RRD_FAMILY_SW
- , NETDATA_CHART_PRIO_PERF_EMULATION
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "faults");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "emulation_faults"
- );
- printf(
- "SET %s = %lld\n"
- , "faults"
- , (collected_number) perf_events[EV_ID_EMULATION_FAULTS].value
- );
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_L1D_READ_ACCESS].updated || perf_events[EV_ID_L1D_READ_MISS].updated
- || perf_events[EV_ID_L1D_WRITE_ACCESS].updated || perf_events[EV_ID_L1D_WRITE_MISS].updated)) {
- if(unlikely(!L1D_chart_generated)) {
- L1D_chart_generated = 1;
-
- printf("CHART %s.%s '' 'L1D cache operations' 'events/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "l1d_cache"
- , RRD_FAMILY_CACHE
- , NETDATA_CHART_PRIO_PERF_L1D
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "read_access");
- printf("DIMENSION %s '' absolute 1 1\n", "read_misses");
- printf("DIMENSION %s '' absolute -1 1\n", "write_access");
- printf("DIMENSION %s '' absolute -1 1\n", "write_misses");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "l1d_cache"
- );
- if(likely(perf_events[EV_ID_L1D_READ_ACCESS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "read_access"
- , (collected_number) perf_events[EV_ID_L1D_READ_ACCESS].value
- );
- }
- if(likely(perf_events[EV_ID_L1D_READ_MISS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "read_misses"
- , (collected_number) perf_events[EV_ID_L1D_READ_MISS].value
- );
- }
- if(likely(perf_events[EV_ID_L1D_WRITE_ACCESS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "write_access"
- , (collected_number) perf_events[EV_ID_L1D_WRITE_ACCESS].value
- );
- }
- if(likely(perf_events[EV_ID_L1D_WRITE_MISS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "write_misses"
- , (collected_number) perf_events[EV_ID_L1D_WRITE_MISS].value
- );
- }
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_L1D_PREFETCH_ACCESS].updated)) {
- if(unlikely(!L1D_prefetch_chart_generated)) {
- L1D_prefetch_chart_generated = 1;
-
- printf("CHART %s.%s '' 'L1D prefetch cache operations' 'prefetches/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "l1d_cache_prefetch"
- , RRD_FAMILY_CACHE
- , NETDATA_CHART_PRIO_PERF_L1D_PREFETCH
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "prefetches");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "l1d_cache_prefetch"
- );
- printf(
- "SET %s = %lld\n"
- , "prefetches"
- , (collected_number) perf_events[EV_ID_L1D_PREFETCH_ACCESS].value
- );
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_L1I_READ_ACCESS].updated || perf_events[EV_ID_L1I_READ_MISS].updated)) {
- if(unlikely(!L1I_chart_generated)) {
- L1I_chart_generated = 1;
-
- printf("CHART %s.%s '' 'L1I cache operations' 'events/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "l1i_cache"
- , RRD_FAMILY_CACHE
- , NETDATA_CHART_PRIO_PERF_L1I
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "read_access");
- printf("DIMENSION %s '' absolute 1 1\n", "read_misses");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "l1i_cache"
- );
- if(likely(perf_events[EV_ID_L1I_READ_ACCESS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "read_access"
- , (collected_number) perf_events[EV_ID_L1I_READ_ACCESS].value
- );
- }
- if(likely(perf_events[EV_ID_L1I_READ_MISS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "read_misses"
- , (collected_number) perf_events[EV_ID_L1I_READ_MISS].value
- );
- }
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_LL_READ_ACCESS].updated || perf_events[EV_ID_LL_READ_MISS].updated
- || perf_events[EV_ID_LL_WRITE_ACCESS].updated || perf_events[EV_ID_LL_WRITE_MISS].updated)) {
- if(unlikely(!LL_chart_generated)) {
- LL_chart_generated = 1;
-
- printf("CHART %s.%s '' 'LL cache operations' 'events/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "ll_cache"
- , RRD_FAMILY_CACHE
- , NETDATA_CHART_PRIO_PERF_LL
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "read_access");
- printf("DIMENSION %s '' absolute 1 1\n", "read_misses");
- printf("DIMENSION %s '' absolute -1 1\n", "write_access");
- printf("DIMENSION %s '' absolute -1 1\n", "write_misses");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "ll_cache"
- );
- if(likely(perf_events[EV_ID_LL_READ_ACCESS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "read_access"
- , (collected_number) perf_events[EV_ID_LL_READ_ACCESS].value
- );
- }
- if(likely(perf_events[EV_ID_LL_READ_MISS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "read_misses"
- , (collected_number) perf_events[EV_ID_LL_READ_MISS].value
- );
- }
- if(likely(perf_events[EV_ID_LL_WRITE_ACCESS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "write_access"
- , (collected_number) perf_events[EV_ID_LL_WRITE_ACCESS].value
- );
- }
- if(likely(perf_events[EV_ID_LL_WRITE_MISS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "write_misses"
- , (collected_number) perf_events[EV_ID_LL_WRITE_MISS].value
- );
- }
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_DTLB_READ_ACCESS].updated || perf_events[EV_ID_DTLB_READ_MISS].updated
- || perf_events[EV_ID_DTLB_WRITE_ACCESS].updated || perf_events[EV_ID_DTLB_WRITE_MISS].updated)) {
- if(unlikely(!DTLB_chart_generated)) {
- DTLB_chart_generated = 1;
-
- printf("CHART %s.%s '' 'DTLB cache operations' 'events/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "dtlb_cache"
- , RRD_FAMILY_CACHE
- , NETDATA_CHART_PRIO_PERF_DTLB
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "read_access");
- printf("DIMENSION %s '' absolute 1 1\n", "read_misses");
- printf("DIMENSION %s '' absolute -1 1\n", "write_access");
- printf("DIMENSION %s '' absolute -1 1\n", "write_misses");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "dtlb_cache"
- );
- if(likely(perf_events[EV_ID_DTLB_READ_ACCESS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "read_access"
- , (collected_number) perf_events[EV_ID_DTLB_READ_ACCESS].value
- );
- }
- if(likely(perf_events[EV_ID_DTLB_READ_MISS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "read_misses"
- , (collected_number) perf_events[EV_ID_DTLB_READ_MISS].value
- );
- }
- if(likely(perf_events[EV_ID_DTLB_WRITE_ACCESS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "write_access"
- , (collected_number) perf_events[EV_ID_DTLB_WRITE_ACCESS].value
- );
- }
- if(likely(perf_events[EV_ID_DTLB_WRITE_MISS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "write_misses"
- , (collected_number) perf_events[EV_ID_DTLB_WRITE_MISS].value
- );
- }
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_ITLB_READ_ACCESS].updated || perf_events[EV_ID_ITLB_READ_MISS].updated)) {
- if(unlikely(!ITLB_chart_generated)) {
- ITLB_chart_generated = 1;
-
- printf("CHART %s.%s '' 'ITLB cache operations' 'events/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "itlb_cache"
- , RRD_FAMILY_CACHE
- , NETDATA_CHART_PRIO_PERF_ITLB
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "read_access");
- printf("DIMENSION %s '' absolute 1 1\n", "read_misses");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "itlb_cache"
- );
- if(likely(perf_events[EV_ID_ITLB_READ_ACCESS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "read_access"
- , (collected_number) perf_events[EV_ID_ITLB_READ_ACCESS].value
- );
- }
- if(likely(perf_events[EV_ID_ITLB_READ_MISS].updated)) {
- printf(
- "SET %s = %lld\n"
- , "read_misses"
- , (collected_number) perf_events[EV_ID_ITLB_READ_MISS].value
- );
- }
- printf("END\n");
- }
-
- // ------------------------------------------------------------------------
-
- if(likely(perf_events[EV_ID_PBU_READ_ACCESS].updated)) {
- if(unlikely(!PBU_chart_generated)) {
- PBU_chart_generated = 1;
-
- printf("CHART %s.%s '' 'PBU cache operations' 'events/s' %s '' line %d %d '' %s\n"
- , RRD_TYPE_PERF
- , "pbu_cache"
- , RRD_FAMILY_CACHE
- , NETDATA_CHART_PRIO_PERF_PBU
- , update_every
- , PLUGIN_PERF_NAME
- );
- printf("DIMENSION %s '' absolute 1 1\n", "read_access");
- }
-
- printf(
- "BEGIN %s.%s\n"
- , RRD_TYPE_PERF
- , "pbu_cache"
- );
- printf(
- "SET %s = %lld\n"
- , "read_access"
- , (collected_number) perf_events[EV_ID_PBU_READ_ACCESS].value
- );
- printf("END\n");
- }
-}
-
-void parse_command_line(int argc, char **argv) {
- int i, plugin_enabled = 0;
-
- for(i = 1; i < argc ; i++) {
- if(isdigit(*argv[i]) && !freq) {
- int n = str2i(argv[i]);
- if(n > 0 && n < 86400) {
- freq = n;
- continue;
- }
- }
- else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
- printf("perf.plugin %s\n", VERSION);
- exit(0);
- }
- else if(strcmp("all", argv[i]) == 0) {
- struct perf_event *current_event = NULL;
-
- for(current_event = &perf_events[0]; current_event->id != EV_ID_END; current_event++)
- current_event->disabled = 0;
-
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("cycles", argv[i]) == 0) {
- perf_events[EV_ID_CPU_CYCLES].disabled = 0;
- perf_events[EV_ID_REF_CPU_CYCLES].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("instructions", argv[i]) == 0) {
- perf_events[EV_ID_INSTRUCTIONS].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("branch", argv[i]) == 0) {
- perf_events[EV_ID_BRANCH_INSTRUCTIONS].disabled = 0;
- perf_events[EV_ID_BRANCH_MISSES].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("cache", argv[i]) == 0) {
- perf_events[EV_ID_CACHE_REFERENCES].disabled = 0;
- perf_events[EV_ID_CACHE_MISSES].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("bus", argv[i]) == 0) {
- perf_events[EV_ID_BUS_CYCLES].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("stalled", argv[i]) == 0) {
- perf_events[EV_ID_STALLED_CYCLES_FRONTEND].disabled = 0;
- perf_events[EV_ID_STALLED_CYCLES_BACKEND].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("migrations", argv[i]) == 0) {
- perf_events[EV_ID_CPU_MIGRATIONS].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("alignment", argv[i]) == 0) {
- perf_events[EV_ID_ALIGNMENT_FAULTS].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("emulation", argv[i]) == 0) {
- perf_events[EV_ID_EMULATION_FAULTS].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("L1D", argv[i]) == 0) {
- perf_events[EV_ID_L1D_READ_ACCESS].disabled = 0;
- perf_events[EV_ID_L1D_READ_MISS].disabled = 0;
- perf_events[EV_ID_L1D_WRITE_ACCESS].disabled = 0;
- perf_events[EV_ID_L1D_WRITE_MISS].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("L1D-prefetch", argv[i]) == 0) {
- perf_events[EV_ID_L1D_PREFETCH_ACCESS].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("L1I", argv[i]) == 0) {
- perf_events[EV_ID_L1I_READ_ACCESS].disabled = 0;
- perf_events[EV_ID_L1I_READ_MISS].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("LL", argv[i]) == 0) {
- perf_events[EV_ID_LL_READ_ACCESS].disabled = 0;
- perf_events[EV_ID_LL_READ_MISS].disabled = 0;
- perf_events[EV_ID_LL_WRITE_ACCESS].disabled = 0;
- perf_events[EV_ID_LL_WRITE_MISS].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("DTLB", argv[i]) == 0) {
- perf_events[EV_ID_DTLB_READ_ACCESS].disabled = 0;
- perf_events[EV_ID_DTLB_READ_MISS].disabled = 0;
- perf_events[EV_ID_DTLB_WRITE_ACCESS].disabled = 0;
- perf_events[EV_ID_DTLB_WRITE_MISS].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("ITLB", argv[i]) == 0) {
- perf_events[EV_ID_ITLB_READ_ACCESS].disabled = 0;
- perf_events[EV_ID_ITLB_READ_MISS].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("PBU", argv[i]) == 0) {
- perf_events[EV_ID_PBU_READ_ACCESS].disabled = 0;
- plugin_enabled = 1;
- continue;
- }
- else if(strcmp("debug", argv[i]) == 0) {
- debug = 1;
- continue;
- }
- else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
- fprintf(stderr,
- "\n"
- " netdata perf.plugin %s\n"
- " Copyright (C) 2019 Netdata Inc.\n"
- " Released under GNU General Public License v3 or later.\n"
- " All rights reserved.\n"
- "\n"
- " This program is a data collector plugin for netdata.\n"
- "\n"
- " Available command line options:\n"
- "\n"
- " COLLECTION_FREQUENCY data collection frequency in seconds\n"
- " minimum: %d\n"
- "\n"
- " all enable all charts\n"
- "\n"
- " cycles enable CPU cycles chart\n"
- "\n"
- " instructions enable Instructions chart\n"
- "\n"
- " branch enable Branch instructions chart\n"
- "\n"
- " cache enable Cache operations chart\n"
- "\n"
- " bus enable Bus cycles chart\n"
- "\n"
- " stalled enable Stalled frontend and backend cycles chart\n"
- "\n"
- " migrations enable CPU migrations chart\n"
- "\n"
- " alignment enable Alignment faults chart\n"
- "\n"
- " emulation enable Emulation faults chart\n"
- "\n"
- " L1D enable L1D cache operations chart\n"
- "\n"
- " L1D-prefetch enable L1D prefetch cache operations chart\n"
- "\n"
- " L1I enable L1I cache operations chart\n"
- "\n"
- " LL enable LL cache operations chart\n"
- "\n"
- " DTLB enable DTLB cache operations chart\n"
- "\n"
- " ITLB enable ITLB cache operations chart\n"
- "\n"
- " PBU enable PBU cache operations chart\n"
- "\n"
- " debug enable verbose output\n"
- " default: disabled\n"
- "\n"
- " -v\n"
- " -V\n"
- " --version print version and exit\n"
- "\n"
- " -h\n"
- " --help print this message and exit\n"
- "\n"
- " For more information:\n"
- " https://github.com/netdata/netdata/tree/master/collectors/perf.plugin\n"
- "\n"
- , VERSION
- , update_every
- );
- exit(1);
- }
-
- collector_error("ignoring parameter '%s'", argv[i]);
- }
-
- if(!plugin_enabled){
- collector_info("no charts enabled - nothing to do.");
- printf("DISABLE\n");
- exit(1);
- }
-}
-
-int main(int argc, char **argv) {
- clocks_init();
- nd_log_initialize_for_external_plugins("perf.plugin");
-
- parse_command_line(argc, argv);
-
- errno = 0;
-
- if(freq >= update_every)
- update_every = freq;
- else if(freq)
- collector_error("update frequency %d seconds is too small for PERF. Using %d.", freq, update_every);
-
- if(unlikely(debug)) fprintf(stderr, "perf.plugin: calling perf_init()\n");
- int perf = !perf_init();
-
- // ------------------------------------------------------------------------
- // the main loop
-
- if(unlikely(debug)) fprintf(stderr, "perf.plugin: starting data collection\n");
-
- time_t started_t = now_monotonic_sec();
-
- size_t iteration;
- usec_t step = update_every * USEC_PER_SEC;
-
- heartbeat_t hb;
- heartbeat_init(&hb);
- for(iteration = 0; 1; iteration++) {
- usec_t dt = heartbeat_next(&hb, step);
-
- if(unlikely(netdata_exit)) break;
-
- if(unlikely(debug && iteration))
- fprintf(stderr, "perf.plugin: iteration %zu, dt %"PRIu64" usec\n"
- , iteration
- , dt
- );
-
- if(likely(perf)) {
- if(unlikely(debug)) fprintf(stderr, "perf.plugin: calling perf_collect()\n");
- perf = !perf_collect();
-
- if(likely(perf)) {
- if(unlikely(debug)) fprintf(stderr, "perf.plugin: calling perf_send_metrics()\n");
- perf_send_metrics();
- }
- }
-
- fflush(stdout);
-
- // restart check: exit after 14400 seconds (4 hours) so that Netdata restarts the plugin
- if(now_monotonic_sec() - started_t > 14400) break;
- }
-
- collector_info("process exiting");
- perf_free();
-}
diff --git a/collectors/plugins.d/Makefile.am b/collectors/plugins.d/Makefile.am
deleted file mode 100644
index 67fed309d..000000000
--- a/collectors/plugins.d/Makefile.am
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-SUBDIRS = \
- $(NULL)
-
-dist_noinst_DATA = \
- gperf-config.txt \
- README.md \
- $(NULL)
diff --git a/collectors/plugins.d/README.md b/collectors/plugins.d/README.md
deleted file mode 100644
index 0752d389b..000000000
--- a/collectors/plugins.d/README.md
+++ /dev/null
@@ -1,680 +0,0 @@
-<!--
-title: "External plugins"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/plugins.d/README.md"
-sidebar_label: "External plugins"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/External plugins"
--->
-
-# External plugins
-
-`plugins.d` is the Netdata internal plugin that collects metrics
-from external processes, thus allowing Netdata to use **external plugins**.
-
-## Provided External Plugins
-
-| plugin | language | O/S | description |
-|:------------------------------------------------------------------------------------------------------:|:--------:|:--------------:|:----------------------------------------------------------------------------------------------------------------------------------------|
-| [apps.plugin](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md) | `C` | linux, freebsd | monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**. |
-| [charts.d.plugin](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/README.md) | `BASH` | all | a **plugin orchestrator** for data collection modules written in `BASH` v4+. |
-| [cups.plugin](https://github.com/netdata/netdata/blob/master/collectors/cups.plugin/README.md) | `C` | all | monitors **CUPS**. |
-| [ebpf.plugin](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md) | `C` | linux | monitors different metrics on environments using kernel internal functions. |
-| [go.d.plugin](https://github.com/netdata/go.d.plugin/blob/master/README.md) | `GO` | all | collects metrics from the system, applications, or third-party APIs. |
-| [ioping.plugin](https://github.com/netdata/netdata/blob/master/collectors/ioping.plugin/README.md) | `C` | all | measures disk latency. |
-| [freeipmi.plugin](https://github.com/netdata/netdata/blob/master/collectors/freeipmi.plugin/README.md) | `C` | linux | collects metrics from enterprise hardware sensors, on Linux servers. |
-| [nfacct.plugin](https://github.com/netdata/netdata/blob/master/collectors/nfacct.plugin/README.md) | `C` | linux | collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`. |
-| [xenstat.plugin](https://github.com/netdata/netdata/blob/master/collectors/xenstat.plugin/README.md) | `C` | linux | collects XenServer and XCP-ng metrics using `libxenstat`. |
-| [perf.plugin](https://github.com/netdata/netdata/blob/master/collectors/perf.plugin/README.md) | `C` | linux | collects CPU performance metrics using performance monitoring units (PMU). |
-| [python.d.plugin](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md) | `python` | all | a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported). |
-| [slabinfo.plugin](https://github.com/netdata/netdata/blob/master/collectors/slabinfo.plugin/README.md) | `C` | linux | collects kernel internal cache objects (SLAB) metrics. |
-
-Plugin orchestrators may also be described as **modular plugins**. They are modular because they accept custom-made modules. Writing modules for these plugins is easier than accessing the native Netdata API directly. You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under python.d.plugin for the python orchestrator).
-Each of these modular plugins has its own method for defining modules. Please check the examples and their documentation.
-
-## Motivation
-
-This plugin allows Netdata to use **external plugins** for data collection:
-
-1. external data collection plugins may be written in any computer language.
-
-2. external data collection plugins may use O/S capabilities or `setuid` to
- run with escalated privileges (compared to the `netdata` daemon).
- The communication between the external plugin and Netdata is unidirectional
- (from the plugin to Netdata), so that Netdata cannot manipulate an external
- plugin running with escalated privileges.
-
-## Operation
-
-Each of the external plugins is expected to run forever.
-Netdata starts it when Netdata starts, and stops it when Netdata exits.
-
-If the external plugin exits or crashes, Netdata will log an error.
-If the external plugin exits or crashes without pushing metrics to Netdata, Netdata will not start it again.
-
-- Plugins that exit with any value other than zero will be disabled. Plugins that exit with zero will be restarted after some time.
-- Plugins may also be disabled by Netdata if they output things that Netdata does not understand.
-
-The `stdout` of external plugins is connected to Netdata to receive metrics,
-with the API defined below.
-
-The `stderr` of external plugins is connected to Netdata's `error.log`.
-
-Plugins can create any number of charts with any number of dimensions each. Each chart can have its own characteristics independently of the others generated by the same plugin. For example, one chart may have an update frequency of 1 second, another may have 5 seconds and a third may have 10 seconds.
-
-## Configuration
-
-Netdata supplies the environment variables `NETDATA_USER_CONFIG_DIR` (for user-supplied configuration files) and `NETDATA_STOCK_CONFIG_DIR` (for Netdata-supplied ones) to identify the directories where configuration files are stored. It is up to the plugin to read the configuration it needs.
-
-The `[plugins]` section of `netdata.conf` contains a list of all the plugins found on the system where Netdata runs, with a boolean setting to enable or disable them.
-
-Example:
-
-```
-[plugins]
- # enable running new plugins = yes
- # check for new plugins every = 60
-
- # charts.d = yes
- # ioping = yes
- # python.d = yes
-```
-
-The setting `enable running new plugins` sets the default behavior for all external plugins. It can be
-overridden for individual plugins by setting the corresponding plugin value to either `yes` or `no`.
-
-The setting `check for new plugins every` sets the interval between scans of the directory
-`/usr/libexec/netdata/plugins.d`. New plugins can be added any time, and Netdata will detect them in a timely manner.
-
-For each of the external plugins enabled, another `netdata.conf` section
-is created, in the form of `[plugin:NAME]`, where `NAME` is the name of the external plugin.
-This section allows controlling the update frequency of the plugin and providing
-additional command line arguments to it.
-
-For example, for `apps.plugin` the following section is available:
-
-```
-[plugin:apps]
- # update every = 1
- # command options =
-```
-
-- `update every` controls the granularity of the external plugin.
-- `command options` allows giving additional command line options to the plugin.
-
-Netdata provides the external plugins the environment variable `NETDATA_UPDATE_EVERY`, in seconds (the default is 1). This is the **minimum update frequency** for all charts. A plugin that updates values more frequently than this is just wasting resources.
-
-Netdata will call the plugin with just one command line parameter: the number of seconds the user requested this plugin to update its data (by default this is also 1).
-
-Other than the above, the plugin configuration is up to the plugin.
-
-Keep in mind that the user may use the Netdata configuration to override chart and dimension parameters. This is transparent to the plugin.
-
-### Autoconfiguration
-
-Plugins should attempt to autoconfigure themselves when possible.
-
-For example, if your plugin wants to monitor `squid`, you can search for it on port `3128` or `8080`. If either succeeds, you can proceed. If both fail, you can output an error (on stderr) saying that you cannot find `squid` running and giving instructions about the plugin configuration. Then you can stop (exit with a non-zero value), so that Netdata will not attempt to start the plugin again.
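-
-As a hedged illustration of this pattern, the probe could look like the C sketch below. The ports come from the `squid` example above, and `service_is_reachable()` is a hypothetical helper, not part of any Netdata API:
-
-```c
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <netinet/in.h>
-#include <arpa/inet.h>
-#include <sys/socket.h>
-
-/* try a TCP connect() to localhost:port; returns 1 if something listens there */
-static int service_is_reachable(int port) {
-    int fd = socket(AF_INET, SOCK_STREAM, 0);
-    if(fd == -1) return 0;
-
-    struct sockaddr_in addr;
-    memset(&addr, 0, sizeof(addr));
-    addr.sin_family = AF_INET;
-    addr.sin_port = htons(port);
-    inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
-
-    int ok = (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0);
-    close(fd);
-    return ok;
-}
-
-int main(void) {
-    if(!service_is_reachable(3128) && !service_is_reachable(8080)) {
-        fprintf(stderr, "cannot find squid on ports 3128 or 8080\n");
-        printf("DISABLE\n");  /* tell Netdata not to restart the plugin */
-        exit(1);
-    }
-
-    /* ... proceed with data collection ... */
-    return 0;
-}
-```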
-
-## External Plugins API
-
-Any program that can print a few values to its standard output can become a Netdata external plugin.
-
-Netdata parses lines starting with:
-
-- `CHART` - create or update a chart
-- `DIMENSION` - add or update a dimension to the chart just created
-- `VARIABLE` - define a variable (to be used in health calculations)
-- `CLABEL` - add a label to a chart
-- `CLABEL_COMMIT` - commit added labels to the chart
-- `FUNCTION` - define a function that can be called later to execute it
-- `BEGIN` - initialize data collection for a chart
-- `SET` - set the value of a dimension for the initialized chart
-- `END` - complete data collection for the initialized chart
-- `FLUSH` - ignore the last collected values
-- `DISABLE` - disable this plugin
-
-A single program can produce any number of charts with any number of dimensions each.
-
-Charts can be added at any time (not just at the beginning).
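-
-For example, a minimal (hypothetical) session creating one chart with one dimension and committing a single value looks like this - the chart and dimension names are illustrative:
-
-```
-CHART example.random '' 'A random number' 'number'
-DIMENSION random
-BEGIN example.random
-SET random = 42
-END
-```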
-
-### Command line parameters
-
-The plugin **MUST** accept just **one** parameter: **the number of seconds it is
-expected to update the values for its charts**. The value passed by Netdata
-to the plugin is controlled via its configuration file (so there is no need
-for the plugin to handle this configuration option).
-
-The external plugin can override the update frequency. For example, the server may
-request per-second updates, but the plugin may ignore that and update its charts
-every 5 seconds.
-
-### Environment variables
-
-There are a few environment variables that are set by `netdata` and are
-available for the plugin to use.
-
-| variable | description |
-|:---------------------------:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `NETDATA_USER_CONFIG_DIR` | The directory where all Netdata-related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`). |
-| `NETDATA_STOCK_CONFIG_DIR` | The directory where all Netdata-related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`). |
-| `NETDATA_PLUGINS_DIR` | The directory where all Netdata plugins are stored. |
-| `NETDATA_USER_PLUGINS_DIRS` | The list of directories where custom plugins are stored. |
-| `NETDATA_WEB_DIR` | The directory where the web files of Netdata are saved. |
-| `NETDATA_CACHE_DIR` | The directory where the cache files of Netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory. |
-| `NETDATA_LOG_DIR` | The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of Netdata. |
-| `NETDATA_HOST_PREFIX` | This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path. |
-| `NETDATA_DEBUG_FLAGS` | This is a number (probably in hex, starting with `0x`) that enables certain Netdata debugging features. Check the Netdata daemon tracing options for more information. |
-| `NETDATA_UPDATE_EVERY` | The minimum number of seconds between chart refreshes. This is like the **internal clock** of Netdata (it is user-configurable, defaulting to `1`). It is meaningless for a plugin to update its values more frequently than this number of seconds. |
-
-### The output of the plugin
-
-The plugin should output instructions for Netdata to its output (`stdout`). Since this uses pipes, please make sure you flush stdout after every iteration.
-
-#### DISABLE
-
-`DISABLE` will disable this plugin. This will prevent Netdata from restarting the plugin. You can also exit with the value `1` to have the same effect.
-
-#### HOST_DEFINE
-
-`HOST_DEFINE` defines a new (or updates an existing) virtual host.
-
-The template is:
-
-> HOST_DEFINE machine_guid hostname
-
-where:
-
-- `machine_guid`
-
- uniquely identifies the host; this is what will be needed to add charts to the host.
-
-- `hostname`
-
- is the hostname of the virtual host
-
-#### HOST_LABEL
-
-`HOST_LABEL` adds a key-value pair to the virtual host labels. It has to be given between `HOST_DEFINE` and `HOST_DEFINE_END`.
-
-The template is:
-
-> HOST_LABEL key value
-
-where:
-
-- `key`
-
- uniquely identifies the key of the label
-
-- `value`
-
- is the value associated with this key
-
-There are a few special keys that are used to define the system information of the monitored system:
-
-- `_cloud_provider_type`
-- `_cloud_instance_type`
-- `_cloud_instance_region`
-- `_os_name`
-- `_os_version`
-- `_kernel_version`
-- `_system_cores`
-- `_system_cpu_freq`
-- `_system_ram_total`
-- `_system_disk_space`
-- `_architecture`
-- `_virtualization`
-- `_container`
-- `_container_detection`
-- `_virt_detection`
-- `_is_k8s_node`
-- `_install_type`
-- `_prebuilt_arch`
-- `_prebuilt_dist`
-
-#### HOST_DEFINE_END
-
-`HOST_DEFINE_END` commits the host information, creating a new host entity, or updating an existing one with the same `machine_guid`.
-
-#### HOST
-
-`HOST` switches data collection between hosts.
-
-The template is:
-
-> HOST machine_guid
-
-where:
-
-- `machine_guid`
-
- is the UUID of the host to switch to. After this command, every other command following it is assumed to be associated with this host.
- Setting machine_guid to `localhost` switches data collection to the local host.
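-
-For example, a plugin could define a virtual host, label it, and switch data collection to it like this (the GUID and hostname below are illustrative):
-
-```
-HOST_DEFINE 11111111-2222-3333-4444-555555555555 myvirtualhost
-HOST_LABEL _os_name linux
-HOST_DEFINE_END
-HOST 11111111-2222-3333-4444-555555555555
-```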
-
-#### CHART
-
-`CHART` defines a new chart.
-
-The template is:
-
-> CHART type.id name title units \[family \[context \[charttype \[priority \[update_every \[options \[plugin [module]]]]]]]]
-
-where:
-
-- `type.id`
-
- uniquely identifies the chart,
- this is what will be needed to add values to the chart
-
- the `type` part controls the menu the charts will appear in
-
-- `name`
-
- is the name that will be presented to the user instead of `id` in `type.id`. This means that only the `id` part of
- `type.id` is changed. When a name has been given, the chart is indexed (and can be referred) as both `type.id` and
- `type.name`. You can set name to `''`, or `null`, or `(null)` to disable it. If a chart with the same name already
- exists, a serial number is automatically attached to the name to avoid naming collisions.
-
-- `title`
-
- the text above the chart
-
-- `units`
-
- the label of the vertical axis of the chart,
- all dimensions added to a chart should have the same units
- of measurement
-
-- `family`
-
- is used to group charts together
- (for example all eth0 charts should say: eth0),
- if empty or missing, the `id` part of `type.id` will be used
-
- this controls the sub-menu on the dashboard
-
-- `context`
-
- the context gives the template of the chart. For example, if multiple charts present the same information for a different family, they should have the same `context`
-
- this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also for applying alerts to it
-
-- `charttype`
-
- one of `line`, `area` or `stacked`,
- if empty or missing, `line` will be used
-
-- `priority`
-
- is the relative priority of the charts as rendered on the web page,
- lower numbers make the charts appear before the ones with higher numbers,
- if empty or missing, `1000` will be used
-
-- `update_every`
-
- overrides the update frequency set by the server;
- if empty or missing, the user-configured value will be used
-
-- `options`
-
- a space separated list of options, enclosed in quotes. Four options are currently supported:
-
-  - `obsolete` to mark a chart as obsolete (Netdata will hide it and delete it after some time)
-  - `detail` to mark a chart as insignificant (this may be used by dashboards to make the chart smaller, or to visualize a less important chart in a suitable way)
-  - `store_first` to make Netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first collected value of incremental dimensions is not zero based, unrealistic spikes will appear with this option set)
-  - `hidden` to perform all operations on a chart, but not offer it on dashboards (the chart will still be sent to external databases)
-
- `CHART` options were added in Netdata v1.7; the `hidden` option was added in v1.10.
-
-- `plugin` and `module`
-
- both are just names that are used to let the user identify the plugin and the module that generated the chart. If `plugin` is unset or empty, Netdata will automatically set it to the filename of the plugin that generated the chart. `module` has no default.
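-
-Putting the fields together, a complete (hypothetical) chart definition could look like this - the chart, plugin and module names are illustrative, and every field after `units` is optional:
-
-```
-CHART example.requests '' 'Example Requests' 'requests/s' web example.requests line 1000 1 '' my.plugin mymodule
-```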
-
-#### DIMENSION
-
-`DIMENSION` defines a new dimension for the chart.
-
-The template is:
-
-> DIMENSION id \[name \[algorithm \[multiplier \[divisor [options]]]]]
-
-where:
-
-- `id`
-
- the `id` of this dimension (it is a text value, not numeric),
- this will be needed later to add values to the dimension
-
- We suggest avoiding the use of `.` in dimension ids. External databases expect metrics to be `.`-separated, and people will get confused if a dimension id contains a dot.
-
-- `name`
-
- the name of the dimension as it will appear at the legend of the chart,
- if empty or missing the `id` will be used
-
-- `algorithm`
-
- one of:
-
- - `absolute`
-
- the value is drawn as-is (interpolated to the second boundary);
- if `algorithm` is empty, invalid or missing, `absolute` is used
-
- - `incremental`
-
- the value increases over time,
- the difference from the last value is presented in the chart,
- the server interpolates the value and calculates a per second figure
-
- - `percentage-of-absolute-row`
-
- the % of this value compared to the total of all dimensions
-
- - `percentage-of-incremental-row`
-
- the % of this value compared to the incremental total of
- all dimensions
-
-- `multiplier`
-
- an integer value to multiply the collected value,
- if empty or missing, `1` is used
-
-- `divisor`
-
- an integer value to divide the collected value,
- if empty or missing, `1` is used
-
-- `options`
-
- a space separated list of options, enclosed in quotes. Two options are supported: `obsolete` to mark a dimension as obsolete (Netdata will delete it after some time) and `hidden` to make this dimension hidden; it will take part in the calculations but will not be presented in the chart.
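-
-As a hypothetical example, a bandwidth chart could define two `incremental` dimensions, converting the collected bytes to kilobits (multiplier `8`, divisor `1000`) and drawing `sent` below zero via a negative multiplier:
-
-```
-DIMENSION received in incremental 8 1000
-DIMENSION sent out incremental -8 1000
-```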
-
-#### VARIABLE
-
-> VARIABLE [SCOPE] name = value
-
-`VARIABLE` defines a variable that can be used in alerts. This is used for setting constants (like the maximum connections a server may accept).
-
-Variables support 2 scopes:
-
-- `GLOBAL` or `HOST` to define the variable at the host level.
-- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (e.g. Netdata monitors 2 MySQL servers, and you need to set the `max_connections` each server accepts). Chart-local variables are ideal for building alert templates.
-
-The position of the `VARIABLE` line sets its default scope (in case you do not specify a scope). So, defining a `VARIABLE` before any `CHART`, or between `END` and `BEGIN` (outside any chart), sets `GLOBAL` scope, while defining a `VARIABLE` just after a `CHART` or a `DIMENSION`, or within the `BEGIN` - `END` block of a chart, sets `LOCAL` scope.
-
-These variables can be set and updated at any point.
-
-Variable names should use alphanumeric characters, `.` and `_`.
-
-The `value` is floating point (Netdata uses `long double`).
-
-Variables are transferred to upstream Netdata servers (streaming and database replication).
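-
-For example, a plugin could define a host-level constant and a chart-local one (all names and values here are illustrative):
-
-```
-VARIABLE GLOBAL system_max_processes = 32768
-CHART mysql_local.connections '' 'MySQL Connections' 'connections'
-VARIABLE CHART max_connections = 151
-```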
-
-#### CLABEL
-
-> CLABEL name value source
-
-`CLABEL` defines a label used to organize and identify a chart.
-
-Name and value accept characters according to the following table:
-
-| Character | Symbol | Label Name | Label Value |
-|---------------------|:------:|:----------:|:-----------:|
-| UTF-8 character | UTF-8 | _ | keep |
-| Lower case letter | [a-z] | keep | keep |
-| Upper case letter | [A-Z] | keep | [a-z] |
-| Digit | [0-9] | keep | keep |
-| Underscore | _ | keep | keep |
-| Minus | - | keep | keep |
-| Plus | + | _ | keep |
-| Colon | : | _ | keep |
-| Semicolon | ; | _ | : |
-| Equal | = | _ | : |
-| Period | . | keep | keep |
-| Comma | , | . | . |
-| Slash | / | keep | keep |
-| Backslash | \ | / | / |
-| At | @ | _ | keep |
-| Space | ' ' | _ | keep |
-| Opening parenthesis | ( | _ | keep |
-| Closing parenthesis | ) | _ | keep |
-| Anything else | | _ | _ |
-
-The `source` is an integer field that can have the following values:
-- `1`: The value was set automatically.
-- `2`: The value was set manually.
-- `4`: This is a Kubernetes (K8s) label.
-- `8`: This is a label defined using `netdata` agent cloud link.
-
-#### CLABEL_COMMIT
-
-`CLABEL_COMMIT` indicates that all labels were defined and the chart can be updated.
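-
-For example, a plugin could attach labels to the chart it just defined and commit them like this (the names and values are illustrative; the `source` of `1` means "set automatically"):
-
-```
-CLABEL mount_point /mnt/data 1
-CLABEL filesystem ext4 1
-CLABEL_COMMIT
-```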
-
-#### FUNCTION
-
-> FUNCTION [GLOBAL] "name and parameters of the function" timeout "help string for users"
-
-A function can be used by users to ask for more information from the collector. Netdata maintains a registry of functions at 2 levels:
-
-- per node
-- per chart
-
-Both node and chart functions are exactly the same, but chart functions allow Netdata to relate functions with charts and therefore present a context-sensitive menu of functions related to the chart the user is viewing.
-
-A function is identified by a string. The allowed characters in the function definition are:
-
-| Character | Symbol | In Functions |
-|-------------------|:------:|:------------:|
-| UTF-8 character | UTF-8 | keep |
-| Lower case letter | [a-z] | keep |
-| Upper case letter | [A-Z] | keep |
-| Digit | [0-9] | keep |
-| Underscore | _ | keep |
-| Comma | , | keep |
-| Minus | - | keep |
-| Period | . | keep |
-| Colon | : | keep |
-| Slash | / | keep |
-| Space | ' ' | keep |
-| Semicolon | ; | : |
-| Equal | = | : |
-| Backslash | \ | / |
-| Anything else | | _ |
-
-Users can get a list of all the registered functions using the `/api/v1/functions` endpoint of Netdata.
-
-Users can call functions using the `/api/v1/function` endpoint of Netdata.
-Once a function is called, the plugin will receive at its standard input a command that looks like this:
-
-> FUNCTION transaction_id timeout "name and parameters of the function"
-
-The plugin is expected to parse and validate `name and parameters of the function`. Netdata allows users to edit this string, append more parameters or even change the ones the plugin originally exposed. To minimize the security risk, Netdata guarantees that only the characters shown above are accepted in function definitions, but still the plugin should carefully inspect the `name and parameters of the function` to ensure that it is valid and not harmful.
-
-If the plugin rejects the request, it should respond with this:
-
-```
-FUNCTION_RESULT_BEGIN transaction_id 400 application/json
-{
- "status": 400,
- "error_message": "description of the rejection reasons"
-}
-FUNCTION_RESULT_END
-```
-
-If the plugin prepares a response, it should send (via its standard output, together with the collected data, but not interleaved with them):
-
-> FUNCTION_RESULT_BEGIN transaction_id http_error_code content_type expiration
-
-Where:
-
- - `transaction_id` is the transaction id that Netdata sent for this function execution
- - `http_error_code` is the HTTP status code Netdata should respond with; `200` is the "ok" response
- - `content_type` is the content type of the response
- - `expiration` is the absolute timestamp (number, unix epoch) at which this response expires
-
-Immediately after this, all text is assumed to be the response content.
-The content is text and line oriented. The maximum line length accepted is 15kb; longer lines will be truncated.
-The type of the content itself depends on the plugin and the UI.
-
-To terminate the message, Netdata seeks a line with just this:
-
-> FUNCTION_RESULT_END
-
-This defines the end of the message. `FUNCTION_RESULT_END` should appear in a line alone, without any other text, so it is wise to add `\n` before and after it.
-
-After this line, Netdata resumes processing collected metrics from the plugin.
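-
-Putting it together, a successful (hypothetical) response could look like this - the transaction id, expiration timestamp and payload are illustrative:
-
-```
-FUNCTION_RESULT_BEGIN 12345678 200 application/json 1700000000
-{
-  "status": 200,
-  "data": []
-}
-FUNCTION_RESULT_END
-```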
-
-## Data collection
-
-Data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines.
-
-> BEGIN type.id [microseconds]
-
-- `type.id`
-
- is the unique identification of the chart (as given in `CHART`)
-
-- `microseconds`
-
- is the number of microseconds since the last update of the chart. It is optional.
-
- Under heavy system load, the system may have some latency transferring
- data from the plugins to Netdata via the pipe. This number improves
- accuracy significantly, since the plugin is able to calculate the
- duration between its iterations better than Netdata.
-
- The first time the plugin is started, no microseconds should be given
- to Netdata.
-
-> SET id = value
-
-- `id`
-
- is the unique identification of the dimension (of the chart just began)
-
-- `value`
-
- is the collected value; only integer values are collected. If you want to push fractional values, multiply this value by 100 or 1000 and set the `DIMENSION` divisor to the same value.
-
-> END
-
- END does not take any parameters; it commits the collected values for all dimensions to the chart. If a dimension was not `SET`, its value will be empty for this commit.
-
-More `SET` lines may appear to update all the dimensions of the chart,
-all of them within one `BEGIN` -> `END` block.
-
-All `SET` lines within a single `BEGIN` -> `END` block have to refer to the
-same chart.
-
-If more charts need to be updated, each chart should have its own
-`BEGIN` -> `SET` -> `END` block.
-
-If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it,
-it can issue a `FLUSH`. The `FLUSH` command will instruct Netdata to ignore
-all the values collected since the last `BEGIN` command.
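-
-For example (chart and dimension names are illustrative), the first value below is discarded because of the `FLUSH`, and only the second one is committed:
-
-```
-BEGIN example.random
-SET random = 42
-FLUSH
-BEGIN example.random
-SET random = 43
-END
-```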
-
-If a plugin does not behave properly (outputs invalid lines, or does not
-follow these guidelines), it will be disabled by Netdata.
-
-### Collected values
-
-Netdata will collect any **signed** value in the 64bit range:
-`-9,223,372,036,854,775,808` to `+9,223,372,036,854,775,807`
-
-If a value is not collected, leave it empty, like this:
-
-`SET id =`
-
-or do not output the line at all.
-
-## Modular Plugins
-
-1. **python**, use `python.d.plugin`; there are many examples in the [python.d
- directory](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md)
-
- Python is ideal for Netdata plugins. It is a simple yet powerful way to collect data, and it has a very small memory footprint, although it is not the most CPU-efficient way to do it.
-
-2. **BASH**, use `charts.d.plugin`; there are many examples in the [charts.d
- directory](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/README.md)
-
- BASH is the simplest scripting language for collecting values. It is, though, the least efficient in terms of CPU resources. You can use it to collect data quickly, but extensive use of it might consume a lot of system resources.
-
-3. **C**
-
- Of course, C is the most efficient way of collecting data. This is why Netdata itself is written in C.
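-
- To illustrate, here is a minimal sketch of a C external plugin that uses only the text protocol described above; the chart and dimension names are illustrative:
-
-```c
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-int main(int argc, char **argv) {
-    /* Netdata passes the requested update frequency as the only parameter */
-    int update_every = (argc > 1) ? atoi(argv[1]) : 1;
-    if(update_every < 1) update_every = 1;
-
-    /* define the chart and its dimension once, at startup */
-    printf("CHART example.random '' 'A random number' 'number' random '' line 1000 %d\n", update_every);
-    printf("DIMENSION random '' absolute 1 1\n");
-
-    for(;;) {
-        printf("BEGIN example.random\n");
-        printf("SET random = %ld\n", random() % 100);
-        printf("END\n");
-        fflush(stdout);  /* stdout is a pipe - always flush after each iteration */
-        sleep(update_every);
-    }
-    return 0;
-}
-```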
-
-## Writing Plugins Properly
-
-There are a few rules for writing plugins properly:
-
-1. Respect system resources
-
- Pay special attention to efficiency:
-
- - Initialize everything once, at the beginning. Initialization is not an expensive operation. Your plugin will most probably be started once and run forever. So, do whatever heavy operation is needed at the beginning, just once.
- - Do the absolute minimum while iterating to collect values repeatedly.
- - If you need to connect to another server to collect values, avoid re-connects if possible. Connect just once, with keep-alive (for HTTP) enabled and collect values using the same connection.
- - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation while iterating to collect values.
- - Avoid running external commands when possible. If you are writing shell scripts, especially avoid pipes (each pipe is another fork, a very expensive operation).
-
-2. The best way to iterate at a constant pace is this pseudo code:
-
-```js
- var update_every = argv[1] * 1000; /* seconds * 1000 = milliseconds */
-
- readConfiguration();
-
- if(!verifyWeCanCollectValues()) {
- print("DISABLE");
- exit(1);
- }
-
- createCharts(); /* print CHART and DIMENSION statements */
-
- var loops = 0;
- var last_run = 0;
- var next_run = 0;
- var dt_since_last_run = 0;
- var now = 0;
-
- while(true) {
- /* find the current time in milliseconds */
- now = currentTimeStampInMilliseconds();
-
- /*
- * find the time of the next loop
- * this makes sure we are always aligned
- * with the Netdata daemon
- */
- next_run = now - (now % update_every) + update_every;
-
- /*
- * wait until it is time
- * it is important to do it in a loop
- * since many wait functions can be interrupted
- */
- while( now < next_run ) {
- sleepMilliseconds(next_run - now);
- now = currentTimeStampInMilliseconds();
- }
-
- /* calculate the time passed since the last run */
- if ( loops > 0 )
- dt_since_last_run = (now - last_run) * 1000; /* in microseconds */
-
- /* prepare for the next loop */
- last_run = now;
- loops++;
-
- /* do your magic here to collect values */
- collectValues();
-
- /* send the collected data to Netdata */
- printValues(dt_since_last_run); /* print BEGIN, SET, END statements */
- }
-```
-
- Using the above procedure, your plugin will be synchronized to start data collection on steps of `update_every`. There will be no need to keep track of latencies in data collection.
-
- Netdata interpolates values to second boundaries, so even if your plugin is not perfectly aligned it does not matter. Netdata will find out. When your plugin works in increments of `update_every`, there will be no gaps in the charts due to the possible cumulative micro-delays in data collection. Gaps will only appear if the data collection is really delayed.
-
-3. If you are not sure your plugin is free of memory leaks, exit once every hour. Netdata will restart your process.
-
-4. If possible, try to autodetect if your plugin should be enabled, without any configuration.
-
-
diff --git a/collectors/plugins.d/gperf-config.txt b/collectors/plugins.d/gperf-config.txt
deleted file mode 100644
index bad51367c..000000000
--- a/collectors/plugins.d/gperf-config.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-%struct-type
-%omit-struct-type
-%define hash-function-name gperf_keyword_hash_function
-%define lookup-function-name gperf_lookup_keyword
-%define word-array-name gperf_keywords
-%define constants-prefix GPERF_PARSER_
-%define slot-name keyword
-%global-table
-%null-strings
-PARSER_KEYWORD;
-%%
-#
-# Plugins Only Keywords
-#
-FLUSH, 97, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1
-DISABLE, 98, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2
-EXIT, 99, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3
-HOST, 71, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 4
-HOST_DEFINE, 72, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 5
-HOST_DEFINE_END, 73, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 6
-HOST_LABEL, 74, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 7
-#
-# Common keywords
-#
-BEGIN, 12, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8
-CHART, 32, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 9
-CLABEL, 34, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 10
-CLABEL_COMMIT, 35, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 11
-DIMENSION, 31, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 12
-END, 13, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13
-FUNCTION, 41, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 14
-FUNCTION_RESULT_BEGIN, 42, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15
-LABEL, 51, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 16
-OVERWRITE, 52, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 17
-SET, 11, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 18
-VARIABLE, 53, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 19
-DYNCFG_ENABLE, 101, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 20
-DYNCFG_REGISTER_MODULE, 102, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 21
-DYNCFG_REGISTER_JOB, 103, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 22
-DYNCFG_RESET, 104, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23
-REPORT_JOB_STATUS, 110, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24
-DELETE_JOB, 111, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25
-#
-# Streaming only keywords
-#
-CLAIMED_ID, 61, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 26
-BEGIN2, 2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27
-SET2, 1, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28
-END2, 3, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29
-#
-# Streaming Replication keywords
-#
-CHART_DEFINITION_END, 33, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 30
-RBEGIN, 22, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31
-RDSTATE, 23, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32
-REND, 25, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33
-RSET, 21, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34
-RSSTATE, 24, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35
diff --git a/collectors/plugins.d/gperf-hashtable.h b/collectors/plugins.d/gperf-hashtable.h
deleted file mode 100644
index b327d8d6d..000000000
--- a/collectors/plugins.d/gperf-hashtable.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/* ANSI-C code produced by gperf version 3.1 */
-/* Command-line: gperf --multiple-iterations=1000 --output-file=gperf-hashtable.h gperf-config.txt */
-/* Computed positions: -k'1-2' */
-
-#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \
- && ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) \
- && (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) \
- && ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) \
- && ('1' == 49) && ('2' == 50) && ('3' == 51) && ('4' == 52) \
- && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) \
- && ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) \
- && ('=' == 61) && ('>' == 62) && ('?' == 63) && ('A' == 65) \
- && ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) \
- && ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) \
- && ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) \
- && ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) \
- && ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) \
- && ('V' == 86) && ('W' == 87) && ('X' == 88) && ('Y' == 89) \
- && ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) \
- && ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) \
- && ('c' == 99) && ('d' == 100) && ('e' == 101) && ('f' == 102) \
- && ('g' == 103) && ('h' == 104) && ('i' == 105) && ('j' == 106) \
- && ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) \
- && ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) \
- && ('s' == 115) && ('t' == 116) && ('u' == 117) && ('v' == 118) \
- && ('w' == 119) && ('x' == 120) && ('y' == 121) && ('z' == 122) \
- && ('{' == 123) && ('|' == 124) && ('}' == 125) && ('~' == 126))
-/* The character set is not based on ISO-646. */
-#error "gperf generated tables don't work with this execution character set. Please report a bug to <bug-gperf@gnu.org>."
-#endif
-
-
-#define GPERF_PARSER_TOTAL_KEYWORDS 35
-#define GPERF_PARSER_MIN_WORD_LENGTH 3
-#define GPERF_PARSER_MAX_WORD_LENGTH 22
-#define GPERF_PARSER_MIN_HASH_VALUE 3
-#define GPERF_PARSER_MAX_HASH_VALUE 47
-/* maximum key range = 45, duplicates = 0 */
-
-#ifdef __GNUC__
-__inline
-#else
-#ifdef __cplusplus
-inline
-#endif
-#endif
-static unsigned int
-gperf_keyword_hash_function (register const char *str, register size_t len)
-{
- static unsigned char asso_values[] =
- {
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 11, 18, 0, 0, 0,
- 6, 48, 9, 0, 48, 48, 20, 48, 0, 8,
- 48, 48, 1, 12, 48, 20, 18, 48, 2, 0,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
- 48, 48, 48, 48, 48, 48
- };
- return len + asso_values[(unsigned char)str[1]] + asso_values[(unsigned char)str[0]];
-}
-
-static PARSER_KEYWORD gperf_keywords[] =
- {
- {(char*)0}, {(char*)0}, {(char*)0},
-#line 30 "gperf-config.txt"
- {"END", 13, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13},
-#line 49 "gperf-config.txt"
- {"END2", 3, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29},
-#line 56 "gperf-config.txt"
- {"REND", 25, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33},
-#line 17 "gperf-config.txt"
- {"EXIT", 99, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3},
-#line 16 "gperf-config.txt"
- {"DISABLE", 98, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2},
-#line 55 "gperf-config.txt"
- {"RDSTATE", 23, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32},
-#line 29 "gperf-config.txt"
- {"DIMENSION", 31, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 12},
-#line 42 "gperf-config.txt"
- {"DELETE_JOB", 111, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25},
- {(char*)0},
-#line 40 "gperf-config.txt"
- {"DYNCFG_RESET", 104, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23},
-#line 37 "gperf-config.txt"
- {"DYNCFG_ENABLE", 101, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 20},
-#line 26 "gperf-config.txt"
- {"CHART", 32, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 9},
-#line 35 "gperf-config.txt"
- {"SET", 11, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 18},
-#line 48 "gperf-config.txt"
- {"SET2", 1, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28},
-#line 57 "gperf-config.txt"
- {"RSET", 21, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34},
-#line 41 "gperf-config.txt"
- {"REPORT_JOB_STATUS", 110, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24},
-#line 39 "gperf-config.txt"
- {"DYNCFG_REGISTER_JOB", 103, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 22},
-#line 58 "gperf-config.txt"
- {"RSSTATE", 24, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35},
-#line 18 "gperf-config.txt"
- {"HOST", 71, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 4},
-#line 38 "gperf-config.txt"
- {"DYNCFG_REGISTER_MODULE", 102, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 21},
-#line 25 "gperf-config.txt"
- {"BEGIN", 12, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8},
-#line 47 "gperf-config.txt"
- {"BEGIN2", 2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27},
-#line 54 "gperf-config.txt"
- {"RBEGIN", 22, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31},
-#line 27 "gperf-config.txt"
- {"CLABEL", 34, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 10},
-#line 21 "gperf-config.txt"
- {"HOST_LABEL", 74, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 7},
-#line 19 "gperf-config.txt"
- {"HOST_DEFINE", 72, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 5},
-#line 53 "gperf-config.txt"
- {"CHART_DEFINITION_END", 33, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 30},
-#line 46 "gperf-config.txt"
- {"CLAIMED_ID", 61, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 26},
-#line 15 "gperf-config.txt"
- {"FLUSH", 97, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1},
-#line 20 "gperf-config.txt"
- {"HOST_DEFINE_END", 73, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 6},
-#line 28 "gperf-config.txt"
- {"CLABEL_COMMIT", 35, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 11},
-#line 31 "gperf-config.txt"
- {"FUNCTION", 41, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 14},
-#line 34 "gperf-config.txt"
- {"OVERWRITE", 52, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 17},
-#line 33 "gperf-config.txt"
- {"LABEL", 51, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 16},
-#line 36 "gperf-config.txt"
- {"VARIABLE", 53, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 19},
- {(char*)0}, {(char*)0}, {(char*)0}, {(char*)0},
- {(char*)0}, {(char*)0}, {(char*)0}, {(char*)0},
- {(char*)0},
-#line 32 "gperf-config.txt"
- {"FUNCTION_RESULT_BEGIN", 42, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15}
- };
-
-PARSER_KEYWORD *
-gperf_lookup_keyword (register const char *str, register size_t len)
-{
- if (len <= GPERF_PARSER_MAX_WORD_LENGTH && len >= GPERF_PARSER_MIN_WORD_LENGTH)
- {
- register unsigned int key = gperf_keyword_hash_function (str, len);
-
- if (key <= GPERF_PARSER_MAX_HASH_VALUE)
- {
- register const char *s = gperf_keywords[key].keyword;
-
- if (s && *str == *s && !strcmp (str + 1, s + 1))
- return &gperf_keywords[key];
- }
- }
- return 0;
-}
diff --git a/collectors/plugins.d/local_listeners.c b/collectors/plugins.d/local_listeners.c
deleted file mode 100644
index f2c5e688b..000000000
--- a/collectors/plugins.d/local_listeners.c
+++ /dev/null
@@ -1,400 +0,0 @@
-#include "libnetdata/libnetdata.h"
-#include "libnetdata/required_dummies.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <dirent.h>
-#include <string.h>
-#include <unistd.h>
-#include <ctype.h>
-#include <arpa/inet.h>
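-
-// local_listeners maps listening sockets to the processes that own them: it
-// loads inode -> (protocol, address, port) entries from /proc/net/{tcp,tcp6,
-// udp,udp6}, then walks /proc/<pid>/fd matching each socket:[inode] link and
-// prints protocol|address|port|cmdline lines.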
-
-typedef enum {
- PROC_NET_PROTOCOL_TCP,
- PROC_NET_PROTOCOL_TCP6,
- PROC_NET_PROTOCOL_UDP,
- PROC_NET_PROTOCOL_UDP6,
-} PROC_NET_PROTOCOLS;
-
-#define MAX_ERROR_LOGS 10
-
-static size_t pid_fds_processed = 0;
-static size_t pid_fds_failed = 0;
-static size_t errors_encountered = 0;
-
-static inline const char *protocol_name(PROC_NET_PROTOCOLS protocol) {
- switch(protocol) {
- default:
- case PROC_NET_PROTOCOL_TCP:
- return "TCP";
-
- case PROC_NET_PROTOCOL_UDP:
- return "UDP";
-
- case PROC_NET_PROTOCOL_TCP6:
- return "TCP6";
-
- case PROC_NET_PROTOCOL_UDP6:
- return "UDP6";
- }
-}
-
-static inline int read_cmdline(pid_t pid, char* buffer, size_t bufferSize) {
- char path[FILENAME_MAX + 1];
- snprintfz(path, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, pid);
-
- FILE* file = fopen(path, "r");
- if (!file) {
- if(++errors_encountered < MAX_ERROR_LOGS)
- collector_error("LOCAL-LISTENERS: error opening file: %s\n", path);
-
- return -1;
- }
-
- size_t bytesRead = fread(buffer, 1, bufferSize - 1, file);
- buffer[bytesRead] = '\0'; // Ensure the buffer is NUL-terminated
-
- // Replace null characters in cmdline with spaces
- for (size_t i = 0; i < bytesRead; i++) {
- if (buffer[i] == '\0') {
- buffer[i] = ' ';
- }
- }
-
- fclose(file);
- return 0;
-}
-
-static inline void fix_cmdline(char* str) {
- if (str == NULL)
- return;
-
- char *s = str;
-
- do {
- if(*s == '|' || iscntrl(*s))
- *s = '_';
-
- } while(*++s);
-
- while(s > str && *(s-1) == ' ')
- *--s = '\0';
-}
-
-// ----------------------------------------------------------------------------
-
-#define HASH_TABLE_SIZE 100000
-
-typedef struct Node {
- unsigned int inode; // key
-
- // values
- unsigned int port;
- char local_address[INET6_ADDRSTRLEN];
- PROC_NET_PROTOCOLS protocol;
- bool processed;
-
- // linking
- struct Node *prev, *next;
-} Node;
-
-typedef struct HashTable {
- Node *table[HASH_TABLE_SIZE];
-} HashTable;
-
-static HashTable *hashTable_key_inode_port_value = NULL;
-
-static inline void generate_output(const char *protocol, const char *address, unsigned int port, const char *cmdline) {
- printf("%s|%s|%u|%s\n", protocol, address, port, cmdline);
-}
-
-HashTable* createHashTable() {
- HashTable *hashTable = (HashTable*)mallocz(sizeof(HashTable));
- memset(hashTable, 0, sizeof(HashTable));
- return hashTable;
-}
-
-static inline unsigned int hashFunction(unsigned int inode) {
- return inode % HASH_TABLE_SIZE;
-}
-
-static inline void insertHashTable(HashTable *hashTable, unsigned int inode, unsigned int port, PROC_NET_PROTOCOLS protocol, char *local_address) {
- unsigned int index = hashFunction(inode);
- Node *newNode = (Node*)mallocz(sizeof(Node));
- newNode->inode = inode;
- newNode->port = port;
- newNode->protocol = protocol;
- strncpyz(newNode->local_address, local_address, INET6_ADDRSTRLEN - 1);
- DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(hashTable->table[index], newNode, prev, next);
-}
-
-static inline bool lookupHashTable_and_execute(HashTable *hashTable, unsigned int inode, pid_t pid) {
- unsigned int index = hashFunction(inode);
- for(Node *node = hashTable->table[index], *next = NULL ; node ; node = next) {
- next = node->next;
-
- if(node->inode == inode && node->port) {
- DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(hashTable->table[index], node, prev, next);
- char cmdline[8192] = "";
- read_cmdline(pid, cmdline, sizeof(cmdline));
- fix_cmdline(cmdline);
- generate_output(protocol_name(node->protocol), node->local_address, node->port, cmdline);
- freez(node);
- return true;
- }
- }
-
- return false;
-}
-
-void freeHashTable(HashTable *hashTable) {
- for (unsigned int i = 0; i < HASH_TABLE_SIZE; i++) {
- while(hashTable->table[i]) {
- Node *tmp = hashTable->table[i];
- DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(hashTable->table[i], tmp, prev, next);
- generate_output(protocol_name(tmp->protocol), tmp->local_address, tmp->port, "");
- freez(tmp);
- }
- }
- freez(hashTable);
-}
-
-// ----------------------------------------------------------------------------
-
-static inline void found_this_socket_inode(pid_t pid, unsigned int inode) {
- lookupHashTable_and_execute(hashTable_key_inode_port_value, inode, pid);
-}
-
-bool find_all_sockets_in_proc(const char *proc_filename) {
- DIR *proc_dir, *fd_dir;
- struct dirent *proc_entry, *fd_entry;
- char path_buffer[FILENAME_MAX + 1];
-
- proc_dir = opendir(proc_filename);
- if (proc_dir == NULL) {
- if(++errors_encountered < MAX_ERROR_LOGS)
- collector_error("LOCAL-LISTENERS: cannot opendir() '%s'", proc_filename);
-
- pid_fds_failed++;
- return false;
- }
-
- while ((proc_entry = readdir(proc_dir)) != NULL) {
- // Check if directory entry is a PID by seeing if the name is made up of digits only
- int is_pid = 1;
- for (char *c = proc_entry->d_name; *c != '\0'; c++) {
- if (*c < '0' || *c > '9') {
- is_pid = 0;
- break;
- }
- }
-
- if (!is_pid)
- continue;
-
- // Build the path to the fd directory of the process
- snprintfz(path_buffer, FILENAME_MAX, "%s/%s/fd/", proc_filename, proc_entry->d_name);
-
- fd_dir = opendir(path_buffer);
- if (fd_dir == NULL) {
- if(++errors_encountered < MAX_ERROR_LOGS)
- collector_error("LOCAL-LISTENERS: cannot opendir() '%s'", path_buffer);
-
- pid_fds_failed++;
- continue;
- }
-
- while ((fd_entry = readdir(fd_dir)) != NULL) {
- if(!strcmp(fd_entry->d_name, ".") || !strcmp(fd_entry->d_name, ".."))
- continue;
-
- char link_path[FILENAME_MAX + 1];
- char link_target[FILENAME_MAX + 1];
- int inode;
-
- // Build the path to the file descriptor link
- snprintfz(link_path, FILENAME_MAX, "%s/%s", path_buffer, fd_entry->d_name);
-
- ssize_t len = readlink(link_path, link_target, sizeof(link_target) - 1);
- if (len == -1) {
- if(++errors_encountered < MAX_ERROR_LOGS)
- collector_error("LOCAL-LISTENERS: cannot read link '%s'", link_path);
-
- pid_fds_failed++;
- continue;
- }
- link_target[len] = '\0';
-
- pid_fds_processed++;
-
- // If the link target indicates a socket, print its inode number
- if (sscanf(link_target, "socket:[%d]", &inode) == 1)
- found_this_socket_inode((pid_t)strtoul(proc_entry->d_name, NULL, 10), inode);
- }
-
- closedir(fd_dir);
- }
-
- closedir(proc_dir);
- return true;
-}
-
-// ----------------------------------------------------------------------------
-
-static inline void add_port_and_inode(PROC_NET_PROTOCOLS protocol, unsigned int port, unsigned int inode, char *local_address) {
- insertHashTable(hashTable_key_inode_port_value, inode, port, protocol, local_address);
-}
-
-static inline void print_ipv6_address(const char *ipv6_str, char *dst) {
- unsigned k;
- char buf[9];
- struct sockaddr_in6 sa;
-
- // Initialize sockaddr_in6
- memset(&sa, 0, sizeof(struct sockaddr_in6));
- sa.sin6_family = AF_INET6;
- sa.sin6_port = htons(0); // the port is irrelevant here; only the address is converted
-
- // Convert hex string to byte array
- for (k = 0; k < 4; ++k)
- {
- memset(buf, 0, 9);
- memcpy(buf, ipv6_str + (k * 8), 8);
- sa.sin6_addr.s6_addr32[k] = strtoul(buf, NULL, 16);
- }
-
- // Convert to human-readable format
- if (inet_ntop(AF_INET6, &(sa.sin6_addr), dst, INET6_ADDRSTRLEN) == NULL)
- *dst = '\0';
-}
-
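-// /proc/net/* prints IPv4 addresses as hex in host byte order, so on
-// little-endian machines the lowest byte is the first dotted octet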
-static inline void print_ipv4_address(uint32_t address, char *dst) {
- uint8_t octets[4];
- octets[0] = address & 0xFF;
- octets[1] = (address >> 8) & 0xFF;
- octets[2] = (address >> 16) & 0xFF;
- octets[3] = (address >> 24) & 0xFF;
- sprintf(dst, "%u.%u.%u.%u", octets[0], octets[1], octets[2], octets[3]);
-}
-
-bool read_proc_net_x(const char *filename, PROC_NET_PROTOCOLS protocol) {
- FILE *fp;
- char *line = NULL;
- size_t len = 0;
- ssize_t read;
- char address[INET6_ADDRSTRLEN];
-
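- // a complete IPv4 entry is at least ~105 characters and an IPv6 entry at
- // least ~155; shorter lines cannot contain all the fields parsed below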
- ssize_t min_line_length = (protocol == PROC_NET_PROTOCOL_TCP || protocol == PROC_NET_PROTOCOL_UDP) ? 105 : 155;
-
- fp = fopen(filename, "r");
- if (fp == NULL)
- return false;
-
- // Read line by line
- while ((read = getline(&line, &len, fp)) != -1) {
- if(read < min_line_length) continue;
-
- char local_address6[33], rem_address6[33];
- unsigned int local_address, local_port, state, rem_address, rem_port, inode;
-
- switch(protocol) {
- case PROC_NET_PROTOCOL_TCP:
- if(line[34] != '0' || line[35] != 'A')
- continue;
- // fall-through
-
- case PROC_NET_PROTOCOL_UDP:
- if (sscanf(line, "%*d: %X:%X %X:%X %X %*X:%*X %*X:%*X %*X %*d %*d %u",
- &local_address, &local_port, &rem_address, &rem_port, &state, &inode) != 6)
- continue;
-
- print_ipv4_address(local_address, address);
- break;
-
- case PROC_NET_PROTOCOL_TCP6:
- if(line[82] != '0' || line[83] != 'A')
- continue;
- // fall-through
-
- case PROC_NET_PROTOCOL_UDP6:
- if(sscanf(line, "%*d: %32[0-9A-Fa-f]:%X %32[0-9A-Fa-f]:%X %X %*X:%*X %*X:%*X %*X %*d %*d %u",
- local_address6, &local_port, rem_address6, &rem_port, &state, &inode) != 6)
- continue;
-
- print_ipv6_address(local_address6, address);
- break;
- }
-
- add_port_and_inode(protocol, local_port, inode, address);
- }
-
- fclose(fp);
- if (line)
- free(line);
-
- return true;
-}
-
-// ----------------------------------------------------------------------------
-typedef struct {
- bool read_tcp;
- bool read_tcp6;
- bool read_udp;
- bool read_udp6;
-} CommandLineArguments;
-
-int main(int argc, char **argv) {
- char path[FILENAME_MAX + 1];
- hashTable_key_inode_port_value = createHashTable();
-
- netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
- if(!netdata_configured_host_prefix) netdata_configured_host_prefix = "";
-
- CommandLineArguments args = {.read_tcp = false, .read_tcp6 = false, .read_udp = false, .read_udp6 = false};
-
- for (int i = 1; i < argc; i++) {
- if (strcmp("tcp", argv[i]) == 0) {
- args.read_tcp = true;
- continue;
- } else if (strcmp("tcp6", argv[i]) == 0) {
- args.read_tcp6 = true;
- continue;
- } else if (strcmp("udp", argv[i]) == 0) {
- args.read_udp = true;
- continue;
- } else if (strcmp("udp6", argv[i]) == 0) {
- args.read_udp6 = true;
- continue;
- }
- }
-
- bool read_all_files = (!args.read_tcp && !args.read_tcp6 && !args.read_udp && !args.read_udp6);
-
- if (read_all_files || args.read_tcp) {
- snprintfz(path, FILENAME_MAX, "%s/proc/net/tcp", netdata_configured_host_prefix);
- read_proc_net_x(path, PROC_NET_PROTOCOL_TCP);
- }
-
- if (read_all_files || args.read_udp) {
- snprintfz(path, FILENAME_MAX, "%s/proc/net/udp", netdata_configured_host_prefix);
- read_proc_net_x(path, PROC_NET_PROTOCOL_UDP);
- }
-
- if (read_all_files || args.read_tcp6) {
- snprintfz(path, FILENAME_MAX, "%s/proc/net/tcp6", netdata_configured_host_prefix);
- read_proc_net_x(path, PROC_NET_PROTOCOL_TCP6);
- }
-
- if (read_all_files || args.read_udp6) {
- snprintfz(path, FILENAME_MAX, "%s/proc/net/udp6", netdata_configured_host_prefix);
- read_proc_net_x(path, PROC_NET_PROTOCOL_UDP6);
- }
-
- snprintfz(path, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
- find_all_sockets_in_proc(path);
-
- freeHashTable(hashTable_key_inode_port_value);
- return 0;
-}
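
The core of the deleted local_listeners helper is read_proc_net_x() above: it
scans /proc/net/* with sscanf(), keeps only TCP sockets in the LISTEN (0A)
state, and records the inode so the /proc/<pid>/fd walk can attribute the
socket to a process. A self-contained sketch of decoding one such line, using
a hard-coded sample entry purely for illustration:

    #include <stdio.h>

    int main(void) {
        /* illustrative /proc/net/tcp entry: 127.0.0.1:8080, LISTEN, inode 12345 */
        const char *line =
            "   0: 0100007F:1F90 00000000:0000 0A 00000000:00000000 "
            "00:00000000 00000000   998        0 12345 1 0 100 0 0 10 0";

        unsigned local_addr, local_port, rem_addr, rem_port, state, inode;
        if (sscanf(line, "%*d: %X:%X %X:%X %X %*X:%*X %*X:%*X %*X %*d %*d %u",
                   &local_addr, &local_port, &rem_addr, &rem_port,
                   &state, &inode) != 6)
            return 1;

        if (state != 0x0A)       /* 0x0A == TCP_LISTEN */
            return 0;

        /* the kernel prints the address in host byte order, so on
         * little-endian machines the low byte is the first octet */
        printf("listening on %u.%u.%u.%u:%u (inode %u)\n",
               local_addr & 0xFF, (local_addr >> 8) & 0xFF,
               (local_addr >> 16) & 0xFF, (local_addr >> 24) & 0xFF,
               local_port, inode);
        return 0;
    }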
diff --git a/collectors/plugins.d/plugins_d.c b/collectors/plugins.d/plugins_d.c
deleted file mode 100644
index 20061ad29..000000000
--- a/collectors/plugins.d/plugins_d.c
+++ /dev/null
@@ -1,362 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugins_d.h"
-#include "pluginsd_parser.h"
-
-char *plugin_directories[PLUGINSD_MAX_DIRECTORIES] = { [0] = PLUGINS_DIR, };
-struct plugind *pluginsd_root = NULL;
-
-inline size_t pluginsd_initialize_plugin_directories()
-{
- char plugins_dirs[(FILENAME_MAX * 2) + 1];
- static char *plugins_dir_list = NULL;
-
- // Get the configuration entry
- if (likely(!plugins_dir_list)) {
- snprintfz(plugins_dirs, FILENAME_MAX * 2, "\"%s\" \"%s/custom-plugins.d\"", PLUGINS_DIR, CONFIG_DIR);
- plugins_dir_list = strdupz(config_get(CONFIG_SECTION_DIRECTORIES, "plugins", plugins_dirs));
- }
-
- // Parse it and store it to plugin directories
- return quoted_strings_splitter_config(plugins_dir_list, plugin_directories, PLUGINSD_MAX_DIRECTORIES);
-}
-
-static inline void plugin_set_disabled(struct plugind *cd) {
- spinlock_lock(&cd->unsafe.spinlock);
- cd->unsafe.enabled = false;
- spinlock_unlock(&cd->unsafe.spinlock);
-}
-
-bool plugin_is_enabled(struct plugind *cd) {
- spinlock_lock(&cd->unsafe.spinlock);
- bool ret = cd->unsafe.enabled;
- spinlock_unlock(&cd->unsafe.spinlock);
- return ret;
-}
-
-static inline void plugin_set_running(struct plugind *cd) {
- spinlock_lock(&cd->unsafe.spinlock);
- cd->unsafe.running = true;
- spinlock_unlock(&cd->unsafe.spinlock);
-}
-
-static inline bool plugin_is_running(struct plugind *cd) {
- spinlock_lock(&cd->unsafe.spinlock);
- bool ret = cd->unsafe.running;
- spinlock_unlock(&cd->unsafe.spinlock);
- return ret;
-}
-
-static void pluginsd_worker_thread_cleanup(void *arg) {
- struct plugind *cd = (struct plugind *)arg;
-
- worker_unregister();
-
- spinlock_lock(&cd->unsafe.spinlock);
-
- cd->unsafe.running = false;
- cd->unsafe.thread = 0;
-
- pid_t pid = cd->unsafe.pid;
- cd->unsafe.pid = 0;
-
- spinlock_unlock(&cd->unsafe.spinlock);
-
- if (pid) {
- siginfo_t info;
- netdata_log_info("PLUGINSD: 'host:%s', killing data collection child process with pid %d",
- rrdhost_hostname(cd->host), pid);
-
- if (killpid(pid) != -1) {
- netdata_log_info("PLUGINSD: 'host:%s', waiting for data collection child process pid %d to exit...",
- rrdhost_hostname(cd->host), pid);
-
- netdata_waitid(P_PID, (id_t)pid, &info, WEXITED);
- }
- }
-}
-
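- // consecutive non-productive runs tolerated before a plugin is disabled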
-#define SERIAL_FAILURES_THRESHOLD 10
-static void pluginsd_worker_thread_handle_success(struct plugind *cd) {
- if (likely(cd->successful_collections)) {
- sleep((unsigned int)cd->update_every);
- return;
- }
-
- if (likely(cd->serial_failures <= SERIAL_FAILURES_THRESHOLD)) {
- netdata_log_info("PLUGINSD: 'host:%s', '%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid,
- plugin_is_enabled(cd) ? "Waiting a bit before starting it again." : "Will not start it again - it is now disabled.");
-
- sleep((unsigned int)(cd->update_every * 10));
- return;
- }
-
- if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) {
- netdata_log_error("PLUGINSD: 'host:'%s', '%s' (pid %d) does not generate useful output, "
- "although it reports success (exits with 0)."
- "We have tried to collect something %zu times - unsuccessfully. Disabling it.",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, cd->serial_failures);
- plugin_set_disabled(cd);
- return;
- }
-}
-
-static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_ret_code) {
- if (worker_ret_code == -1) {
- netdata_log_info("PLUGINSD: 'host:%s', '%s' (pid %d) was killed with SIGTERM. Disabling it.",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid);
- plugin_set_disabled(cd);
- return;
- }
-
- if (!cd->successful_collections) {
- netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d and haven't collected any data. Disabling it.",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code);
- plugin_set_disabled(cd);
- return;
- }
-
- if (cd->serial_failures <= SERIAL_FAILURES_THRESHOLD) {
- netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code, cd->successful_collections,
- plugin_is_enabled(cd) ? "Waiting a bit before starting it again." : "Will not start it again - it is disabled.");
- sleep((unsigned int)(cd->update_every * 10));
- return;
- }
-
- if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) {
- netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times)."
- "We tried to restart it %zu times, but it failed to generate data. Disabling it.",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code,
- cd->successful_collections, cd->serial_failures);
- plugin_set_disabled(cd);
- return;
- }
-}
-
-#undef SERIAL_FAILURES_THRESHOLD
-
-static void *pluginsd_worker_thread(void *arg) {
- worker_register("PLUGINSD");
-
- netdata_thread_cleanup_push(pluginsd_worker_thread_cleanup, arg);
-
- {
- struct plugind *cd = (struct plugind *) arg;
- plugin_set_running(cd);
-
- size_t count = 0;
-
- while(service_running(SERVICE_COLLECTORS)) {
- FILE *fp_child_input = NULL;
- FILE *fp_child_output = netdata_popen(cd->cmd, &cd->unsafe.pid, &fp_child_input);
-
- if(unlikely(!fp_child_input || !fp_child_output)) {
- netdata_log_error("PLUGINSD: 'host:%s', cannot popen(\"%s\", \"r\").",
- rrdhost_hostname(cd->host), cd->cmd);
- break;
- }
-
- nd_log(NDLS_DAEMON, NDLP_DEBUG,
- "PLUGINSD: 'host:%s' connected to '%s' running on pid %d",
- rrdhost_hostname(cd->host),
- cd->fullfilename, cd->unsafe.pid);
-
- const char *plugin = strrchr(cd->fullfilename, '/');
- if(plugin)
- plugin++;
- else
- plugin = cd->fullfilename;
-
- char module[100];
- snprintfz(module, sizeof(module), "plugins.d[%s]", plugin);
- ND_LOG_STACK lgs[] = {
- ND_LOG_FIELD_TXT(NDF_MODULE, module),
- ND_LOG_FIELD_TXT(NDF_NIDL_NODE, rrdhost_hostname(cd->host)),
- ND_LOG_FIELD_TXT(NDF_SRC_TRANSPORT, "pluginsd"),
- ND_LOG_FIELD_END(),
- };
- ND_LOG_STACK_PUSH(lgs);
-
- count = pluginsd_process(cd->host, cd, fp_child_input, fp_child_output, 0);
-
- nd_log(NDLS_DAEMON, NDLP_DEBUG,
- "PLUGINSD: 'host:%s', '%s' (pid %d) disconnected after %zu successful data collections (ENDs).",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, count);
-
- killpid(cd->unsafe.pid);
-
- int worker_ret_code = netdata_pclose(fp_child_input, fp_child_output, cd->unsafe.pid);
-
- if(likely(worker_ret_code == 0))
- pluginsd_worker_thread_handle_success(cd);
- else
- pluginsd_worker_thread_handle_error(cd, worker_ret_code);
-
- cd->unsafe.pid = 0;
-
- if(unlikely(!plugin_is_enabled(cd)))
- break;
- }
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-static void pluginsd_main_cleanup(void *data) {
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
- netdata_log_info("PLUGINSD: cleaning up...");
-
- struct plugind *cd;
- for (cd = pluginsd_root; cd; cd = cd->next) {
- spinlock_lock(&cd->unsafe.spinlock);
- if (cd->unsafe.enabled && cd->unsafe.running && cd->unsafe.thread != 0) {
- netdata_log_info("PLUGINSD: 'host:%s', stopping plugin thread: %s",
- rrdhost_hostname(cd->host), cd->id);
-
- netdata_thread_cancel(cd->unsafe.thread);
- }
- spinlock_unlock(&cd->unsafe.spinlock);
- }
-
- netdata_log_info("PLUGINSD: cleanup completed.");
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-
- worker_unregister();
-}
-
-void *pluginsd_main(void *ptr)
-{
- netdata_thread_cleanup_push(pluginsd_main_cleanup, ptr);
-
- int automatic_run = config_get_boolean(CONFIG_SECTION_PLUGINS, "enable running new plugins", 1);
- int scan_frequency = (int)config_get_number(CONFIG_SECTION_PLUGINS, "check for new plugins every", 60);
- if (scan_frequency < 1)
- scan_frequency = 1;
-
- // disable some plugins by default
- config_get_boolean(CONFIG_SECTION_PLUGINS, "slabinfo", CONFIG_BOOLEAN_NO);
- config_get_boolean(CONFIG_SECTION_PLUGINS, "logs-management",
-#if defined(LOGS_MANAGEMENT_DEV_MODE)
- CONFIG_BOOLEAN_YES
-#else
- CONFIG_BOOLEAN_NO
-#endif
- );
- // the freeipmi plugin crashes (both threads) on Alpine since it became multi-threaded
- // it works with "--device /dev/ipmi0", but this is not the default
- // see https://github.com/netdata/netdata/pull/15564 for details
- if (getenv("NETDATA_LISTENER_PORT"))
- config_get_boolean(CONFIG_SECTION_PLUGINS, "freeipmi", CONFIG_BOOLEAN_NO);
-
- // store the errno for each plugins directory
- // so that we don't log broken directories on each loop
- int directory_errors[PLUGINSD_MAX_DIRECTORIES] = { 0 };
-
- while (service_running(SERVICE_COLLECTORS)) {
- int idx;
- const char *directory_name;
-
- for (idx = 0; idx < PLUGINSD_MAX_DIRECTORIES && (directory_name = plugin_directories[idx]); idx++) {
- if (unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
-
- errno = 0;
- DIR *dir = opendir(directory_name);
- if (unlikely(!dir)) {
- if (directory_errors[idx] != errno) {
- directory_errors[idx] = errno;
- netdata_log_error("cannot open plugins directory '%s'", directory_name);
- }
- continue;
- }
-
- struct dirent *file = NULL;
- while (likely((file = readdir(dir)))) {
- if (unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
-
- netdata_log_debug(D_PLUGINSD, "examining file '%s'", file->d_name);
-
- if (unlikely(strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0))
- continue;
-
- int len = (int)strlen(file->d_name);
- if (unlikely(len <= (int)PLUGINSD_FILE_SUFFIX_LEN))
- continue;
- if (unlikely(strcmp(PLUGINSD_FILE_SUFFIX, &file->d_name[len - (int)PLUGINSD_FILE_SUFFIX_LEN]) != 0)) {
- netdata_log_debug(D_PLUGINSD, "file '%s' does not end in '%s'", file->d_name, PLUGINSD_FILE_SUFFIX);
- continue;
- }
-
- char pluginname[CONFIG_MAX_NAME + 1];
- snprintfz(pluginname, CONFIG_MAX_NAME, "%.*s", (int)(len - PLUGINSD_FILE_SUFFIX_LEN), file->d_name);
- int enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, pluginname, automatic_run);
-
- if (unlikely(!enabled)) {
- netdata_log_debug(D_PLUGINSD, "plugin '%s' is not enabled", file->d_name);
- continue;
- }
-
- // check if it runs already
- struct plugind *cd;
- for (cd = pluginsd_root; cd; cd = cd->next)
- if (unlikely(strcmp(cd->filename, file->d_name) == 0))
- break;
-
- if (likely(cd && plugin_is_running(cd))) {
- netdata_log_debug(D_PLUGINSD, "plugin '%s' is already running", cd->filename);
- continue;
- }
-
- // it is not running
- // allocate a new one, or use the obsolete one
- if (unlikely(!cd)) {
- cd = callocz(sizeof(struct plugind), 1);
-
- snprintfz(cd->id, CONFIG_MAX_NAME, "plugin:%s", pluginname);
-
- strncpyz(cd->filename, file->d_name, FILENAME_MAX);
- snprintfz(cd->fullfilename, FILENAME_MAX, "%s/%s", directory_name, cd->filename);
-
- cd->host = localhost;
- cd->unsafe.enabled = enabled;
- cd->unsafe.running = false;
-
- cd->update_every = (int)config_get_number(cd->id, "update every", localhost->rrd_update_every);
- cd->started_t = now_realtime_sec();
-
- char *def = "";
- snprintfz(
- cd->cmd, PLUGINSD_CMD_MAX, "exec %s %d %s", cd->fullfilename, cd->update_every,
- config_get(cd->id, "command options", def));
-
- // link it
- DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(pluginsd_root, cd, prev, next);
-
- if (plugin_is_enabled(cd)) {
- char tag[NETDATA_THREAD_TAG_MAX + 1];
- snprintfz(tag, NETDATA_THREAD_TAG_MAX, "PD[%s]", pluginname);
-
- // spawn a new thread for it
- netdata_thread_create(&cd->unsafe.thread,
- tag,
- NETDATA_THREAD_OPTION_DEFAULT,
- pluginsd_worker_thread,
- cd);
- }
- }
- }
-
- closedir(dir);
- }
-
- sleep((unsigned int)scan_frequency);
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
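
The worker thread removed above follows a simple supervise-and-back-off cycle:
spawn the plugin, consume its output until it exits, then either sleep one
interval (productive run) or ten intervals (empty run) before restarting, and
disable the plugin entirely after repeated failures. A stripped-down stand-in
for that loop, using plain popen() instead of netdata_popen() and a
hypothetical ./example.plugin command:

    #include <stdio.h>
    #include <unistd.h>

    int main(void) {
        const char *cmd = "./example.plugin 1";   /* hypothetical plugin */
        unsigned update_every = 1;

        for (int runs = 0; runs < 3; runs++) {
            FILE *out = popen(cmd, "r");
            if (!out) {
                perror("popen");
                return 1;
            }

            char line[4096];
            size_t collected = 0;
            while (fgets(line, sizeof(line), out))
                collected++;          /* the real code feeds each line to the parser */

            int rc = pclose(out);
            if (rc == 0 && collected > 0)
                sleep(update_every);         /* normal cycle */
            else
                sleep(update_every * 10);    /* back off after a bad run */
        }
        return 0;
    }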
diff --git a/collectors/plugins.d/plugins_d.h b/collectors/plugins.d/plugins_d.h
deleted file mode 100644
index 37c70f7e3..000000000
--- a/collectors/plugins.d/plugins_d.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGINS_D_H
-#define NETDATA_PLUGINS_D_H 1
-
-#include "daemon/common.h"
-
-#define PLUGINSD_FILE_SUFFIX ".plugin"
-#define PLUGINSD_FILE_SUFFIX_LEN strlen(PLUGINSD_FILE_SUFFIX)
-#define PLUGINSD_CMD_MAX (FILENAME_MAX*2)
-#define PLUGINSD_STOCK_PLUGINS_DIRECTORY_PATH 0
-
-#define PLUGINSD_KEYWORD_FUNCTION_PAYLOAD "FUNCTION_PAYLOAD"
-#define PLUGINSD_KEYWORD_FUNCTION_PAYLOAD_END "FUNCTION_PAYLOAD_END"
-
-#define PLUGINSD_KEYWORD_DYNCFG_ENABLE "DYNCFG_ENABLE"
-#define PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE "DYNCFG_REGISTER_MODULE"
-#define PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB "DYNCFG_REGISTER_JOB"
-#define PLUGINSD_KEYWORD_DYNCFG_RESET "DYNCFG_RESET"
-
-#define PLUGINSD_KEYWORD_REPORT_JOB_STATUS "REPORT_JOB_STATUS"
-#define PLUGINSD_KEYWORD_DELETE_JOB "DELETE_JOB"
-
-#define PLUGINSD_MAX_DIRECTORIES 20
-extern char *plugin_directories[PLUGINSD_MAX_DIRECTORIES];
-
-struct plugind {
- char id[CONFIG_MAX_NAME+1]; // config node id
-
- char filename[FILENAME_MAX+1]; // just the filename
- char fullfilename[FILENAME_MAX+1]; // with path
- char cmd[PLUGINSD_CMD_MAX+1]; // the command that it executes
-
- size_t successful_collections; // the number of times we have seen
- // values collected from this plugin
-
- size_t serial_failures; // the number of times the plugin started
- // without collecting values
-
- RRDHOST *host; // the host the plugin collects data for
- int update_every; // the plugin default data collection frequency
-
- struct {
- SPINLOCK spinlock;
- bool running; // do not touch this structure after setting this to true
- bool enabled; // if this is enabled or not
- netdata_thread_t thread;
- pid_t pid;
- } unsafe;
-
- time_t started_t;
-
- const DICTIONARY_ITEM *cfg_dict_item;
- struct configurable_plugin *configuration;
-
- struct plugind *prev;
- struct plugind *next;
-};
-
-extern struct plugind *pluginsd_root;
-
-size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugin_input, FILE *fp_plugin_output, int trust_durations);
-void pluginsd_process_thread_cleanup(void *ptr);
-
-size_t pluginsd_initialize_plugin_directories();
-
-#endif /* NETDATA_PLUGINS_D_H */
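
The 'unsafe' sub-struct in plugind above groups the fields shared between the
directory-scanning thread and the worker thread, so every access goes through
a single lock. A portable sketch of that pattern, with a pthread_mutex_t
standing in for netdata's SPINLOCK:

    #include <pthread.h>
    #include <stdbool.h>

    struct plugin_state {
        struct {
            pthread_mutex_t lock;    /* guards everything in this struct */
            bool running;
            bool enabled;
        } unsafe;
    };

    static bool plugin_is_running(struct plugin_state *p) {
        pthread_mutex_lock(&p->unsafe.lock);
        bool ret = p->unsafe.running;    /* snapshot taken under the lock */
        pthread_mutex_unlock(&p->unsafe.lock);
        return ret;
    }

    static void plugin_set_running(struct plugin_state *p, bool v) {
        pthread_mutex_lock(&p->unsafe.lock);
        p->unsafe.running = v;
        pthread_mutex_unlock(&p->unsafe.lock);
    }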
diff --git a/collectors/plugins.d/pluginsd_parser.c b/collectors/plugins.d/pluginsd_parser.c
deleted file mode 100644
index 3b47c6c0f..000000000
--- a/collectors/plugins.d/pluginsd_parser.c
+++ /dev/null
@@ -1,3208 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "pluginsd_parser.h"
-
-#define LOG_FUNCTIONS false
-
-#define SERVING_STREAMING(parser) ((parser)->repertoire == PARSER_INIT_STREAMING)
-#define SERVING_PLUGINSD(parser) ((parser)->repertoire == PARSER_INIT_PLUGINSD)
-
-static ssize_t send_to_plugin(const char *txt, void *data) {
- PARSER *parser = data;
-
- if(!txt || !*txt)
- return 0;
-
-#ifdef ENABLE_H2O
- if(parser->h2o_ctx)
- return h2o_stream_write(parser->h2o_ctx, txt, strlen(txt));
-#endif
-
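- // try the available output transports in order: TLS socket, stdio stream, raw file descriptor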
- errno = 0;
- spinlock_lock(&parser->writer.spinlock);
- ssize_t bytes = -1;
-
-#ifdef ENABLE_HTTPS
- NETDATA_SSL *ssl = parser->ssl_output;
- if(ssl) {
-
- if(SSL_connection(ssl))
- bytes = netdata_ssl_write(ssl, (void *) txt, strlen(txt));
-
- else
- netdata_log_error("PLUGINSD: cannot send command (SSL)");
-
- spinlock_unlock(&parser->writer.spinlock);
- return bytes;
- }
-#endif
-
- if(parser->fp_output) {
-
- bytes = fprintf(parser->fp_output, "%s", txt);
- if(bytes <= 0) {
- netdata_log_error("PLUGINSD: cannot send command (FILE)");
- bytes = -2;
- }
- else
- fflush(parser->fp_output);
-
- spinlock_unlock(&parser->writer.spinlock);
- return bytes;
- }
-
- if(parser->fd != -1) {
- bytes = 0;
- ssize_t total = (ssize_t)strlen(txt);
- ssize_t sent;
-
- do {
- sent = write(parser->fd, &txt[bytes], total - bytes);
- if(sent <= 0) {
- netdata_log_error("PLUGINSD: cannot send command (fd)");
- spinlock_unlock(&parser->writer.spinlock);
- return -3;
- }
- bytes += sent;
- }
- while(bytes < total);
-
- spinlock_unlock(&parser->writer.spinlock);
- return bytes;
- }
-
- spinlock_unlock(&parser->writer.spinlock);
- netdata_log_error("PLUGINSD: cannot send command (no output socket/pipe/file given to plugins.d parser)");
- return -4;
-}
-
-static inline RRDHOST *pluginsd_require_scope_host(PARSER *parser, const char *cmd) {
- RRDHOST *host = parser->user.host;
-
- if(unlikely(!host))
- netdata_log_error("PLUGINSD: command %s requires a host, but is not set.", cmd);
-
- return host;
-}
-
-static inline RRDSET *pluginsd_require_scope_chart(PARSER *parser, const char *cmd, const char *parent_cmd) {
- RRDSET *st = parser->user.st;
-
- if(unlikely(!st))
- netdata_log_error("PLUGINSD: command %s requires a chart defined via command %s, but is not set.", cmd, parent_cmd);
-
- return st;
-}
-
-static inline RRDSET *pluginsd_get_scope_chart(PARSER *parser) {
- return parser->user.st;
-}
-
-static inline void pluginsd_lock_rrdset_data_collection(PARSER *parser) {
- if(parser->user.st && !parser->user.v2.locked_data_collection) {
- spinlock_lock(&parser->user.st->data_collection_lock);
- parser->user.v2.locked_data_collection = true;
- }
-}
-
-static inline bool pluginsd_unlock_rrdset_data_collection(PARSER *parser) {
- if(parser->user.st && parser->user.v2.locked_data_collection) {
- spinlock_unlock(&parser->user.st->data_collection_lock);
- parser->user.v2.locked_data_collection = false;
- return true;
- }
-
- return false;
-}
-
-static inline void pluginsd_unlock_previous_scope_chart(PARSER *parser, const char *keyword, bool stale) {
- if(unlikely(pluginsd_unlock_rrdset_data_collection(parser))) {
- if(stale)
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s/' stale data collection lock found during %s; it has been unlocked",
- rrdhost_hostname(parser->user.st->rrdhost),
- rrdset_id(parser->user.st),
- keyword);
- }
-
- if(unlikely(parser->user.v2.ml_locked)) {
- ml_chart_update_end(parser->user.st);
- parser->user.v2.ml_locked = false;
-
- if(stale)
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s/' stale ML lock found during %s, it has been unlocked",
- rrdhost_hostname(parser->user.st->rrdhost),
- rrdset_id(parser->user.st),
- keyword);
- }
-}
-
-static inline void pluginsd_clear_scope_chart(PARSER *parser, const char *keyword) {
- pluginsd_unlock_previous_scope_chart(parser, keyword, true);
-
- if(parser->user.cleanup_slots && parser->user.st)
- rrdset_pluginsd_receive_unslot(parser->user.st);
-
- parser->user.st = NULL;
- parser->user.cleanup_slots = false;
-}
-
-static inline bool pluginsd_set_scope_chart(PARSER *parser, RRDSET *st, const char *keyword) {
- RRDSET *old_st = parser->user.st;
- pid_t old_collector_tid = (old_st) ? old_st->pluginsd.collector_tid : 0;
- pid_t my_collector_tid = gettid();
-
- if(unlikely(old_collector_tid)) {
- if(old_collector_tid != my_collector_tid) {
- nd_log_limit_static_global_var(erl, 1, 0);
- nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_WARNING,
- "PLUGINSD: keyword %s: 'host:%s/chart:%s' is collected twice (my tid %d, other collector tid %d)",
- keyword ? keyword : "UNKNOWN",
- rrdhost_hostname(st->rrdhost), rrdset_id(st),
- my_collector_tid, old_collector_tid);
-
- return false;
- }
-
- old_st->pluginsd.collector_tid = 0;
- }
-
- st->pluginsd.collector_tid = my_collector_tid;
-
- pluginsd_clear_scope_chart(parser, keyword);
-
- st->pluginsd.pos = 0;
- parser->user.st = st;
- parser->user.cleanup_slots = false;
-
- return true;
-}
-
-static inline void pluginsd_rrddim_put_to_slot(PARSER *parser, RRDSET *st, RRDDIM *rd, ssize_t slot, bool obsolete) {
- size_t wanted_size = st->pluginsd.size;
-
- if(slot >= 1) {
- st->pluginsd.dims_with_slots = true;
- wanted_size = slot;
- }
- else {
- st->pluginsd.dims_with_slots = false;
- wanted_size = dictionary_entries(st->rrddim_root_index);
- }
-
- if(wanted_size > st->pluginsd.size) {
- st->pluginsd.prd_array = reallocz(st->pluginsd.prd_array, wanted_size * sizeof(struct pluginsd_rrddim));
-
- // initialize the empty slots
- for(ssize_t i = (ssize_t) wanted_size - 1; i >= (ssize_t) st->pluginsd.size; i--) {
- st->pluginsd.prd_array[i].rda = NULL;
- st->pluginsd.prd_array[i].rd = NULL;
- st->pluginsd.prd_array[i].id = NULL;
- }
-
- st->pluginsd.size = wanted_size;
- }
-
- if(st->pluginsd.dims_with_slots) {
- struct pluginsd_rrddim *prd = &st->pluginsd.prd_array[slot - 1];
-
- if(prd->rd != rd) {
- prd->rda = rrddim_find_and_acquire(st, string2str(rd->id));
- prd->rd = rrddim_acquired_to_rrddim(prd->rda);
- prd->id = string2str(prd->rd->id);
- }
-
- if(obsolete)
- parser->user.cleanup_slots = true;
- }
-}
-
-static inline RRDDIM *pluginsd_acquire_dimension(RRDHOST *host, RRDSET *st, const char *dimension, ssize_t slot, const char *cmd) {
- if (unlikely(!dimension || !*dimension)) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s, without a dimension.",
- rrdhost_hostname(host), rrdset_id(st), cmd);
- return NULL;
- }
-
- if (unlikely(!st->pluginsd.size)) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s, but the chart has no dimensions.",
- rrdhost_hostname(host), rrdset_id(st), cmd);
- return NULL;
- }
-
- struct pluginsd_rrddim *prd;
- RRDDIM *rd;
-
- if(likely(st->pluginsd.dims_with_slots)) {
- // caching with slots
-
- if(unlikely(slot < 1 || slot > st->pluginsd.size)) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s with slot %zd, but slots in the range [1 - %u] are expected.",
- rrdhost_hostname(host), rrdset_id(st), cmd, slot, st->pluginsd.size);
- return NULL;
- }
-
- prd = &st->pluginsd.prd_array[slot - 1];
-
- rd = prd->rd;
- if(likely(rd)) {
-#ifdef NETDATA_INTERNAL_CHECKS
- if(strcmp(prd->id, dimension) != 0) {
- ssize_t t;
- for(t = 0; t < st->pluginsd.size ;t++) {
- if (strcmp(st->pluginsd.prd_array[t].id, dimension) == 0)
- break;
- }
- if(t >= st->pluginsd.size)
- t = -1;
-
- internal_fatal(true,
- "PLUGINSD: expected to find dimension '%s' on slot %zd, but found '%s', "
- "the right slot is %zd",
- dimension, slot, prd->id, t);
- }
-#endif
- return rd;
- }
- }
- else {
- // caching without slots
-
- if(unlikely(st->pluginsd.pos >= st->pluginsd.size))
- st->pluginsd.pos = 0;
-
- prd = &st->pluginsd.prd_array[st->pluginsd.pos++];
-
- rd = prd->rd;
- if(likely(rd)) {
- const char *id = prd->id;
-
- if(strcmp(id, dimension) == 0) {
- // we found it cached
- return rd;
- }
- else {
- // the cached one is not good for us
- rrddim_acquired_release(prd->rda);
- prd->rda = NULL;
- prd->rd = NULL;
- prd->id = NULL;
- }
- }
- }
-
- // we need to find the dimension and set it to prd
-
- RRDDIM_ACQUIRED *rda = rrddim_find_and_acquire(st, dimension);
- if (unlikely(!rda)) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s but dimension does not exist.",
- rrdhost_hostname(host), rrdset_id(st), dimension, cmd);
-
- return NULL;
- }
-
- prd->rda = rda;
- prd->rd = rd = rrddim_acquired_to_rrddim(rda);
- prd->id = string2str(rd->id);
-
- return rd;
-}
-
-static inline RRDSET *pluginsd_find_chart(RRDHOST *host, const char *chart, const char *cmd) {
- if (unlikely(!chart || !*chart)) {
- netdata_log_error("PLUGINSD: 'host:%s' got a %s without a chart id.",
- rrdhost_hostname(host), cmd);
- return NULL;
- }
-
- RRDSET *st = rrdset_find(host, chart);
- if (unlikely(!st))
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s but chart does not exist.",
- rrdhost_hostname(host), chart, cmd);
-
- return st;
-}
-
-static inline ssize_t pluginsd_parse_rrd_slot(char **words, size_t num_words) {
- ssize_t slot = -1;
- char *id = get_word(words, num_words, 1);
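- // cheap character-by-character test for a leading "SLOT:<n>" word,
- // avoiding a strncmp() on every sample line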
- if(id && id[0] == PLUGINSD_KEYWORD_SLOT[0] && id[1] == PLUGINSD_KEYWORD_SLOT[1] &&
- id[2] == PLUGINSD_KEYWORD_SLOT[2] && id[3] == PLUGINSD_KEYWORD_SLOT[3] && id[4] == ':') {
- slot = (ssize_t) str2ull_encoded(&id[5]);
- if(slot < 0) slot = 0; // to make the caller increment its idx of the words
- }
-
- return slot;
-}
-
-static inline void pluginsd_rrdset_cache_put_to_slot(PARSER *parser, RRDSET *st, ssize_t slot, bool obsolete) {
- // clean possible old cached data
- rrdset_pluginsd_receive_unslot(st);
-
- if(unlikely(slot < 1 || slot >= INT32_MAX))
- return;
-
- RRDHOST *host = st->rrdhost;
-
- if(unlikely((size_t)slot > host->rrdpush.receive.pluginsd_chart_slots.size)) {
- spinlock_lock(&host->rrdpush.receive.pluginsd_chart_slots.spinlock);
- size_t old_slots = host->rrdpush.receive.pluginsd_chart_slots.size;
- size_t new_slots = (old_slots < PLUGINSD_MIN_RRDSET_POINTERS_CACHE) ? PLUGINSD_MIN_RRDSET_POINTERS_CACHE : old_slots * 2;
-
- if(new_slots < (size_t)slot)
- new_slots = slot;
-
- host->rrdpush.receive.pluginsd_chart_slots.array =
- reallocz(host->rrdpush.receive.pluginsd_chart_slots.array, new_slots * sizeof(RRDSET *));
-
- for(size_t i = old_slots; i < new_slots ;i++)
- host->rrdpush.receive.pluginsd_chart_slots.array[i] = NULL;
-
- host->rrdpush.receive.pluginsd_chart_slots.size = new_slots;
- spinlock_unlock(&host->rrdpush.receive.pluginsd_chart_slots.spinlock);
- }
-
- host->rrdpush.receive.pluginsd_chart_slots.array[slot - 1] = st;
- st->pluginsd.last_slot = (int32_t)slot - 1;
- parser->user.cleanup_slots = obsolete;
-}
-
-static inline RRDSET *pluginsd_rrdset_cache_get_from_slot(PARSER *parser, RRDHOST *host, const char *id, ssize_t slot, const char *keyword) {
- if(unlikely(slot < 1 || (size_t)slot > host->rrdpush.receive.pluginsd_chart_slots.size))
- return pluginsd_find_chart(host, id, keyword);
-
- RRDSET *st = host->rrdpush.receive.pluginsd_chart_slots.array[slot - 1];
-
- if(!st) {
- st = pluginsd_find_chart(host, id, keyword);
- if(st)
- pluginsd_rrdset_cache_put_to_slot(parser, st, slot, rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE));
- }
- else {
- internal_fatal(string_strcmp(st->id, id) != 0,
- "PLUGINSD: wrong chart in slot %zd, expected '%s', found '%s'",
- slot - 1, id, string2str(st->id));
- }
-
- return st;
-}
-
-static inline PARSER_RC PLUGINSD_DISABLE_PLUGIN(PARSER *parser, const char *keyword, const char *msg) {
- parser->user.enabled = 0;
-
- if(keyword && msg) {
- nd_log_limit_static_global_var(erl, 1, 0);
- nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_INFO,
- "PLUGINSD: keyword %s: %s", keyword, msg);
- }
-
- return PARSER_RC_ERROR;
-}
-
-static inline PARSER_RC pluginsd_set(char **words, size_t num_words, PARSER *parser) {
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *dimension = get_word(words, num_words, idx++);
- char *value = get_word(words, num_words, idx++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_SET);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_SET, PLUGINSD_KEYWORD_CHART);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_SET);
- if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- st->pluginsd.set = true;
-
- if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- netdata_log_debug(D_PLUGINSD, "PLUGINSD: 'host:%s/chart:%s/dim:%s' SET is setting value to '%s'",
- rrdhost_hostname(host), rrdset_id(st), dimension, value && *value ? value : "UNSET");
-
- if (value && *value)
- rrddim_set_by_pointer(st, rd, str2ll_encoded(value));
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_begin(char **words, size_t num_words, PARSER *parser) {
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *id = get_word(words, num_words, idx++);
- char *microseconds_txt = get_word(words, num_words, idx++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_BEGIN);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_BEGIN))
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- usec_t microseconds = 0;
- if (microseconds_txt && *microseconds_txt) {
- long long t = str2ll(microseconds_txt, NULL);
- if(t >= 0)
- microseconds = t;
- }
-
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- if(st->replay.log_next_data_collection) {
- st->replay.log_next_data_collection = false;
-
- internal_error(true,
- "REPLAY: 'host:%s/chart:%s' first BEGIN after replication, last collected %llu, last updated %llu, microseconds %llu",
- rrdhost_hostname(host), rrdset_id(st),
- st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec,
- st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec,
- microseconds
- );
- }
-#endif
-
- if (likely(st->counter_done)) {
- if (likely(microseconds)) {
- if (parser->user.trust_durations)
- rrdset_next_usec_unfiltered(st, microseconds);
- else
- rrdset_next_usec(st, microseconds);
- }
- else
- rrdset_next(st);
- }
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_end(char **words, size_t num_words, PARSER *parser) {
- char *tv_sec = get_word(words, num_words, 1);
- char *tv_usec = get_word(words, num_words, 2);
- char *pending_rrdset_next = get_word(words, num_words, 3);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_END);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_END, PLUGINSD_KEYWORD_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- netdata_log_debug(D_PLUGINSD, "requested an END on chart '%s'", rrdset_id(st));
-
- pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_END);
- parser->user.data_collections_count++;
-
- struct timeval tv = {
- .tv_sec = (tv_sec && *tv_sec) ? str2ll(tv_sec, NULL) : 0,
- .tv_usec = (tv_usec && *tv_usec) ? str2ll(tv_usec, NULL) : 0
- };
-
- if(!tv.tv_sec)
- now_realtime_timeval(&tv);
-
- rrdset_timed_done(st, tv, pending_rrdset_next && *pending_rrdset_next ? true : false);
-
- return PARSER_RC_OK;
-}
-
-static void pluginsd_host_define_cleanup(PARSER *parser) {
- string_freez(parser->user.host_define.hostname);
- rrdlabels_destroy(parser->user.host_define.rrdlabels);
-
- parser->user.host_define.hostname = NULL;
- parser->user.host_define.rrdlabels = NULL;
- parser->user.host_define.parsing_host = false;
-}
-
-static inline bool pluginsd_validate_machine_guid(const char *guid, uuid_t *uuid, char *output) {
- if(uuid_parse(guid, *uuid))
- return false;
-
- uuid_unparse_lower(*uuid, output);
-
- return true;
-}
-
-static inline PARSER_RC pluginsd_host_define(char **words, size_t num_words, PARSER *parser) {
- char *guid = get_word(words, num_words, 1);
- char *hostname = get_word(words, num_words, 2);
-
- if(unlikely(!guid || !*guid || !hostname || !*hostname))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE, "missing parameters");
-
- if(unlikely(parser->user.host_define.parsing_host))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE,
- "another host definition is already open - did you send " PLUGINSD_KEYWORD_HOST_DEFINE_END "?");
-
- if(!pluginsd_validate_machine_guid(guid, &parser->user.host_define.machine_guid, parser->user.host_define.machine_guid_str))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE, "cannot parse MACHINE_GUID - is it a valid UUID?");
-
- parser->user.host_define.hostname = string_strdupz(hostname);
- parser->user.host_define.rrdlabels = rrdlabels_create();
- parser->user.host_define.parsing_host = true;
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_host_dictionary(char **words, size_t num_words, PARSER *parser, RRDLABELS *labels, const char *keyword) {
- char *name = get_word(words, num_words, 1);
- char *value = get_word(words, num_words, 2);
-
- if(!name || !*name || !value)
- return PLUGINSD_DISABLE_PLUGIN(parser, keyword, "missing parameters");
-
- if(!parser->user.host_define.parsing_host || !labels)
- return PLUGINSD_DISABLE_PLUGIN(parser, keyword, "host is not defined, send " PLUGINSD_KEYWORD_HOST_DEFINE " before this");
-
- rrdlabels_add(labels, name, value, RRDLABEL_SRC_CONFIG);
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_host_labels(char **words, size_t num_words, PARSER *parser) {
- return pluginsd_host_dictionary(words, num_words, parser,
- parser->user.host_define.rrdlabels,
- PLUGINSD_KEYWORD_HOST_LABEL);
-}
-
-static inline PARSER_RC pluginsd_host_define_end(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
- if(!parser->user.host_define.parsing_host)
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE_END, "missing initialization, send " PLUGINSD_KEYWORD_HOST_DEFINE " before this");
-
- RRDHOST *host = rrdhost_find_or_create(
- string2str(parser->user.host_define.hostname),
- string2str(parser->user.host_define.hostname),
- parser->user.host_define.machine_guid_str,
- "Netdata Virtual Host 1.0",
- netdata_configured_timezone,
- netdata_configured_abbrev_timezone,
- netdata_configured_utc_offset,
- NULL,
- program_name,
- program_version,
- default_rrd_update_every,
- default_rrd_history_entries,
- default_rrd_memory_mode,
- default_health_enabled,
- default_rrdpush_enabled,
- default_rrdpush_destination,
- default_rrdpush_api_key,
- default_rrdpush_send_charts_matching,
- default_rrdpush_enable_replication,
- default_rrdpush_seconds_to_replicate,
- default_rrdpush_replication_step,
- rrdhost_labels_to_system_info(parser->user.host_define.rrdlabels),
- false);
-
- rrdhost_option_set(host, RRDHOST_OPTION_VIRTUAL_HOST);
-
- if(host->rrdlabels) {
- rrdlabels_migrate_to_these(host->rrdlabels, parser->user.host_define.rrdlabels);
- }
- else {
- host->rrdlabels = parser->user.host_define.rrdlabels;
- parser->user.host_define.rrdlabels = NULL;
- }
-
- pluginsd_host_define_cleanup(parser);
-
- parser->user.host = host;
- pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_HOST_DEFINE_END);
-
- rrdhost_flag_clear(host, RRDHOST_FLAG_ORPHAN);
- rrdcontext_host_child_connected(host);
- schedule_node_info_update(host);
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_host(char **words, size_t num_words, PARSER *parser) {
- char *guid = get_word(words, num_words, 1);
-
- if(!guid || !*guid || strcmp(guid, "localhost") == 0) {
- parser->user.host = localhost;
- return PARSER_RC_OK;
- }
-
- uuid_t uuid;
- char uuid_str[UUID_STR_LEN];
- if(!pluginsd_validate_machine_guid(guid, &uuid, uuid_str))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST, "cannot parse MACHINE_GUID - is it a valid UUID?");
-
- RRDHOST *host = rrdhost_find_by_guid(uuid_str);
- if(unlikely(!host))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST, "cannot find a host with this machine guid - have you created it?");
-
- parser->user.host = host;
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_chart(char **words, size_t num_words, PARSER *parser) {
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CHART);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *type = get_word(words, num_words, idx++);
- char *name = get_word(words, num_words, idx++);
- char *title = get_word(words, num_words, idx++);
- char *units = get_word(words, num_words, idx++);
- char *family = get_word(words, num_words, idx++);
- char *context = get_word(words, num_words, idx++);
- char *chart = get_word(words, num_words, idx++);
- char *priority_s = get_word(words, num_words, idx++);
- char *update_every_s = get_word(words, num_words, idx++);
- char *options = get_word(words, num_words, idx++);
- char *plugin = get_word(words, num_words, idx++);
- char *module = get_word(words, num_words, idx++);
-
- // parse the id from type
- char *id = NULL;
- if (likely(type && (id = strchr(type, '.')))) {
- *id = '\0';
- id++;
- }
-
- // make sure we have the required variables
- if (unlikely((!type || !*type || !id || !*id)))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_CHART, "missing parameters");
-
- // parse the name, and make sure it does not include 'type.'
- if (unlikely(name && *name)) {
- // when data are streamed from child nodes
- // name will be type.name
- // so, we have to remove 'type.' from name too
- size_t len = strlen(type);
- if (strncmp(type, name, len) == 0 && name[len] == '.')
- name = &name[len + 1];
-
- // if the name is the same as the id,
- // or is just 'NULL', clear it.
- if (unlikely(strcmp(name, id) == 0 || strcasecmp(name, "NULL") == 0 || strcasecmp(name, "(NULL)") == 0))
- name = NULL;
- }
-
- int priority = 1000;
- if (likely(priority_s && *priority_s))
- priority = str2i(priority_s);
-
- int update_every = parser->user.cd->update_every;
- if (likely(update_every_s && *update_every_s))
- update_every = str2i(update_every_s);
- if (unlikely(!update_every))
- update_every = parser->user.cd->update_every;
-
- RRDSET_TYPE chart_type = RRDSET_TYPE_LINE;
- if (unlikely(chart))
- chart_type = rrdset_type_id(chart);
-
- if (unlikely(name && !*name))
- name = NULL;
- if (unlikely(family && !*family))
- family = NULL;
- if (unlikely(context && !*context))
- context = NULL;
- if (unlikely(!title))
- title = "";
- if (unlikely(!units))
- units = "unknown";
-
- netdata_log_debug(
- D_PLUGINSD,
- "creating chart type='%s', id='%s', name='%s', family='%s', context='%s', chart='%s', priority=%d, update_every=%d",
- type, id, name ? name : "", family ? family : "", context ? context : "", rrdset_type_name(chart_type),
- priority, update_every);
-
- RRDSET *st = rrdset_create(
- host, type, id, name, family, context, title, units,
- (plugin && *plugin) ? plugin : parser->user.cd->filename,
- module, priority, update_every,
- chart_type);
-
- bool obsolete = false;
- if (likely(st)) {
- if (options && *options) {
- if (strstr(options, "obsolete")) {
- rrdset_is_obsolete___safe_from_collector_thread(st);
- obsolete = true;
- }
- else
- rrdset_isnot_obsolete___safe_from_collector_thread(st);
-
- if (strstr(options, "detail"))
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
- else
- rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
-
- if (strstr(options, "hidden"))
- rrdset_flag_set(st, RRDSET_FLAG_HIDDEN);
- else
- rrdset_flag_clear(st, RRDSET_FLAG_HIDDEN);
-
- if (strstr(options, "store_first"))
- rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST);
- else
- rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
- }
- else {
- rrdset_isnot_obsolete___safe_from_collector_thread(st);
- rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
- rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
- }
-
- if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_CHART))
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- pluginsd_rrdset_cache_put_to_slot(parser, st, slot, obsolete);
- }
- else
- pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_CHART);
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_chart_definition_end(char **words, size_t num_words, PARSER *parser) {
- const char *first_entry_txt = get_word(words, num_words, 1);
- const char *last_entry_txt = get_word(words, num_words, 2);
- const char *wall_clock_time_txt = get_word(words, num_words, 3);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CHART_DEFINITION_END);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_CHART_DEFINITION_END, PLUGINSD_KEYWORD_CHART);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- time_t first_entry_child = (first_entry_txt && *first_entry_txt) ? (time_t)str2ul(first_entry_txt) : 0;
- time_t last_entry_child = (last_entry_txt && *last_entry_txt) ? (time_t)str2ul(last_entry_txt) : 0;
- time_t child_wall_clock_time = (wall_clock_time_txt && *wall_clock_time_txt) ? (time_t)str2ul(wall_clock_time_txt) : now_realtime_sec();
-
- bool ok = true;
- if(!rrdset_flag_check(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS)) {
-
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- st->replay.start_streaming = false;
- st->replay.after = 0;
- st->replay.before = 0;
-#endif
-
- rrdset_flag_set(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS);
- rrdset_flag_clear(st, RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED);
- rrdhost_receiver_replicating_charts_plus_one(st->rrdhost);
-
- ok = replicate_chart_request(send_to_plugin, parser, host, st,
- first_entry_child, last_entry_child, child_wall_clock_time,
- 0, 0);
- }
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- else {
- internal_error(true, "REPLAY: 'host:%s/chart:%s' not sending duplicate replication request",
- rrdhost_hostname(st->rrdhost), rrdset_id(st));
- }
-#endif
-
- return ok ? PARSER_RC_OK : PARSER_RC_ERROR;
-}
-
-static inline PARSER_RC pluginsd_dimension(char **words, size_t num_words, PARSER *parser) {
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *id = get_word(words, num_words, idx++);
- char *name = get_word(words, num_words, idx++);
- char *algorithm = get_word(words, num_words, idx++);
- char *multiplier_s = get_word(words, num_words, idx++);
- char *divisor_s = get_word(words, num_words, idx++);
- char *options = get_word(words, num_words, idx++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_DIMENSION);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_DIMENSION, PLUGINSD_KEYWORD_CHART);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if (unlikely(!id))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DIMENSION, "missing dimension id");
-
- long multiplier = 1;
- if (multiplier_s && *multiplier_s) {
- multiplier = str2ll_encoded(multiplier_s);
- if (unlikely(!multiplier))
- multiplier = 1;
- }
-
- long divisor = 1;
- if (likely(divisor_s && *divisor_s)) {
- divisor = str2ll_encoded(divisor_s);
- if (unlikely(!divisor))
- divisor = 1;
- }
-
- if (unlikely(!algorithm || !*algorithm))
- algorithm = "absolute";
-
- if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- netdata_log_debug(
- D_PLUGINSD,
- "creating dimension in chart %s, id='%s', name='%s', algorithm='%s', multiplier=%ld, divisor=%ld, hidden='%s'",
- rrdset_id(st), id, name ? name : "", rrd_algorithm_name(rrd_algorithm_id(algorithm)), multiplier, divisor,
- options ? options : "");
-
- RRDDIM *rd = rrddim_add(st, id, name, multiplier, divisor, rrd_algorithm_id(algorithm));
- int unhide_dimension = 1;
-
- rrddim_option_clear(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS);
- bool obsolete = false;
- if (options && *options) {
- if (strstr(options, "obsolete") != NULL) {
- obsolete = true;
- rrddim_is_obsolete___safe_from_collector_thread(st, rd);
- }
- else
- rrddim_isnot_obsolete___safe_from_collector_thread(st, rd);
-
- unhide_dimension = !strstr(options, "hidden");
-
- if (strstr(options, "noreset") != NULL)
- rrddim_option_set(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS);
- if (strstr(options, "nooverflow") != NULL)
- rrddim_option_set(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS);
- }
- else
- rrddim_isnot_obsolete___safe_from_collector_thread(st, rd);
-
- bool should_update_dimension = false;
-
- if (likely(unhide_dimension)) {
- rrddim_option_clear(rd, RRDDIM_OPTION_HIDDEN);
- should_update_dimension = rrddim_flag_check(rd, RRDDIM_FLAG_META_HIDDEN);
- }
- else {
- rrddim_option_set(rd, RRDDIM_OPTION_HIDDEN);
- should_update_dimension = !rrddim_flag_check(rd, RRDDIM_FLAG_META_HIDDEN);
- }
-
- if (should_update_dimension) {
- rrddim_flag_set(rd, RRDDIM_FLAG_METADATA_UPDATE);
- rrdhost_flag_set(rd->rrdset->rrdhost, RRDHOST_FLAG_METADATA_UPDATE);
- }
-
- pluginsd_rrddim_put_to_slot(parser, st, rd, slot, obsolete);
-
- return PARSER_RC_OK;
-}
-
-// ----------------------------------------------------------------------------
-// execution of functions
-
-struct inflight_function {
- int code;
- int timeout;
- STRING *function;
- BUFFER *result_body_wb;
- rrd_function_result_callback_t result_cb;
- void *result_cb_data;
- usec_t timeout_ut;
- usec_t started_ut;
- usec_t sent_ut;
- const char *payload;
- PARSER *parser;
- bool virtual;
-};
-
-static void inflight_functions_insert_callback(const DICTIONARY_ITEM *item, void *func, void *parser_ptr) {
- struct inflight_function *pf = func;
-
- PARSER *parser = parser_ptr;
-
- // leave this code as default, so that when the dictionary is destroyed this will be sent back to the caller
- pf->code = HTTP_RESP_GATEWAY_TIMEOUT;
-
- const char *transaction = dictionary_acquired_item_name(item);
-
- char buffer[2048 + 1];
- snprintfz(buffer, sizeof(buffer) - 1, "%s %s %d \"%s\"\n",
- pf->payload ? "FUNCTION_PAYLOAD" : "FUNCTION",
- transaction,
- pf->timeout,
- string2str(pf->function));
-
- // send the command to the plugin
- ssize_t ret = send_to_plugin(buffer, parser);
-
- pf->sent_ut = now_realtime_usec();
-
- if(ret < 0) {
- netdata_log_error("FUNCTION '%s': failed to send it to the plugin, error %zd", string2str(pf->function), ret);
- rrd_call_function_error(pf->result_body_wb, "Failed to communicate with collector", HTTP_RESP_SERVICE_UNAVAILABLE);
- }
- else {
- internal_error(LOG_FUNCTIONS,
- "FUNCTION '%s' with transaction '%s' sent to collector (%zd bytes, in %"PRIu64" usec)",
- string2str(pf->function), dictionary_acquired_item_name(item), ret,
- pf->sent_ut - pf->started_ut);
- }
-
- if (!pf->payload)
- return;
-
- // send the payload to the plugin
- ret = send_to_plugin(pf->payload, parser);
-
- if(ret < 0) {
- netdata_log_error("FUNCTION_PAYLOAD '%s': failed to send function to plugin, error %zd", string2str(pf->function), ret);
- rrd_call_function_error(pf->result_body_wb, "Failed to communicate with collector", HTTP_RESP_SERVICE_UNAVAILABLE);
- }
- else {
- internal_error(LOG_FUNCTIONS,
- "FUNCTION_PAYLOAD '%s' with transaction '%s' sent to collector (%zd bytes, in %"PRIu64" usec)",
- string2str(pf->function), dictionary_acquired_item_name(item), ret,
- pf->sent_ut - pf->started_ut);
- }
-
- send_to_plugin("\nFUNCTION_PAYLOAD_END\n", parser);
-}
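-
-// Illustrative example of the wire exchange produced by the insert callback
-// above (transaction UUIDs shortened for readability):
-//
-//   FUNCTION 3f1a...9c 10 "processes"
-//
-// or, when the request carries a payload:
-//
-//   FUNCTION_PAYLOAD 3f1a...9c 10 "some_function"
-//   <payload lines>
-//   FUNCTION_PAYLOAD_END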
-
-static bool inflight_functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func __maybe_unused, void *new_func, void *parser_ptr __maybe_unused) {
- struct inflight_function *pf = new_func;
-
- netdata_log_error("PLUGINSD_PARSER: duplicate UUID on pending function '%s' detected. Ignoring the second one.", string2str(pf->function));
- pf->code = rrd_call_function_error(pf->result_body_wb, "This request is already in progress", HTTP_RESP_BAD_REQUEST);
- pf->result_cb(pf->result_body_wb, pf->code, pf->result_cb_data);
- string_freez(pf->function);
-
- return false;
-}
-
-void delete_job_finalize(struct parser *parser __maybe_unused, struct configurable_plugin *plug, const char *fnc_sig, int code) {
- if (code != DYNCFG_VFNC_RET_CFG_ACCEPTED)
- return;
-
- char *params_local = strdupz(fnc_sig);
- char *words[DYNCFG_MAX_WORDS];
- size_t words_c = quoted_strings_splitter(params_local, words, DYNCFG_MAX_WORDS, isspace_map_pluginsd);
-
- if (words_c != 3) {
- netdata_log_error("PLUGINSD_PARSER: invalid number of parameters for delete_job");
- freez(params_local);
- return;
- }
-
- const char *module = words[1];
- const char *job = words[2];
-
- delete_job(plug, module, job);
-
- unlink_job(plug->name, module, job);
-
- rrdpush_send_job_deleted(localhost, plug->name, module, job);
-
- freez(params_local);
-}
-
-void set_job_finalize(struct parser *parser __maybe_unused, struct configurable_plugin *plug __maybe_unused, const char *fnc_sig, int code) {
- if (code != DYNCFG_VFNC_RET_CFG_ACCEPTED)
- return;
-
- char *params_local = strdupz(fnc_sig);
- char *words[DYNCFG_MAX_WORDS];
- size_t words_c = quoted_strings_splitter(params_local, words, DYNCFG_MAX_WORDS, isspace_map_pluginsd);
-
- if (words_c != 3) {
- netdata_log_error("PLUGINSD_PARSER: invalid number of parameters for set_job_config");
- freez(params_local);
- return;
- }
-
- const char *module_name = get_word(words, words_c, 1);
- const char *job_name = get_word(words, words_c, 2);
-
- if (register_job(parser->user.host->configurable_plugins, parser->user.cd->configuration->name, module_name, job_name, JOB_TYPE_USER, JOB_FLG_USER_CREATED, 1)) {
- freez(params_local);
- return;
- }
-
- // only send this if it does not already exist (register_job takes care of that)
- rrdpush_send_dyncfg_reg_job(localhost, parser->user.cd->configuration->name, module_name, job_name, JOB_TYPE_USER, JOB_FLG_USER_CREATED);
-
- freez(params_local);
-}
-
-static void inflight_functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func, void *parser_ptr) {
- struct inflight_function *pf = func;
- struct parser *parser = (struct parser *)parser_ptr;
-
- internal_error(LOG_FUNCTIONS,
- "FUNCTION '%s' result of transaction '%s' received from collector (%zu bytes, request %"PRIu64" usec, response %"PRIu64" usec)",
- string2str(pf->function), dictionary_acquired_item_name(item),
- buffer_strlen(pf->result_body_wb), pf->sent_ut - pf->started_ut, now_realtime_usec() - pf->sent_ut);
-
- if (pf->virtual && SERVING_PLUGINSD(parser)) {
- if (pf->payload) {
- if (strncmp(string2str(pf->function), FUNCTION_NAME_SET_JOB_CONFIG, strlen(FUNCTION_NAME_SET_JOB_CONFIG)) == 0)
- set_job_finalize(parser, parser->user.cd->configuration, string2str(pf->function), pf->code);
- dyn_conf_store_config(string2str(pf->function), pf->payload, parser->user.cd->configuration);
- } else if (strncmp(string2str(pf->function), FUNCTION_NAME_DELETE_JOB, strlen(FUNCTION_NAME_DELETE_JOB)) == 0) {
- delete_job_finalize(parser, parser->user.cd->configuration, string2str(pf->function), pf->code);
- }
- }
-
- pf->result_cb(pf->result_body_wb, pf->code, pf->result_cb_data);
-
- string_freez(pf->function);
- freez((void *)pf->payload);
-}
-
-void inflight_functions_init(PARSER *parser) {
- parser->inflight.functions = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE, &dictionary_stats_category_functions, 0);
- dictionary_register_insert_callback(parser->inflight.functions, inflight_functions_insert_callback, parser);
- dictionary_register_delete_callback(parser->inflight.functions, inflight_functions_delete_callback, parser);
- dictionary_register_conflict_callback(parser->inflight.functions, inflight_functions_conflict_callback, parser);
-}
-
-static void inflight_functions_garbage_collect(PARSER *parser, usec_t now) {
- parser->inflight.smaller_timeout = 0;
- struct inflight_function *pf;
- dfe_start_write(parser->inflight.functions, pf) {
- if (pf->timeout_ut < now) {
- internal_error(true,
- "FUNCTION '%s' removing expired transaction '%s', after %"PRIu64" usec.",
- string2str(pf->function), pf_dfe.name, now - pf->started_ut);
-
- if(!buffer_strlen(pf->result_body_wb) || pf->code == HTTP_RESP_OK)
- pf->code = rrd_call_function_error(pf->result_body_wb,
- "Timeout waiting for collector response.",
- HTTP_RESP_GATEWAY_TIMEOUT);
-
- dictionary_del(parser->inflight.functions, pf_dfe.name);
- }
-
- else if(!parser->inflight.smaller_timeout || pf->timeout_ut < parser->inflight.smaller_timeout)
- parser->inflight.smaller_timeout = pf->timeout_ut;
- }
- dfe_done(pf);
-}
-
-void pluginsd_function_cancel(void *data) {
- struct inflight_function *look_for = data, *t;
-
- bool sent = false;
- dfe_start_read(look_for->parser->inflight.functions, t) {
- if(look_for == t) {
- const char *transaction = t_dfe.name;
-
- internal_error(true, "PLUGINSD: sending function cancellation to plugin for transaction '%s'", transaction);
-
- char buffer[2048 + 1];
- snprintfz(buffer, sizeof(buffer) - 1, "%s %s\n",
- PLUGINSD_KEYWORD_FUNCTION_CANCEL,
- transaction);
-
- // send the command to the plugin
- ssize_t ret = send_to_plugin(buffer, t->parser);
- if(ret >= 0)
- sent = true;
-
- break;
- }
- }
- dfe_done(t);
-
- if(!sent)
- nd_log(NDLS_DAEMON, NDLP_DEBUG,
- "PLUGINSD: FUNCTION_CANCEL request didn't match any pending function requests in plugins.d.");
-}
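-
-// Illustrative example: the cancellation line written to the plugin above is
-//
-//   FUNCTION_CANCEL 3f1a...9c
-//
-// where the transaction id is the UUID used in the original FUNCTION line.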
-
-// this is the function that is called from
-// rrd_call_function_and_wait() and rrd_call_function_async()
-static int pluginsd_function_execute_cb(BUFFER *result_body_wb, int timeout, const char *function,
- void *execute_cb_data,
- rrd_function_result_callback_t result_cb, void *result_cb_data,
- rrd_function_is_cancelled_cb_t is_cancelled_cb __maybe_unused,
- void *is_cancelled_cb_data __maybe_unused,
- rrd_function_register_canceller_cb_t register_canceller_cb,
- void *register_canceller_db_data) {
- PARSER *parser = execute_cb_data;
-
- usec_t now = now_realtime_usec();
-
- struct inflight_function tmp = {
- .started_ut = now,
- .timeout_ut = now + timeout * USEC_PER_SEC + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT,
- .result_body_wb = result_body_wb,
- .timeout = timeout,
- .function = string_strdupz(function),
- .result_cb = result_cb,
- .result_cb_data = result_cb_data,
- .payload = NULL,
- .parser = parser,
- };
-
- uuid_t uuid;
- uuid_generate_random(uuid);
-
- char transaction[UUID_STR_LEN];
- uuid_unparse_lower(uuid, transaction);
-
- dictionary_write_lock(parser->inflight.functions);
-
- // if there is any error, our dictionary callbacks will call the caller callback to notify
- // the caller about the error - no need for error handling here.
- void *t = dictionary_set(parser->inflight.functions, transaction, &tmp, sizeof(struct inflight_function));
- if(register_canceller_cb)
- register_canceller_cb(register_canceller_db_data, pluginsd_function_cancel, t);
-
- if(!parser->inflight.smaller_timeout || tmp.timeout_ut < parser->inflight.smaller_timeout)
- parser->inflight.smaller_timeout = tmp.timeout_ut;
-
- // garbage collect stale inflight functions
- if(parser->inflight.smaller_timeout < now)
- inflight_functions_garbage_collect(parser, now);
-
- dictionary_write_unlock(parser->inflight.functions);
-
- return HTTP_RESP_OK;
-}
-
-static inline PARSER_RC pluginsd_function(char **words, size_t num_words, PARSER *parser) {
- // a plugin or a child is registering a function
-
- bool global = false;
- size_t i = 1;
- if(num_words >= 2 && strcmp(get_word(words, num_words, 1), "GLOBAL") == 0) {
- i++;
- global = true;
- }
-
- char *name = get_word(words, num_words, i++);
- char *timeout_s = get_word(words, num_words, i++);
- char *help = get_word(words, num_words, i++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_FUNCTION);
- if(!host) return PARSER_RC_ERROR;
-
- RRDSET *st = (global)? NULL: pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_FUNCTION, PLUGINSD_KEYWORD_CHART);
- if(!st) global = true;
-
- if (unlikely(!timeout_s || !name || !help || (!global && !st))) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a FUNCTION, without providing the required data (global = '%s', name = '%s', timeout = '%s', help = '%s'). Ignoring it.",
- rrdhost_hostname(host),
- st?rrdset_id(st):"(unset)",
- global?"yes":"no",
- name?name:"(unset)",
- timeout_s?timeout_s:"(unset)",
- help?help:"(unset)"
- );
- return PARSER_RC_ERROR;
- }
-
- int timeout = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT;
- if (timeout_s && *timeout_s) {
- timeout = str2i(timeout_s);
- if (unlikely(timeout <= 0))
- timeout = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT;
- }
-
- rrd_function_add(host, st, name, timeout, help, false, pluginsd_function_execute_cb, parser);
-
- parser->user.data_collections_count++;
-
- return PARSER_RC_OK;
-}
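-
-// Illustrative examples of the registration lines handled above (timeout in
-// seconds, help text quoted):
-//
-//   FUNCTION GLOBAL "processes" 10 "List the running processes"
-//   FUNCTION "chart-details" 10 "Details for the current chart"
-//
-// The second form is chart-scoped and must follow a CHART definition.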
-
-static void pluginsd_function_result_end(struct parser *parser, void *action_data) {
- STRING *key = action_data;
- if(key)
- dictionary_del(parser->inflight.functions, string2str(key));
- string_freez(key);
-
- parser->user.data_collections_count++;
-}
-
-static inline PARSER_RC pluginsd_function_result_begin(char **words, size_t num_words, PARSER *parser) {
- char *key = get_word(words, num_words, 1);
- char *status = get_word(words, num_words, 2);
- char *format = get_word(words, num_words, 3);
- char *expires = get_word(words, num_words, 4);
-
- if (unlikely(!key || !*key || !status || !*status || !format || !*format || !expires || !*expires)) {
- netdata_log_error("got a " PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " without providing the required data (key = '%s', status = '%s', format = '%s', expires = '%s')."
- , key ? key : "(unset)"
- , status ? status : "(unset)"
- , format ? format : "(unset)"
- , expires ? expires : "(unset)"
- );
- }
-
- int code = (status && *status) ? str2i(status) : 0;
- if (code <= 0)
- code = HTTP_RESP_BACKEND_RESPONSE_INVALID;
-
- time_t expiration = (expires && *expires) ? str2l(expires) : 0;
-
- struct inflight_function *pf = NULL;
-
- if(key && *key)
- pf = (struct inflight_function *)dictionary_get(parser->inflight.functions, key);
-
- if(!pf) {
- netdata_log_error("got a " PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " for transaction '%s', but the transaction is not found.", key?key:"(unset)");
- }
- else {
- if(format && *format)
- pf->result_body_wb->content_type = functions_format_to_content_type(format);
-
- pf->code = code;
-
- pf->result_body_wb->expires = expiration;
- if(expiration <= now_realtime_sec())
- buffer_no_cacheable(pf->result_body_wb);
- else
- buffer_cacheable(pf->result_body_wb);
- }
-
- parser->defer.response = (pf) ? pf->result_body_wb : NULL;
- parser->defer.end_keyword = PLUGINSD_KEYWORD_FUNCTION_RESULT_END;
- parser->defer.action = pluginsd_function_result_end;
- parser->defer.action_data = string_strdupz(key); // it is ok if key is NULL
- parser->flags |= PARSER_DEFER_UNTIL_KEYWORD;
-
- return PARSER_RC_OK;
-}
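-
-// Illustrative example of a plugin's response, as parsed above: the key must
-// match the FUNCTION transaction, followed by an HTTP-like status code, the
-// content type of the result, and a unix timestamp controlling cacheability:
-//
-//   FUNCTION_RESULT_BEGIN 3f1a...9c 200 application/json 1700000000
-//   { "status": 200 }
-//   FUNCTION_RESULT_END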
-
-// ----------------------------------------------------------------------------
-
-static inline PARSER_RC pluginsd_variable(char **words, size_t num_words, PARSER *parser) {
- char *name = get_word(words, num_words, 1);
- char *value = get_word(words, num_words, 2);
- NETDATA_DOUBLE v;
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_VARIABLE);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_get_scope_chart(parser);
-
- int global = (st) ? 0 : 1;
-
- if (name && *name) {
- if ((strcmp(name, "GLOBAL") == 0 || strcmp(name, "HOST") == 0)) {
- global = 1;
- name = get_word(words, num_words, 2);
- value = get_word(words, num_words, 3);
- } else if ((strcmp(name, "LOCAL") == 0 || strcmp(name, "CHART") == 0)) {
- global = 0;
- name = get_word(words, num_words, 2);
- value = get_word(words, num_words, 3);
- }
- }
-
- if (unlikely(!name || !*name))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_VARIABLE, "missing variable name");
-
- if (unlikely(!value || !*value))
- value = NULL;
-
- if (unlikely(!value)) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' cannot set %s VARIABLE '%s' to an empty value",
- rrdhost_hostname(host),
- st ? rrdset_id(st):"UNSET",
- (global) ? "HOST" : "CHART",
- name);
- return PARSER_RC_OK;
- }
-
- if (!global && !st)
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_VARIABLE, "no chart is defined and no GLOBAL is given");
-
- char *endptr = NULL;
- v = (NETDATA_DOUBLE) str2ndd_encoded(value, &endptr);
- if (unlikely(endptr && *endptr)) {
- if (endptr == value)
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' the value '%s' of VARIABLE '%s' cannot be parsed as a number",
- rrdhost_hostname(host),
- st ? rrdset_id(st):"UNSET",
- value,
- name);
- else
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' the value '%s' of VARIABLE '%s' has leftovers: '%s'",
- rrdhost_hostname(host),
- st ? rrdset_id(st):"UNSET",
- value,
- name,
- endptr);
- }
-
- if (global) {
- const RRDVAR_ACQUIRED *rva = rrdvar_custom_host_variable_add_and_acquire(host, name);
- if (rva) {
- rrdvar_custom_host_variable_set(host, rva, v);
- rrdvar_custom_host_variable_release(host, rva);
- }
- else
- netdata_log_error("PLUGINSD: 'host:%s' cannot find/create HOST VARIABLE '%s'",
- rrdhost_hostname(host),
- name);
- } else {
- const RRDSETVAR_ACQUIRED *rsa = rrdsetvar_custom_chart_variable_add_and_acquire(st, name);
- if (rsa) {
- rrdsetvar_custom_chart_variable_set(st, rsa, v);
- rrdsetvar_custom_chart_variable_release(st, rsa);
- }
- else
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' cannot find/create CHART VARIABLE '%s'",
- rrdhost_hostname(host), rrdset_id(st), name);
- }
-
- return PARSER_RC_OK;
-}
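-
-// Illustrative examples of the VARIABLE forms handled above:
-//
-//   VARIABLE HOST system_uptime 123456
-//   VARIABLE CHART max_response_time 0.5
-//
-// Without an explicit HOST/GLOBAL or CHART/LOCAL keyword, the variable is
-// chart-scoped when a chart is currently set, host-scoped otherwise.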
-
-static inline PARSER_RC pluginsd_flush(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
- netdata_log_debug(D_PLUGINSD, "requested a " PLUGINSD_KEYWORD_FLUSH);
- pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_FLUSH);
- parser->user.replay.start_time = 0;
- parser->user.replay.end_time = 0;
- parser->user.replay.start_time_ut = 0;
- parser->user.replay.end_time_ut = 0;
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_disable(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
- netdata_log_info("PLUGINSD: plugin called DISABLE. Disabling it.");
- parser->user.enabled = 0;
- return PARSER_RC_STOP;
-}
-
-static inline PARSER_RC pluginsd_label(char **words, size_t num_words, PARSER *parser) {
- const char *name = get_word(words, num_words, 1);
- const char *label_source = get_word(words, num_words, 2);
- const char *value = get_word(words, num_words, 3);
-
- if (!name || !label_source || !value)
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_LABEL, "missing parameters");
-
- char *store = (char *)value;
- bool allocated_store = false;
-
- if(unlikely(num_words > 4)) {
- allocated_store = true;
- store = mallocz(PLUGINSD_LINE_MAX + 1);
- size_t remaining = PLUGINSD_LINE_MAX;
- char *move = store;
- char *word;
- for(size_t i = 3; i < num_words && remaining > 2 && (word = get_word(words, num_words, i)) ;i++) {
- if(i > 3) {
- *move++ = ' ';
- *move = '\0';
- remaining--;
- }
-
- size_t length = strlen(word);
- if (length > remaining)
- length = remaining;
-
- remaining -= length;
- memcpy(move, word, length);
- move += length;
- *move = '\0';
- }
- }
-
- if(unlikely(!(parser->user.new_host_labels)))
- parser->user.new_host_labels = rrdlabels_create();
-
- if (strcmp(name,HOST_LABEL_IS_EPHEMERAL) == 0) {
- int is_ephemeral = appconfig_test_boolean_value((char *) value);
- if (is_ephemeral) {
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_LABEL);
- if (likely(host))
- rrdhost_option_set(host, RRDHOST_OPTION_EPHEMERAL_HOST);
- }
- }
-
- rrdlabels_add(parser->user.new_host_labels, name, store, str2l(label_source));
-
- if (allocated_store)
- freez(store);
-
- return PARSER_RC_OK;
-}
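-
-// Illustrative example: host labels arrive as name, numeric source and value;
-// any words after the third are re-joined with single spaces, so
-//
-//   LABEL location 1 data center 1
-//
-// stores the label "location" = "data center 1" (the number maps to the
-// RRDLABEL_SRC_* source flags).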
-
-static inline PARSER_RC pluginsd_overwrite(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_OVERWRITE);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- netdata_log_debug(D_PLUGINSD, "requested to OVERWRITE host labels");
-
- if(unlikely(!host->rrdlabels))
- host->rrdlabels = rrdlabels_create();
-
- rrdlabels_migrate_to_these(host->rrdlabels, parser->user.new_host_labels);
- if (rrdhost_option_check(host, RRDHOST_OPTION_EPHEMERAL_HOST))
- rrdlabels_add(host->rrdlabels, HOST_LABEL_IS_EPHEMERAL, "true", RRDLABEL_SRC_CONFIG);
- rrdhost_flag_set(host, RRDHOST_FLAG_METADATA_LABELS | RRDHOST_FLAG_METADATA_UPDATE);
-
- rrdlabels_destroy(parser->user.new_host_labels);
- parser->user.new_host_labels = NULL;
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_clabel(char **words, size_t num_words, PARSER *parser) {
- const char *name = get_word(words, num_words, 1);
- const char *value = get_word(words, num_words, 2);
- const char *label_source = get_word(words, num_words, 3);
-
- if (!name || !value || !label_source) {
- netdata_log_error("Ignoring malformed or empty CHART LABEL command.");
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
- }
-
- if(unlikely(!parser->user.chart_rrdlabels_linked_temporarily)) {
- RRDSET *st = pluginsd_get_scope_chart(parser);
- parser->user.chart_rrdlabels_linked_temporarily = st->rrdlabels;
- rrdlabels_unmark_all(parser->user.chart_rrdlabels_linked_temporarily);
- }
-
- rrdlabels_add(parser->user.chart_rrdlabels_linked_temporarily, name, value, str2l(label_source));
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_clabel_commit(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CLABEL_COMMIT);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_CLABEL_COMMIT, PLUGINSD_KEYWORD_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- netdata_log_debug(D_PLUGINSD, "requested to commit chart labels");
-
- if(!parser->user.chart_rrdlabels_linked_temporarily) {
- netdata_log_error("PLUGINSD: 'host:%s' got CLABEL_COMMIT, without a CHART or BEGIN. Ignoring it.", rrdhost_hostname(host));
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
- }
-
- rrdlabels_remove_all_unmarked(parser->user.chart_rrdlabels_linked_temporarily);
-
- rrdset_flag_set(st, RRDSET_FLAG_METADATA_UPDATE);
- rrdhost_flag_set(st->rrdhost, RRDHOST_FLAG_METADATA_UPDATE);
- rrdset_metadata_updated(st);
-
- parser->user.chart_rrdlabels_linked_temporarily = NULL;
- return PARSER_RC_OK;
-}
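-
-// Illustrative example: chart labels are staged with CLABEL (name, value,
-// numeric source) and applied atomically by CLABEL_COMMIT; labels that are
-// not re-sent before the commit are removed:
-//
-//   CLABEL "device" "sda" 1
-//   CLABEL "mount" "/" 1
-//   CLABEL_COMMIT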
-
-static inline PARSER_RC pluginsd_replay_begin(char **words, size_t num_words, PARSER *parser) {
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *id = get_word(words, num_words, idx++);
- char *start_time_str = get_word(words, num_words, idx++);
- char *end_time_str = get_word(words, num_words, idx++);
- char *child_now_str = get_word(words, num_words, idx++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_BEGIN);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st;
- if (likely(!id || !*id))
- st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_BEGIN, PLUGINSD_KEYWORD_REPLAY_BEGIN);
- else
- st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_REPLAY_BEGIN);
-
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_REPLAY_BEGIN))
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(start_time_str && end_time_str) {
- time_t start_time = (time_t) str2ull_encoded(start_time_str);
- time_t end_time = (time_t) str2ull_encoded(end_time_str);
-
- time_t wall_clock_time = 0, tolerance;
- bool wall_clock_comes_from_child; (void)wall_clock_comes_from_child;
- if(child_now_str) {
- wall_clock_time = (time_t) str2ull_encoded(child_now_str);
- tolerance = st->update_every + 1;
- wall_clock_comes_from_child = true;
- }
-
- if(wall_clock_time <= 0) {
- wall_clock_time = now_realtime_sec();
- tolerance = st->update_every + 5;
- wall_clock_comes_from_child = false;
- }
-
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- internal_error(
- (!st->replay.start_streaming && (end_time < st->replay.after || start_time > st->replay.before)),
- "REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN " from %ld to %ld, which does not match our request (%ld to %ld).",
- rrdhost_hostname(st->rrdhost), rrdset_id(st), start_time, end_time, st->replay.after, st->replay.before);
-
- internal_error(
- true,
- "REPLAY: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN " from %ld to %ld, child wall clock is %ld (%s), had requested %ld to %ld",
- rrdhost_hostname(st->rrdhost), rrdset_id(st),
- start_time, end_time, wall_clock_time, wall_clock_comes_from_child ? "from child" : "parent time",
- st->replay.after, st->replay.before);
-#endif
-
- if(start_time && end_time && start_time < wall_clock_time + tolerance && end_time < wall_clock_time + tolerance && start_time < end_time) {
- if (unlikely(end_time - start_time != st->update_every))
- rrdset_set_update_every_s(st, end_time - start_time);
-
- st->last_collected_time.tv_sec = end_time;
- st->last_collected_time.tv_usec = 0;
-
- st->last_updated.tv_sec = end_time;
- st->last_updated.tv_usec = 0;
-
- st->counter++;
- st->counter_done++;
-
- // these are only needed for db mode RAM, SAVE, MAP, ALLOC
- st->db.current_entry++;
- if(st->db.current_entry >= st->db.entries)
- st->db.current_entry -= st->db.entries;
-
- parser->user.replay.start_time = start_time;
- parser->user.replay.end_time = end_time;
- parser->user.replay.start_time_ut = (usec_t) start_time * USEC_PER_SEC;
- parser->user.replay.end_time_ut = (usec_t) end_time * USEC_PER_SEC;
- parser->user.replay.wall_clock_time = wall_clock_time;
- parser->user.replay.rset_enabled = true;
-
- return PARSER_RC_OK;
- }
-
- netdata_log_error("PLUGINSD REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN
- " from %ld to %ld, but timestamps are invalid "
- "(now is %ld [%s], tolerance %ld). Ignoring " PLUGINSD_KEYWORD_REPLAY_SET,
- rrdhost_hostname(st->rrdhost), rrdset_id(st), start_time, end_time,
- wall_clock_time, wall_clock_comes_from_child ? "child wall clock" : "parent wall clock",
- tolerance);
- }
-
- // the child initially sends an RBEGIN without any parameters;
- // setting rset_enabled to false means RSET should not store any metrics;
- // to store metrics, the RBEGIN needs to carry timestamps
- parser->user.replay.start_time = 0;
- parser->user.replay.end_time = 0;
- parser->user.replay.start_time_ut = 0;
- parser->user.replay.end_time_ut = 0;
- parser->user.replay.wall_clock_time = 0;
- parser->user.replay.rset_enabled = false;
- return PARSER_RC_OK;
-}
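-
-// Illustrative example of a replication window (plain decimal timestamps;
-// children may also send them encoded, see str2ull_encoded()):
-//
-//   RBEGIN 'system.cpu' 1700000000 1700000001 1700000002
-//
-// that is: chart id, start time, end time and, optionally, the child's wall
-// clock time. An RBEGIN without timestamps only selects the chart and
-// disables RSET storage.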
-
-static inline SN_FLAGS pluginsd_parse_storage_number_flags(const char *flags_str) {
- SN_FLAGS flags = SN_FLAG_NONE;
-
- char c;
- while ((c = *flags_str++)) {
- switch (c) {
- case 'A':
- flags |= SN_FLAG_NOT_ANOMALOUS;
- break;
-
- case 'R':
- flags |= SN_FLAG_RESET;
- break;
-
- case 'E':
- flags = SN_EMPTY_SLOT;
- return flags;
-
- default:
- internal_error(true, "Unknown SN_FLAGS flag '%c'", c);
- break;
- }
- }
-
- return flags;
-}
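-
-// Examples: "A" parses to SN_FLAG_NOT_ANOMALOUS, "AR" to
-// SN_FLAG_NOT_ANOMALOUS | SN_FLAG_RESET, and any string containing 'E'
-// short-circuits to SN_EMPTY_SLOT (a gap), discarding flags already parsed.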
-
-static inline PARSER_RC pluginsd_replay_set(char **words, size_t num_words, PARSER *parser) {
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *dimension = get_word(words, num_words, idx++);
- char *value_str = get_word(words, num_words, idx++);
- char *flags_str = get_word(words, num_words, idx++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_SET);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_SET, PLUGINSD_KEYWORD_REPLAY_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(!parser->user.replay.rset_enabled) {
- nd_log_limit_static_thread_var(erl, 1, 0);
- nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_ERR,
- "PLUGINSD: 'host:%s/chart:%s' got a %s but it is disabled by %s errors",
- rrdhost_hostname(host), rrdset_id(st), PLUGINSD_KEYWORD_REPLAY_SET, PLUGINSD_KEYWORD_REPLAY_BEGIN);
-
- // we have to return OK here
- return PARSER_RC_OK;
- }
-
- RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_REPLAY_SET);
- if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- st->pluginsd.set = true;
-
- if (unlikely(!parser->user.replay.start_time || !parser->user.replay.end_time)) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s with invalid timestamps %ld to %ld from a %s. Disabling it.",
- rrdhost_hostname(host),
- rrdset_id(st),
- dimension,
- PLUGINSD_KEYWORD_REPLAY_SET,
- parser->user.replay.start_time,
- parser->user.replay.end_time,
- PLUGINSD_KEYWORD_REPLAY_BEGIN);
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
- }
-
- if (unlikely(!value_str || !*value_str))
- value_str = "NAN";
-
- if(unlikely(!flags_str))
- flags_str = "";
-
- if (likely(value_str)) {
- RRDDIM_FLAGS rd_flags = rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE | RRDDIM_FLAG_ARCHIVED);
-
- if(!(rd_flags & RRDDIM_FLAG_ARCHIVED)) {
- NETDATA_DOUBLE value = str2ndd_encoded(value_str, NULL);
- SN_FLAGS flags = pluginsd_parse_storage_number_flags(flags_str);
-
- if (!netdata_double_isnumber(value) || (flags == SN_EMPTY_SLOT)) {
- value = NAN;
- flags = SN_EMPTY_SLOT;
- }
-
- rrddim_store_metric(rd, parser->user.replay.end_time_ut, value, flags);
- rd->collector.last_collected_time.tv_sec = parser->user.replay.end_time;
- rd->collector.last_collected_time.tv_usec = 0;
- rd->collector.counter++;
- }
- else {
- nd_log_limit_static_global_var(erl, 1, 0);
- nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_WARNING,
- "PLUGINSD: 'host:%s/chart:%s/dim:%s' has the ARCHIVED flag set, but it is replicated. "
- "Ignoring data.",
- rrdhost_hostname(st->rrdhost), rrdset_id(st), rrddim_name(rd));
- }
- }
-
- return PARSER_RC_OK;
-}
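-
-// Illustrative example of a replication sample inside an RBEGIN window:
-//
-//   RSET 'user' 12.5 A
-//
-// dimension id, value (possibly encoded) and the storage-number flags parsed
-// by pluginsd_parse_storage_number_flags() above.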
-
-static inline PARSER_RC pluginsd_replay_rrddim_collection_state(char **words, size_t num_words, PARSER *parser) {
- if(parser->user.replay.rset_enabled == false)
- return PARSER_RC_OK;
-
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *dimension = get_word(words, num_words, idx++);
- char *last_collected_ut_str = get_word(words, num_words, idx++);
- char *last_collected_value_str = get_word(words, num_words, idx++);
- char *last_calculated_value_str = get_word(words, num_words, idx++);
- char *last_stored_value_str = get_word(words, num_words, idx++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE, PLUGINSD_KEYWORD_REPLAY_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(st->pluginsd.set) {
- // reset pos to reuse the same RDAs
- st->pluginsd.pos = 0;
- st->pluginsd.set = false;
- }
-
- RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE);
- if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- usec_t dim_last_collected_ut = (usec_t)rd->collector.last_collected_time.tv_sec * USEC_PER_SEC + (usec_t)rd->collector.last_collected_time.tv_usec;
- usec_t last_collected_ut = last_collected_ut_str ? str2ull_encoded(last_collected_ut_str) : 0;
- if(last_collected_ut > dim_last_collected_ut) {
- rd->collector.last_collected_time.tv_sec = (time_t)(last_collected_ut / USEC_PER_SEC);
- rd->collector.last_collected_time.tv_usec = (last_collected_ut % USEC_PER_SEC);
- }
-
- rd->collector.last_collected_value = last_collected_value_str ? str2ll_encoded(last_collected_value_str) : 0;
- rd->collector.last_calculated_value = last_calculated_value_str ? str2ndd_encoded(last_calculated_value_str, NULL) : 0;
- rd->collector.last_stored_value = last_stored_value_str ? str2ndd_encoded(last_stored_value_str, NULL) : 0.0;
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_replay_rrdset_collection_state(char **words, size_t num_words, PARSER *parser) {
- if(parser->user.replay.rset_enabled == false)
- return PARSER_RC_OK;
-
- char *last_collected_ut_str = get_word(words, num_words, 1);
- char *last_updated_ut_str = get_word(words, num_words, 2);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_RRDSET_STATE);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_RRDSET_STATE,
- PLUGINSD_KEYWORD_REPLAY_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- usec_t chart_last_collected_ut = (usec_t)st->last_collected_time.tv_sec * USEC_PER_SEC + (usec_t)st->last_collected_time.tv_usec;
- usec_t last_collected_ut = last_collected_ut_str ? str2ull_encoded(last_collected_ut_str) : 0;
- if(last_collected_ut > chart_last_collected_ut) {
- st->last_collected_time.tv_sec = (time_t)(last_collected_ut / USEC_PER_SEC);
- st->last_collected_time.tv_usec = (last_collected_ut % USEC_PER_SEC);
- }
-
- usec_t chart_last_updated_ut = (usec_t)st->last_updated.tv_sec * USEC_PER_SEC + (usec_t)st->last_updated.tv_usec;
- usec_t last_updated_ut = last_updated_ut_str ? str2ull_encoded(last_updated_ut_str) : 0;
- if(last_updated_ut > chart_last_updated_ut) {
- st->last_updated.tv_sec = (time_t)(last_updated_ut / USEC_PER_SEC);
- st->last_updated.tv_usec = (last_updated_ut % USEC_PER_SEC);
- }
-
- st->counter++;
- st->counter_done++;
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_replay_end(char **words, size_t num_words, PARSER *parser) {
- if (num_words < 7) { // requires at least 6 parameters; a 7th (child world time) is optional
- netdata_log_error("REPLAY: malformed " PLUGINSD_KEYWORD_REPLAY_END " command");
- return PARSER_RC_ERROR;
- }
-
- const char *update_every_child_txt = get_word(words, num_words, 1);
- const char *first_entry_child_txt = get_word(words, num_words, 2);
- const char *last_entry_child_txt = get_word(words, num_words, 3);
- const char *start_streaming_txt = get_word(words, num_words, 4);
- const char *first_entry_requested_txt = get_word(words, num_words, 5);
- const char *last_entry_requested_txt = get_word(words, num_words, 6);
- const char *child_world_time_txt = get_word(words, num_words, 7); // optional
-
- time_t update_every_child = (time_t) str2ull_encoded(update_every_child_txt);
- time_t first_entry_child = (time_t) str2ull_encoded(first_entry_child_txt);
- time_t last_entry_child = (time_t) str2ull_encoded(last_entry_child_txt);
-
- bool start_streaming = (strcmp(start_streaming_txt, "true") == 0);
- time_t first_entry_requested = (time_t) str2ull_encoded(first_entry_requested_txt);
- time_t last_entry_requested = (time_t) str2ull_encoded(last_entry_requested_txt);
-
- // the optional child world time
- time_t child_world_time = (child_world_time_txt && *child_world_time_txt) ? (time_t) str2ull_encoded(
- child_world_time_txt) : now_realtime_sec();
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_END);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_END, PLUGINSD_KEYWORD_REPLAY_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- internal_error(true,
- "PLUGINSD REPLAY: 'host:%s/chart:%s': got a " PLUGINSD_KEYWORD_REPLAY_END " child db from %llu to %llu, start_streaming %s, had requested from %llu to %llu, wall clock %llu",
- rrdhost_hostname(host), rrdset_id(st),
- (unsigned long long)first_entry_child, (unsigned long long)last_entry_child,
- start_streaming?"true":"false",
- (unsigned long long)first_entry_requested, (unsigned long long)last_entry_requested,
- (unsigned long long)child_world_time
- );
-#endif
-
- parser->user.data_collections_count++;
-
- if(parser->user.replay.rset_enabled && st->rrdhost->receiver) {
- time_t now = now_realtime_sec();
- time_t started = st->rrdhost->receiver->replication_first_time_t;
- time_t current = parser->user.replay.end_time;
-
- if(started && current > started) {
- host->rrdpush_receiver_replication_percent = (NETDATA_DOUBLE) (current - started) * 100.0 / (NETDATA_DOUBLE) (now - started);
- worker_set_metric(WORKER_RECEIVER_JOB_REPLICATION_COMPLETION,
- host->rrdpush_receiver_replication_percent);
- }
- }
-
- parser->user.replay.start_time = 0;
- parser->user.replay.end_time = 0;
- parser->user.replay.start_time_ut = 0;
- parser->user.replay.end_time_ut = 0;
- parser->user.replay.wall_clock_time = 0;
- parser->user.replay.rset_enabled = false;
-
- st->counter++;
- st->counter_done++;
- store_metric_collection_completed();
-
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- st->replay.start_streaming = false;
- st->replay.after = 0;
- st->replay.before = 0;
- if(start_streaming)
- st->replay.log_next_data_collection = true;
-#endif
-
- if (start_streaming) {
- if (st->update_every != update_every_child)
- rrdset_set_update_every_s(st, update_every_child);
-
- if(rrdset_flag_check(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS)) {
- rrdset_flag_set(st, RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED);
- rrdset_flag_clear(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS);
- rrdset_flag_clear(st, RRDSET_FLAG_SYNC_CLOCK);
- rrdhost_receiver_replicating_charts_minus_one(st->rrdhost);
- }
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- else
- internal_error(true, "REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_END " with enable_streaming = true, but there is no replication in progress for this chart.",
- rrdhost_hostname(host), rrdset_id(st));
-#endif
-
- pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_END);
-
- host->rrdpush_receiver_replication_percent = 100.0;
- worker_set_metric(WORKER_RECEIVER_JOB_REPLICATION_COMPLETION, host->rrdpush_receiver_replication_percent);
-
- return PARSER_RC_OK;
- }
-
- pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_END);
-
- rrdcontext_updated_retention_rrdset(st);
-
- bool ok = replicate_chart_request(send_to_plugin, parser, host, st,
- first_entry_child, last_entry_child, child_world_time,
- first_entry_requested, last_entry_requested);
- return ok ? PARSER_RC_OK : PARSER_RC_ERROR;
-}
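-
-// Illustrative example (plain decimal values): REND reports the child's db
-// state and the window served, plus an optional child wall clock time:
-//
-//   REND 1 1700000000 1700003600 true 1700000000 1700003600 1700003601
-//
-// in order: update_every, first/last entry in the child's db, start_streaming,
-// first/last entry requested, child world time.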
-
-static inline PARSER_RC pluginsd_begin_v2(char **words, size_t num_words, PARSER *parser) {
- timing_init();
-
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *id = get_word(words, num_words, idx++);
- char *update_every_str = get_word(words, num_words, idx++);
- char *end_time_str = get_word(words, num_words, idx++);
- char *wall_clock_time_str = get_word(words, num_words, idx++);
-
- if(unlikely(!id || !update_every_str || !end_time_str || !wall_clock_time_str))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_BEGIN_V2, "missing parameters");
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_BEGIN_V2);
- if(unlikely(!host)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- timing_step(TIMING_STEP_BEGIN2_PREPARE);
-
- RRDSET *st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_BEGIN_V2);
-
- if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_BEGIN_V2))
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))
- rrdset_isnot_obsolete___safe_from_collector_thread(st);
-
- timing_step(TIMING_STEP_BEGIN2_FIND_CHART);
-
- // ------------------------------------------------------------------------
- // parse the parameters
-
- time_t update_every = (time_t) str2ull_encoded(update_every_str);
- time_t end_time = (time_t) str2ull_encoded(end_time_str);
-
- time_t wall_clock_time;
- if(likely(*wall_clock_time_str == '#'))
- wall_clock_time = end_time;
- else
- wall_clock_time = (time_t) str2ull_encoded(wall_clock_time_str);
-
- if (unlikely(update_every != st->update_every))
- rrdset_set_update_every_s(st, update_every);
-
- timing_step(TIMING_STEP_BEGIN2_PARSE);
-
- // ------------------------------------------------------------------------
- // prepare our state
-
- pluginsd_lock_rrdset_data_collection(parser);
-
- parser->user.v2.update_every = update_every;
- parser->user.v2.end_time = end_time;
- parser->user.v2.wall_clock_time = wall_clock_time;
- parser->user.v2.ml_locked = ml_chart_update_begin(st);
-
- timing_step(TIMING_STEP_BEGIN2_ML);
-
- // ------------------------------------------------------------------------
- // propagate it forward in v2
-
- if(!parser->user.v2.stream_buffer.wb && rrdhost_has_rrdpush_sender_enabled(st->rrdhost))
- parser->user.v2.stream_buffer = rrdset_push_metric_initialize(parser->user.st, wall_clock_time);
-
- if(parser->user.v2.stream_buffer.v2 && parser->user.v2.stream_buffer.wb) {
- // check receiver capabilities
- bool can_copy = stream_has_capability(&parser->user, STREAM_CAP_IEEE754) == stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754);
-
- // check sender capabilities
- bool with_slots = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_SLOTS) ? true : false;
- NUMBER_ENCODING integer_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX;
-
- BUFFER *wb = parser->user.v2.stream_buffer.wb;
-
- buffer_need_bytes(wb, 1024);
-
- if(unlikely(parser->user.v2.stream_buffer.begin_v2_added))
- buffer_fast_strcat(wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1);
-
- buffer_fast_strcat(wb, PLUGINSD_KEYWORD_BEGIN_V2, sizeof(PLUGINSD_KEYWORD_BEGIN_V2) - 1);
-
- if(with_slots) {
- buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2);
- buffer_print_uint64_encoded(wb, integer_encoding, st->rrdpush.sender.chart_slot);
- }
-
- buffer_fast_strcat(wb, " '", 2);
- buffer_fast_strcat(wb, rrdset_id(st), string_strlen(st->id));
- buffer_fast_strcat(wb, "' ", 2);
-
- if(can_copy)
- buffer_strcat(wb, update_every_str);
- else
- buffer_print_uint64_encoded(wb, integer_encoding, update_every);
-
- buffer_fast_strcat(wb, " ", 1);
-
- if(can_copy)
- buffer_strcat(wb, end_time_str);
- else
- buffer_print_uint64_encoded(wb, integer_encoding, end_time);
-
- buffer_fast_strcat(wb, " ", 1);
-
- if(can_copy)
- buffer_strcat(wb, wall_clock_time_str);
- else
- buffer_print_uint64_encoded(wb, integer_encoding, wall_clock_time);
-
- buffer_fast_strcat(wb, "\n", 1);
-
- parser->user.v2.stream_buffer.last_point_end_time_s = end_time;
- parser->user.v2.stream_buffer.begin_v2_added = true;
- }
-
- timing_step(TIMING_STEP_BEGIN2_PROPAGATE);
-
- // ------------------------------------------------------------------------
- // store it
-
- st->last_collected_time.tv_sec = end_time;
- st->last_collected_time.tv_usec = 0;
- st->last_updated.tv_sec = end_time;
- st->last_updated.tv_usec = 0;
- st->counter++;
- st->counter_done++;
-
- // these are only needed for db mode RAM, SAVE, MAP, ALLOC
- st->db.current_entry++;
- if(st->db.current_entry >= st->db.entries)
- st->db.current_entry -= st->db.entries;
-
- timing_step(TIMING_STEP_BEGIN2_STORE);
-
- return PARSER_RC_OK;
-}
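-
-// Illustrative example of a v2 collection frame, matching what the
-// propagation block above re-emits to a parent:
-//
-//   BEGIN2 'system.cpu' 1 1700000000 #
-//
-// chart id, update_every, end time and wall clock time, where '#' means
-// "same as the end time". With the SLOTS capability a " SLOT:<n>" token is
-// inserted right after the keyword.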
-
-static inline PARSER_RC pluginsd_set_v2(char **words, size_t num_words, PARSER *parser) {
- timing_init();
-
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *dimension = get_word(words, num_words, idx++);
- char *collected_str = get_word(words, num_words, idx++);
- char *value_str = get_word(words, num_words, idx++);
- char *flags_str = get_word(words, num_words, idx++);
-
- if(unlikely(!dimension || !collected_str || !value_str || !flags_str))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_SET_V2, "missing parameters");
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_SET_V2);
- if(unlikely(!host)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_SET_V2, PLUGINSD_KEYWORD_BEGIN_V2);
- if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- timing_step(TIMING_STEP_SET2_PREPARE);
-
- RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_SET_V2);
- if(unlikely(!rd)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- st->pluginsd.set = true;
-
- if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE | RRDDIM_FLAG_ARCHIVED)))
- rrddim_isnot_obsolete___safe_from_collector_thread(st, rd);
-
- timing_step(TIMING_STEP_SET2_LOOKUP_DIMENSION);
-
- // ------------------------------------------------------------------------
- // parse the parameters
-
- collected_number collected_value = (collected_number) str2ll_encoded(collected_str);
-
- NETDATA_DOUBLE value;
- if(*value_str == '#')
- value = (NETDATA_DOUBLE)collected_value;
- else
- value = str2ndd_encoded(value_str, NULL);
-
- SN_FLAGS flags = pluginsd_parse_storage_number_flags(flags_str);
-
- timing_step(TIMING_STEP_SET2_PARSE);
-
- // ------------------------------------------------------------------------
- // check value and ML
-
- if (unlikely(!netdata_double_isnumber(value) || (flags == SN_EMPTY_SLOT))) {
- value = NAN;
- flags = SN_EMPTY_SLOT;
-
- if(parser->user.v2.ml_locked)
- ml_dimension_is_anomalous(rd, parser->user.v2.end_time, 0, false);
- }
- else if(parser->user.v2.ml_locked) {
- if (ml_dimension_is_anomalous(rd, parser->user.v2.end_time, value, true)) {
- // clear anomaly bit: 0 -> is anomalous, 1 -> not anomalous
- flags &= ~((storage_number) SN_FLAG_NOT_ANOMALOUS);
- }
- else
- flags |= SN_FLAG_NOT_ANOMALOUS;
- }
-
- timing_step(TIMING_STEP_SET2_ML);
-
- // ------------------------------------------------------------------------
- // propagate it forward in v2
-
- if(parser->user.v2.stream_buffer.v2 && parser->user.v2.stream_buffer.begin_v2_added && parser->user.v2.stream_buffer.wb) {
- // check if receiver and sender have the same number parsing capabilities
- bool can_copy = stream_has_capability(&parser->user, STREAM_CAP_IEEE754) == stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754);
-
- // check the sender capabilities
- bool with_slots = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_SLOTS) ? true : false;
- NUMBER_ENCODING integer_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX;
- NUMBER_ENCODING doubles_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_DECIMAL;
-
- BUFFER *wb = parser->user.v2.stream_buffer.wb;
- buffer_need_bytes(wb, 1024);
- buffer_fast_strcat(wb, PLUGINSD_KEYWORD_SET_V2, sizeof(PLUGINSD_KEYWORD_SET_V2) - 1);
-
- if(with_slots) {
- buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2);
- buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdpush.sender.dim_slot);
- }
-
- buffer_fast_strcat(wb, " '", 2);
- buffer_fast_strcat(wb, rrddim_id(rd), string_strlen(rd->id));
- buffer_fast_strcat(wb, "' ", 2);
- if(can_copy)
- buffer_strcat(wb, collected_str);
- else
- buffer_print_int64_encoded(wb, integer_encoding, collected_value); // original v2 had hex
- buffer_fast_strcat(wb, " ", 1);
- if(can_copy)
- buffer_strcat(wb, value_str);
- else
- buffer_print_netdata_double_encoded(wb, doubles_encoding, value); // original v2 had decimal
- buffer_fast_strcat(wb, " ", 1);
- buffer_print_sn_flags(wb, flags, true);
- buffer_fast_strcat(wb, "\n", 1);
- }
-
- timing_step(TIMING_STEP_SET2_PROPAGATE);
-
- // ------------------------------------------------------------------------
- // store it
-
- rrddim_store_metric(rd, parser->user.v2.end_time * USEC_PER_SEC, value, flags);
- rd->collector.last_collected_time.tv_sec = parser->user.v2.end_time;
- rd->collector.last_collected_time.tv_usec = 0;
- rd->collector.last_collected_value = collected_value;
- rd->collector.last_stored_value = value;
- rd->collector.last_calculated_value = value;
- rd->collector.counter++;
- rrddim_set_updated(rd);
-
- timing_step(TIMING_STEP_SET2_STORE);
-
- return PARSER_RC_OK;
-}
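-
-// Illustrative example of a v2 sample; the frame is terminated by a bare
-// END2 line:
-//
-//   SET2 'user' 1234 # A
-//
-// dimension id, collected (integer) value, calculated (double) value, where
-// '#' means "derive it from the collected value", and the storage-number flags.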
-
-void pluginsd_cleanup_v2(PARSER *parser) {
- // this is called when the thread is stopped while processing
- pluginsd_clear_scope_chart(parser, "THREAD CLEANUP");
-}
-
-static inline PARSER_RC pluginsd_end_v2(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
- timing_init();
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_END_V2);
- if(unlikely(!host)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_END_V2, PLUGINSD_KEYWORD_BEGIN_V2);
- if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- parser->user.data_collections_count++;
-
- timing_step(TIMING_STEP_END2_PREPARE);
-
- // ------------------------------------------------------------------------
- // propagate the whole chart update in v1
-
- if(unlikely(!parser->user.v2.stream_buffer.v2 && !parser->user.v2.stream_buffer.begin_v2_added && parser->user.v2.stream_buffer.wb))
- rrdset_push_metrics_v1(&parser->user.v2.stream_buffer, st);
-
- timing_step(TIMING_STEP_END2_PUSH_V1);
-
- // ------------------------------------------------------------------------
- // unblock data collection
-
- pluginsd_unlock_previous_scope_chart(parser, PLUGINSD_KEYWORD_END_V2, false);
- rrdcontext_collected_rrdset(st);
- store_metric_collection_completed();
-
- timing_step(TIMING_STEP_END2_RRDSET);
-
- // ------------------------------------------------------------------------
- // propagate it forward
-
- rrdset_push_metrics_finished(&parser->user.v2.stream_buffer, st);
-
- timing_step(TIMING_STEP_END2_PROPAGATE);
-
- // ------------------------------------------------------------------------
- // cleanup RRDSET / RRDDIM
-
- if(likely(st->pluginsd.dims_with_slots)) {
- for(size_t i = 0; i < st->pluginsd.size ;i++) {
- RRDDIM *rd = st->pluginsd.prd_array[i].rd;
-
- if(!rd)
- continue;
-
- rd->collector.calculated_value = 0;
- rd->collector.collected_value = 0;
- rrddim_clear_updated(rd);
- }
- }
- else {
- RRDDIM *rd;
- rrddim_foreach_read(rd, st){
- rd->collector.calculated_value = 0;
- rd->collector.collected_value = 0;
- rrddim_clear_updated(rd);
- }
- rrddim_foreach_done(rd);
- }
-
- // ------------------------------------------------------------------------
- // reset state
-
- parser->user.v2 = (struct parser_user_object_v2){ 0 };
-
- timing_step(TIMING_STEP_END2_STORE);
- timing_report();
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_exit(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
- netdata_log_info("PLUGINSD: plugin called EXIT.");
- return PARSER_RC_STOP;
-}
-
-struct mutex_cond {
- pthread_mutex_t lock;
- pthread_cond_t cond;
- int rc;
-};
-
-static void virt_fnc_got_data_cb(BUFFER *wb __maybe_unused, int code, void *callback_data)
-{
- struct mutex_cond *ctx = callback_data;
- pthread_mutex_lock(&ctx->lock);
- ctx->rc = code;
- pthread_cond_broadcast(&ctx->cond);
- pthread_mutex_unlock(&ctx->lock);
-}
-
-#define VIRT_FNC_TIMEOUT 1
-#define VIRT_FNC_BUF_SIZE (4096)
-void call_virtual_function_async(BUFFER *wb, RRDHOST *host, const char *name, const char *payload, rrd_function_result_callback_t callback, void *callback_data) {
- PARSER *parser = NULL;
-
- // TODO: simplify - we only need the first parameter (the plugin name), so we could avoid parsing all of them
- char *words[PLUGINSD_MAX_WORDS];
- char *function_with_params = strdupz(name);
- size_t num_words = quoted_strings_splitter(function_with_params, words, PLUGINSD_MAX_WORDS, isspace_map_pluginsd);
-
- if (num_words < 2) {
- netdata_log_error("PLUGINSD: virtual function name is empty.");
- freez(function_with_params);
- return;
- }
-
- const DICTIONARY_ITEM *cpi = dictionary_get_and_acquire_item(host->configurable_plugins, get_word(words, num_words, 1));
- if (unlikely(cpi == NULL)) {
- netdata_log_error("PLUGINSD: virtual function plugin '%s' not found.", name);
- freez(function_with_params);
- return;
- }
- struct configurable_plugin *cp = dictionary_acquired_item_value(cpi);
- parser = (PARSER *)cp->cb_usr_ctx;
-
- BUFFER *function_out = buffer_create(VIRT_FNC_BUF_SIZE, NULL);
- // if we are forwarding this to a plugin (as opposed to streaming/child) we have to remove the first parameter (plugin_name)
- buffer_strcat(function_out, get_word(words, num_words, 0));
- for (size_t i = 1; i < num_words; i++) {
- if (i == 1 && SERVING_PLUGINSD(parser))
- continue;
- buffer_sprintf(function_out, " %s", get_word(words, num_words, i));
- }
- freez(function_with_params);
-
- usec_t now = now_realtime_usec();
-
- struct inflight_function tmp = {
- .started_ut = now,
- .timeout_ut = now + VIRT_FNC_TIMEOUT * USEC_PER_SEC,
- .result_body_wb = wb,
- .timeout = VIRT_FNC_TIMEOUT * 10,
- .function = string_strdupz(buffer_tostring(function_out)),
- .result_cb = callback,
- .result_cb_data = callback_data,
- .payload = payload != NULL ? strdupz(payload) : NULL,
- .virtual = true,
- };
- buffer_free(function_out);
-
- uuid_t uuid;
- uuid_generate_time(uuid);
-
- char key[UUID_STR_LEN];
- uuid_unparse_lower(uuid, key);
-
- dictionary_write_lock(parser->inflight.functions);
-
- // if there is any error, our dictionary callbacks will call the caller callback to notify
- // the caller about the error - no need for error handling here.
- dictionary_set(parser->inflight.functions, key, &tmp, sizeof(struct inflight_function));
-
- if(!parser->inflight.smaller_timeout || tmp.timeout_ut < parser->inflight.smaller_timeout)
- parser->inflight.smaller_timeout = tmp.timeout_ut;
-
- // garbage collect stale inflight functions
- if(parser->inflight.smaller_timeout < now)
- inflight_functions_garbage_collect(parser, now);
-
- dictionary_write_unlock(parser->inflight.functions);
-}
-
-
-dyncfg_config_t call_virtual_function_blocking(PARSER *parser, const char *name, int *rc, const char *payload) {
- usec_t now = now_realtime_usec();
- BUFFER *wb = buffer_create(VIRT_FNC_BUF_SIZE, NULL);
-
- struct mutex_cond cond = {
- .lock = PTHREAD_MUTEX_INITIALIZER,
- .cond = PTHREAD_COND_INITIALIZER
- };
-
- struct inflight_function tmp = {
- .started_ut = now,
- .timeout_ut = now + VIRT_FNC_TIMEOUT * USEC_PER_SEC,
- .result_body_wb = wb,
- .timeout = VIRT_FNC_TIMEOUT,
- .function = string_strdupz(name),
- .result_cb = virt_fnc_got_data_cb,
- .result_cb_data = &cond,
- .payload = payload != NULL ? strdupz(payload) : NULL,
- .virtual = true,
- };
-
- uuid_t uuid;
- uuid_generate_time(uuid);
-
- char key[UUID_STR_LEN];
- uuid_unparse_lower(uuid, key);
-
- dictionary_write_lock(parser->inflight.functions);
-
- // if there is any error, our dictionary callbacks will call the caller callback to notify
- // the caller about the error - no need for error handling here.
- dictionary_set(parser->inflight.functions, key, &tmp, sizeof(struct inflight_function));
-
- if(!parser->inflight.smaller_timeout || tmp.timeout_ut < parser->inflight.smaller_timeout)
- parser->inflight.smaller_timeout = tmp.timeout_ut;
-
- // garbage collect stale inflight functions
- if(parser->inflight.smaller_timeout < now)
- inflight_functions_garbage_collect(parser, now);
-
- dictionary_write_unlock(parser->inflight.functions);
-
- struct timespec tp;
- clock_gettime(CLOCK_REALTIME, &tp);
- tp.tv_sec += (time_t)VIRT_FNC_TIMEOUT;
-
- pthread_mutex_lock(&cond.lock);
-
- int ret = pthread_cond_timedwait(&cond.cond, &cond.lock, &tp);
- if (ret == ETIMEDOUT)
- netdata_log_error("PLUGINSD: DYNCFG virtual function %s timed out", name);
-
- pthread_mutex_unlock(&cond.lock);
-
- dyncfg_config_t cfg;
- cfg.data = strdupz(buffer_tostring(wb));
- cfg.data_size = buffer_strlen(wb);
-
- if (rc != NULL)
- *rc = cond.rc;
-
- buffer_free(wb);
- return cfg;
-}
-
-#define CVF_MAX_LEN (1024)
-static dyncfg_config_t get_plugin_config_cb(void *usr_ctx, const char *plugin_name)
-{
- PARSER *parser = usr_ctx;
-
- if (SERVING_STREAMING(parser)) {
- char buf[CVF_MAX_LEN + 1];
- snprintfz(buf, CVF_MAX_LEN, FUNCTION_NAME_GET_PLUGIN_CONFIG " %s", plugin_name);
- return call_virtual_function_blocking(parser, buf, NULL, NULL);
- }
-
- return call_virtual_function_blocking(parser, FUNCTION_NAME_GET_PLUGIN_CONFIG, NULL, NULL);
-}
-
-static dyncfg_config_t get_plugin_config_schema_cb(void *usr_ctx, const char *plugin_name)
-{
- PARSER *parser = usr_ctx;
-
- if (SERVING_STREAMING(parser)) {
- char buf[CVF_MAX_LEN + 1];
- snprintfz(buf, CVF_MAX_LEN, FUNCTION_NAME_GET_PLUGIN_CONFIG_SCHEMA " %s", plugin_name);
- return call_virtual_function_blocking(parser, buf, NULL, NULL);
- }
-
- return call_virtual_function_blocking(parser, "get_plugin_config_schema", NULL, NULL);
-}
-
-static dyncfg_config_t get_module_config_cb(void *usr_ctx, const char *plugin_name, const char *module_name)
-{
- PARSER *parser = usr_ctx;
- BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
-
- buffer_strcat(wb, FUNCTION_NAME_GET_MODULE_CONFIG);
- if (SERVING_STREAMING(parser))
- buffer_sprintf(wb, " %s", plugin_name);
-
- buffer_sprintf(wb, " %s", module_name);
-
- dyncfg_config_t ret = call_virtual_function_blocking(parser, buffer_tostring(wb), NULL, NULL);
-
- buffer_free(wb);
-
- return ret;
-}
-
-static dyncfg_config_t get_module_config_schema_cb(void *usr_ctx, const char *plugin_name, const char *module_name)
-{
- PARSER *parser = usr_ctx;
- BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
-
- buffer_strcat(wb, FUNCTION_NAME_GET_MODULE_CONFIG_SCHEMA);
- if (SERVING_STREAMING(parser))
- buffer_sprintf(wb, " %s", plugin_name);
-
- buffer_sprintf(wb, " %s", module_name);
-
- dyncfg_config_t ret = call_virtual_function_blocking(parser, buffer_tostring(wb), NULL, NULL);
-
- buffer_free(wb);
-
- return ret;
-}
-
-static dyncfg_config_t get_job_config_schema_cb(void *usr_ctx, const char *plugin_name, const char *module_name)
-{
- PARSER *parser = usr_ctx;
- BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
-
- buffer_strcat(wb, FUNCTION_NAME_GET_JOB_CONFIG_SCHEMA);
-
- if (SERVING_STREAMING(parser))
- buffer_sprintf(wb, " %s", plugin_name);
-
- buffer_sprintf(wb, " %s", module_name);
-
- dyncfg_config_t ret = call_virtual_function_blocking(parser, buffer_tostring(wb), NULL, NULL);
-
- buffer_free(wb);
-
- return ret;
-}
-
-static dyncfg_config_t get_job_config_cb(void *usr_ctx, const char *plugin_name, const char *module_name, const char* job_name)
-{
- PARSER *parser = usr_ctx;
- BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
-
- buffer_strcat(wb, FUNCTION_NAME_GET_JOB_CONFIG);
-
- if (SERVING_STREAMING(parser))
- buffer_sprintf(wb, " %s", plugin_name);
-
- buffer_sprintf(wb, " %s %s", module_name, job_name);
-
- dyncfg_config_t ret = call_virtual_function_blocking(parser, buffer_tostring(wb), NULL, NULL);
-
- buffer_free(wb);
-
- return ret;
-}
-
-enum set_config_result set_plugin_config_cb(void *usr_ctx, const char *plugin_name, dyncfg_config_t *cfg)
-{
- PARSER *parser = usr_ctx;
- BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
-
- buffer_strcat(wb, FUNCTION_NAME_SET_PLUGIN_CONFIG);
-
- if (SERVING_STREAMING(parser))
- buffer_sprintf(wb, " %s", plugin_name);
-
- int rc;
- call_virtual_function_blocking(parser, buffer_tostring(wb), &rc, cfg->data);
-
- buffer_free(wb);
- if(rc != DYNCFG_VFNC_RET_CFG_ACCEPTED)
- return SET_CONFIG_REJECTED;
- return SET_CONFIG_ACCEPTED;
-}
-
-enum set_config_result set_module_config_cb(void *usr_ctx, const char *plugin_name, const char *module_name, dyncfg_config_t *cfg)
-{
- PARSER *parser = usr_ctx;
- BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
-
- buffer_strcat(wb, FUNCTION_NAME_SET_MODULE_CONFIG);
-
- if (SERVING_STREAMING(parser))
- buffer_sprintf(wb, " %s", plugin_name);
-
- buffer_sprintf(wb, " %s", module_name);
-
- int rc;
- call_virtual_function_blocking(parser, buffer_tostring(wb), &rc, cfg->data);
-
- buffer_free(wb);
-
- if(rc != DYNCFG_VFNC_RET_CFG_ACCEPTED)
- return SET_CONFIG_REJECTED;
- return SET_CONFIG_ACCEPTED;
-}
-
-enum set_config_result set_job_config_cb(void *usr_ctx, const char *plugin_name, const char *module_name, const char *job_name, dyncfg_config_t *cfg)
-{
- PARSER *parser = usr_ctx;
- BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
-
- buffer_strcat(wb, FUNCTION_NAME_SET_JOB_CONFIG);
-
- if (SERVING_STREAMING(parser))
- buffer_sprintf(wb, " %s", plugin_name);
-
- buffer_sprintf(wb, " %s %s", module_name, job_name);
-
- int rc;
- call_virtual_function_blocking(parser, buffer_tostring(wb), &rc, cfg->data);
-
- buffer_free(wb);
-
- if(rc != DYNCFG_VFNC_RET_CFG_ACCEPTED)
- return SET_CONFIG_REJECTED;
- return SET_CONFIG_ACCEPTED;
-}
-
-enum set_config_result delete_job_cb(void *usr_ctx, const char *plugin_name ,const char *module_name, const char *job_name)
-{
- PARSER *parser = usr_ctx;
- BUFFER *wb = buffer_create(CVF_MAX_LEN, NULL);
-
- buffer_strcat(wb, FUNCTION_NAME_DELETE_JOB);
-
- if (SERVING_STREAMING(parser))
- buffer_sprintf(wb, " %s", plugin_name);
-
- buffer_sprintf(wb, " %s %s", module_name, job_name);
-
- int rc;
- call_virtual_function_blocking(parser, buffer_tostring(wb), &rc, NULL);
-
- buffer_free(wb);
-
- if(rc != DYNCFG_VFNC_RET_CFG_ACCEPTED)
- return SET_CONFIG_REJECTED;
- return SET_CONFIG_ACCEPTED;
-}
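-
-// A note on the wire format assumed by the callbacks above (inferred from the
-// code itself, not from a separate protocol spec): each request is sent as a
-// virtual function call of the form
-//
-//   <function_name> [plugin_name] [module_name] [job_name]
-//
-// where plugin_name is included only when serving a streaming connection, so
-// the receiving end can route the request to the right plugin.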
-
-
-static inline PARSER_RC pluginsd_register_plugin(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
- netdata_log_info("PLUGINSD: DYNCFG_ENABLE");
-
- if (unlikely (num_words != 2))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_ENABLE, "missing name parameter");
-
- struct configurable_plugin *cfg = callocz(1, sizeof(struct configurable_plugin));
-
- cfg->name = strdupz(words[1]);
- cfg->set_config_cb = set_plugin_config_cb;
- cfg->get_config_cb = get_plugin_config_cb;
- cfg->get_config_schema_cb = get_plugin_config_schema_cb;
- cfg->cb_usr_ctx = parser;
-
- const DICTIONARY_ITEM *di = register_plugin(parser->user.host->configurable_plugins, cfg, SERVING_PLUGINSD(parser));
- if (unlikely(di == NULL)) {
- freez(cfg->name);
- freez(cfg);
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_ENABLE, "error registering plugin");
- }
-
- if (SERVING_PLUGINSD(parser)) {
-        // this is an optimization for pluginsd, to avoid an extra dictionary lookup,
-        // as we know which plugin is communicating with us
- parser->user.cd->cfg_dict_item = di;
- parser->user.cd->configuration = cfg;
- } else {
- // register_plugin keeps the item acquired, so we need to release it
- dictionary_acquired_item_release(parser->user.host->configurable_plugins, di);
- }
-
- rrdpush_send_dyncfg_enable(parser->user.host, cfg->name);
-
- return PARSER_RC_OK;
-}
-
-#define LOG_MSG_SIZE (1024)
-#define MODULE_NAME_IDX (SERVING_PLUGINSD(parser) ? 1 : 2)
-#define MODULE_TYPE_IDX (SERVING_PLUGINSD(parser) ? 2 : 3)
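-// When serving a streaming connection, words[1] carries the plugin name, so
-// the module name and module type arguments shift one position to the right.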
-static inline PARSER_RC pluginsd_register_module(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
- netdata_log_info("PLUGINSD: DYNCFG_REG_MODULE");
-
- size_t expected_num_words = SERVING_PLUGINSD(parser) ? 3 : 4;
-
- if (unlikely(num_words != expected_num_words)) {
- char log[LOG_MSG_SIZE + 1];
- snprintfz(log, LOG_MSG_SIZE, "expected %zu (got %zu) parameters: %smodule_name module_type", expected_num_words - 1, num_words - 1, SERVING_PLUGINSD(parser) ? "" : "plugin_name ");
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, log);
- }
-
- struct configurable_plugin *plug_cfg;
- const DICTIONARY_ITEM *di = NULL;
- if (SERVING_PLUGINSD(parser)) {
- plug_cfg = parser->user.cd->configuration;
- if (unlikely(plug_cfg == NULL))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, "you have to enable dynamic configuration first using " PLUGINSD_KEYWORD_DYNCFG_ENABLE);
- } else {
- di = dictionary_get_and_acquire_item(parser->user.host->configurable_plugins, words[1]);
- if (unlikely(di == NULL))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, "plugin not found");
-
- plug_cfg = (struct configurable_plugin *)dictionary_acquired_item_value(di);
- }
-
- struct module *mod = callocz(1, sizeof(struct module));
-
- mod->type = str2_module_type(words[MODULE_TYPE_IDX]);
- if (unlikely(mod->type == MOD_TYPE_UNKNOWN)) {
- freez(mod);
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE, "unknown module type (allowed: job_array, single)");
- }
-
- mod->name = strdupz(words[MODULE_NAME_IDX]);
-
- mod->set_config_cb = set_module_config_cb;
- mod->get_config_cb = get_module_config_cb;
- mod->get_config_schema_cb = get_module_config_schema_cb;
- mod->config_cb_usr_ctx = parser;
-
- mod->get_job_config_cb = get_job_config_cb;
- mod->get_job_config_schema_cb = get_job_config_schema_cb;
- mod->set_job_config_cb = set_job_config_cb;
- mod->delete_job_cb = delete_job_cb;
- mod->job_config_cb_usr_ctx = parser;
-
- register_module(parser->user.host->configurable_plugins, plug_cfg, mod, SERVING_PLUGINSD(parser));
-
- if (di != NULL)
- dictionary_acquired_item_release(parser->user.host->configurable_plugins, di);
-
- rrdpush_send_dyncfg_reg_module(parser->user.host, plug_cfg->name, mod->name, mod->type);
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_register_job_common(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused, const char *plugin_name) {
- const char *module_name = words[0];
- const char *job_name = words[1];
- const char *job_type_str = words[2];
- const char *flags_str = words[3];
-
- long f = str2l(flags_str);
-
- if (f < 0)
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, "invalid flags received");
-
- dyncfg_job_flg_t flags = f;
-
- if (SERVING_PLUGINSD(parser))
- flags |= JOB_FLG_PLUGIN_PUSHED;
- else
- flags |= JOB_FLG_STREAMING_PUSHED;
-
- enum job_type job_type = dyncfg_str2job_type(job_type_str);
- if (job_type == JOB_TYPE_UNKNOWN)
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, "unknown job type");
-
- if (SERVING_PLUGINSD(parser) && job_type == JOB_TYPE_USER)
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, "plugins cannot push jobs of type \"user\" (this is allowed only in streaming)");
-
-    if (register_job(parser->user.host->configurable_plugins, plugin_name, module_name, job_name, job_type, flags, 0)) // 'ignore existing' is off, as this is an explicit register-job call
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, "error registering job");
-
- rrdpush_send_dyncfg_reg_job(parser->user.host, plugin_name, module_name, job_name, job_type, flags);
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_register_job(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
- size_t expected_num_words = SERVING_PLUGINSD(parser) ? 5 : 6;
-
- if (unlikely(num_words != expected_num_words)) {
- char log[LOG_MSG_SIZE + 1];
- snprintfz(log, LOG_MSG_SIZE, "expected %zu (got %zu) parameters: %smodule_name job_name job_type", expected_num_words - 1, num_words - 1, SERVING_PLUGINSD(parser) ? "" : "plugin_name ");
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB, log);
- }
-
- if (SERVING_PLUGINSD(parser)) {
- return pluginsd_register_job_common(&words[1], num_words - 1, parser, parser->user.cd->configuration->name);
- }
- return pluginsd_register_job_common(&words[2], num_words - 2, parser, words[1]);
-}
-
-static inline PARSER_RC pluginsd_dyncfg_reset(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
- if (unlikely(num_words != (SERVING_PLUGINSD(parser) ? 1 : 2)))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_RESET, SERVING_PLUGINSD(parser) ? "expected 0 parameters" : "expected 1 parameter: plugin_name");
-
- if (SERVING_PLUGINSD(parser)) {
- unregister_plugin(parser->user.host->configurable_plugins, parser->user.cd->cfg_dict_item);
- rrdpush_send_dyncfg_reset(parser->user.host, parser->user.cd->configuration->name);
- parser->user.cd->configuration = NULL;
- } else {
- const DICTIONARY_ITEM *di = dictionary_get_and_acquire_item(parser->user.host->configurable_plugins, words[1]);
- if (unlikely(di == NULL))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DYNCFG_RESET, "plugin not found");
- unregister_plugin(parser->user.host->configurable_plugins, di);
- rrdpush_send_dyncfg_reset(parser->user.host, words[1]);
- }
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_job_status_common(char **words, size_t num_words, PARSER *parser, const char *plugin_name) {
- int state = str2i(words[3]);
-
- enum job_status status = str2job_state(words[2]);
- if (unlikely(SERVING_PLUGINSD(parser) && status == JOB_STATUS_UNKNOWN))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_REPORT_JOB_STATUS, "unknown job status");
-
- char *message = NULL;
- if (num_words == 5 && strlen(words[4]) > 0)
- message = words[4];
-
- const DICTIONARY_ITEM *plugin_item;
- DICTIONARY *job_dict;
- const DICTIONARY_ITEM *job_item = report_job_status_acq_lock(parser->user.host->configurable_plugins, &plugin_item, &job_dict, plugin_name, words[0], words[1], status, state, message);
-
- if (job_item != NULL) {
- struct job *job = dictionary_acquired_item_value(job_item);
- rrdpush_send_job_status_update(parser->user.host, plugin_name, words[0], job);
-
- pthread_mutex_unlock(&job->lock);
- dictionary_acquired_item_release(job_dict, job_item);
- dictionary_acquired_item_release(parser->user.host->configurable_plugins, plugin_item);
- }
-
- return PARSER_RC_OK;
-}
-
-// job_status [plugin_name if streaming] <module_name> <job_name> <status_code> <state> [message]
-static PARSER_RC pluginsd_job_status(char **words, size_t num_words, PARSER *parser) {
- if (SERVING_PLUGINSD(parser)) {
- if (unlikely(num_words != 5 && num_words != 6))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_REPORT_JOB_STATUS, "expected 4 or 5 parameters: module_name, job_name, status_code, state, [optional: message]");
- } else {
- if (unlikely(num_words != 6 && num_words != 7))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_REPORT_JOB_STATUS, "expected 5 or 6 parameters: plugin_name, module_name, job_name, status_code, state, [optional: message]");
- }
-
- if (SERVING_PLUGINSD(parser)) {
- return pluginsd_job_status_common(&words[1], num_words - 1, parser, parser->user.cd->configuration->name);
- }
- return pluginsd_job_status_common(&words[2], num_words - 2, parser, words[1]);
-}
-
-static PARSER_RC pluginsd_delete_job(char **words, size_t num_words, PARSER *parser) {
-    // this can be a bit confusing, but there is a difference between KEYWORD_DELETE_JOB
-    // and the actual delete_job function - they operate in opposite directions
- if (num_words != 4)
-        return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DELETE_JOB, "expected 3 parameters: plugin_name, module_name, job_name");
-
- const char *plugin_name = get_word(words, num_words, 1);
- const char *module_name = get_word(words, num_words, 2);
- const char *job_name = get_word(words, num_words, 3);
-
- if (SERVING_STREAMING(parser))
- delete_job_pname(parser->user.host->configurable_plugins, plugin_name, module_name, job_name);
-
- // forward to parent if any
- rrdpush_send_job_deleted(parser->user.host, plugin_name, module_name, job_name);
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC streaming_claimed_id(char **words, size_t num_words, PARSER *parser)
-{
- const char *host_uuid_str = get_word(words, num_words, 1);
- const char *claim_id_str = get_word(words, num_words, 2);
-
- if (!host_uuid_str || !claim_id_str) {
- netdata_log_error("Command CLAIMED_ID came malformed, uuid = '%s', claim_id = '%s'",
- host_uuid_str ? host_uuid_str : "[unset]",
- claim_id_str ? claim_id_str : "[unset]");
- return PARSER_RC_ERROR;
- }
-
- uuid_t uuid;
- RRDHOST *host = parser->user.host;
-
-    // We don't need the parsed UUID;
-    // we parse it only to validate the format
- if(uuid_parse(host_uuid_str, uuid)) {
- netdata_log_error("1st parameter (host GUID) to CLAIMED_ID command is not valid GUID. Received: \"%s\".", host_uuid_str);
- return PARSER_RC_ERROR;
- }
- if(uuid_parse(claim_id_str, uuid) && strcmp(claim_id_str, "NULL") != 0) {
- netdata_log_error("2nd parameter (Claim ID) to CLAIMED_ID command is not valid GUID. Received: \"%s\".", claim_id_str);
- return PARSER_RC_ERROR;
- }
-
- if(strcmp(host_uuid_str, host->machine_guid) != 0) {
- netdata_log_error("Claim ID is for host \"%s\" but it came over connection for \"%s\"", host_uuid_str, host->machine_guid);
-        return PARSER_RC_OK; // the message itself is OK; the problem must be elsewhere
- }
-
- rrdhost_aclk_state_lock(host);
-
- if (host->aclk_state.claimed_id)
- freez(host->aclk_state.claimed_id);
-
- host->aclk_state.claimed_id = strcmp(claim_id_str, "NULL") ? strdupz(claim_id_str) : NULL;
-
- rrdhost_aclk_state_unlock(host);
-
-    rrdhost_flag_set(host, RRDHOST_FLAG_METADATA_CLAIMID | RRDHOST_FLAG_METADATA_UPDATE);
-
- rrdpush_send_claimed_id(host);
-
- return PARSER_RC_OK;
-}
-
-// ----------------------------------------------------------------------------
-
-void pluginsd_process_thread_cleanup(void *ptr) {
- PARSER *parser = (PARSER *)ptr;
-
- pluginsd_cleanup_v2(parser);
- pluginsd_host_define_cleanup(parser);
-
- rrd_collector_finished();
-
-#ifdef NETDATA_LOG_STREAM_RECEIVE
- if(parser->user.stream_log_fp) {
- fclose(parser->user.stream_log_fp);
- parser->user.stream_log_fp = NULL;
- }
-#endif
-
- parser_destroy(parser);
-}
-
-bool parser_reconstruct_node(BUFFER *wb, void *ptr) {
- PARSER *parser = ptr;
- if(!parser || !parser->user.host)
- return false;
-
- buffer_strcat(wb, rrdhost_hostname(parser->user.host));
- return true;
-}
-
-bool parser_reconstruct_instance(BUFFER *wb, void *ptr) {
- PARSER *parser = ptr;
- if(!parser || !parser->user.st)
- return false;
-
- buffer_strcat(wb, rrdset_name(parser->user.st));
- return true;
-}
-
-bool parser_reconstruct_context(BUFFER *wb, void *ptr) {
- PARSER *parser = ptr;
- if(!parser || !parser->user.st)
- return false;
-
- buffer_strcat(wb, string2str(parser->user.st->context));
- return true;
-}
-
-inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugin_input, FILE *fp_plugin_output, int trust_durations)
-{
- int enabled = cd->unsafe.enabled;
-
- if (!fp_plugin_input || !fp_plugin_output || !enabled) {
- cd->unsafe.enabled = 0;
- return 0;
- }
-
- if (unlikely(fileno(fp_plugin_input) == -1)) {
- netdata_log_error("input file descriptor given is not a valid stream");
- cd->serial_failures++;
- return 0;
- }
-
- if (unlikely(fileno(fp_plugin_output) == -1)) {
- netdata_log_error("output file descriptor given is not a valid stream");
- cd->serial_failures++;
- return 0;
- }
-
- clearerr(fp_plugin_input);
- clearerr(fp_plugin_output);
-
- PARSER *parser;
- {
- PARSER_USER_OBJECT user = {
- .enabled = cd->unsafe.enabled,
- .host = host,
- .cd = cd,
- .trust_durations = trust_durations
- };
-
- // fp_plugin_output = our input; fp_plugin_input = our output
- parser = parser_init(&user, fp_plugin_output, fp_plugin_input, -1, PARSER_INPUT_SPLIT, NULL);
- }
-
- pluginsd_keywords_init(parser, PARSER_INIT_PLUGINSD);
-
- rrd_collector_started();
-
- size_t count = 0;
-
-    // the cleanup handler captures the current value of the parser pointer,
-    // so the parser must be allocated before pushing it
- netdata_thread_cleanup_push(pluginsd_process_thread_cleanup, parser);
-
- {
- ND_LOG_STACK lgs[] = {
- ND_LOG_FIELD_CB(NDF_REQUEST, line_splitter_reconstruct_line, &parser->line),
- ND_LOG_FIELD_CB(NDF_NIDL_NODE, parser_reconstruct_node, parser),
- ND_LOG_FIELD_CB(NDF_NIDL_INSTANCE, parser_reconstruct_instance, parser),
- ND_LOG_FIELD_CB(NDF_NIDL_CONTEXT, parser_reconstruct_context, parser),
- ND_LOG_FIELD_END(),
- };
- ND_LOG_STACK_PUSH(lgs);
-
- buffered_reader_init(&parser->reader);
- BUFFER *buffer = buffer_create(sizeof(parser->reader.read_buffer) + 2, NULL);
- while(likely(service_running(SERVICE_COLLECTORS))) {
-
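-            // no complete line buffered yet; block (up to 2 minutes) waiting for more input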
- if(unlikely(!buffered_reader_next_line(&parser->reader, buffer))) {
- buffered_reader_ret_t ret = buffered_reader_read_timeout(
- &parser->reader,
- fileno((FILE *) parser->fp_input),
- 2 * 60 * MSEC_PER_SEC, true
- );
-
- if(unlikely(ret != BUFFERED_READER_READ_OK))
- break;
-
- continue;
- }
-
- if(unlikely(parser_action(parser, buffer->buffer)))
- break;
-
- buffer->len = 0;
- buffer->buffer[0] = '\0';
- }
- buffer_free(buffer);
-
- cd->unsafe.enabled = parser->user.enabled;
- count = parser->user.data_collections_count;
-
- if(likely(count)) {
- cd->successful_collections += count;
- cd->serial_failures = 0;
- }
- else
- cd->serial_failures++;
- }
-
- // free parser with the pop function
- netdata_thread_cleanup_pop(1);
-
- return count;
-}
-
-void pluginsd_keywords_init(PARSER *parser, PARSER_REPERTOIRE repertoire) {
- parser_init_repertoire(parser, repertoire);
-
- if (repertoire & (PARSER_INIT_PLUGINSD | PARSER_INIT_STREAMING))
- inflight_functions_init(parser);
-}
-
-PARSER *parser_init(struct parser_user_object *user, FILE *fp_input, FILE *fp_output, int fd,
- PARSER_INPUT_TYPE flags, void *ssl __maybe_unused) {
- PARSER *parser;
-
- parser = callocz(1, sizeof(*parser));
- if(user)
- parser->user = *user;
- parser->fd = fd;
- parser->fp_input = fp_input;
- parser->fp_output = fp_output;
-#ifdef ENABLE_HTTPS
- parser->ssl_output = ssl;
-#endif
- parser->flags = flags;
-
- spinlock_init(&parser->writer.spinlock);
- return parser;
-}
-
-PARSER_RC parser_execute(PARSER *parser, PARSER_KEYWORD *keyword, char **words, size_t num_words) {
- switch(keyword->id) {
- case 1:
- return pluginsd_set_v2(words, num_words, parser);
-
- case 2:
- return pluginsd_begin_v2(words, num_words, parser);
-
- case 3:
- return pluginsd_end_v2(words, num_words, parser);
-
- case 11:
- return pluginsd_set(words, num_words, parser);
-
- case 12:
- return pluginsd_begin(words, num_words, parser);
-
- case 13:
- return pluginsd_end(words, num_words, parser);
-
- case 21:
- return pluginsd_replay_set(words, num_words, parser);
-
- case 22:
- return pluginsd_replay_begin(words, num_words, parser);
-
- case 23:
- return pluginsd_replay_rrddim_collection_state(words, num_words, parser);
-
- case 24:
- return pluginsd_replay_rrdset_collection_state(words, num_words, parser);
-
- case 25:
- return pluginsd_replay_end(words, num_words, parser);
-
- case 31:
- return pluginsd_dimension(words, num_words, parser);
-
- case 32:
- return pluginsd_chart(words, num_words, parser);
-
- case 33:
- return pluginsd_chart_definition_end(words, num_words, parser);
-
- case 34:
- return pluginsd_clabel(words, num_words, parser);
-
- case 35:
- return pluginsd_clabel_commit(words, num_words, parser);
-
- case 41:
- return pluginsd_function(words, num_words, parser);
-
- case 42:
- return pluginsd_function_result_begin(words, num_words, parser);
-
- case 51:
- return pluginsd_label(words, num_words, parser);
-
- case 52:
- return pluginsd_overwrite(words, num_words, parser);
-
- case 53:
- return pluginsd_variable(words, num_words, parser);
-
- case 61:
- return streaming_claimed_id(words, num_words, parser);
-
- case 71:
- return pluginsd_host(words, num_words, parser);
-
- case 72:
- return pluginsd_host_define(words, num_words, parser);
-
- case 73:
- return pluginsd_host_define_end(words, num_words, parser);
-
- case 74:
- return pluginsd_host_labels(words, num_words, parser);
-
- case 97:
- return pluginsd_flush(words, num_words, parser);
-
- case 98:
- return pluginsd_disable(words, num_words, parser);
-
- case 99:
- return pluginsd_exit(words, num_words, parser);
-
- case 101:
- return pluginsd_register_plugin(words, num_words, parser);
-
- case 102:
- return pluginsd_register_module(words, num_words, parser);
-
- case 103:
- return pluginsd_register_job(words, num_words, parser);
-
- case 104:
- return pluginsd_dyncfg_reset(words, num_words, parser);
-
- case 110:
- return pluginsd_job_status(words, num_words, parser);
-
- case 111:
- return pluginsd_delete_job(words, num_words, parser);
-
- default:
- fatal("Unknown keyword '%s' with id %zu", keyword->keyword, keyword->id);
- }
-}
-
-#include "gperf-hashtable.h"
-
-void parser_init_repertoire(PARSER *parser, PARSER_REPERTOIRE repertoire) {
- parser->repertoire = repertoire;
-
- for(size_t i = GPERF_PARSER_MIN_HASH_VALUE ; i <= GPERF_PARSER_MAX_HASH_VALUE ;i++) {
- if(gperf_keywords[i].keyword && *gperf_keywords[i].keyword && (parser->repertoire & gperf_keywords[i].repertoire))
- worker_register_job_name(gperf_keywords[i].worker_job_id, gperf_keywords[i].keyword);
- }
-}
-
-static void parser_destroy_dyncfg(PARSER *parser) {
- if (parser->user.cd != NULL && parser->user.cd->configuration != NULL) {
- unregister_plugin(parser->user.host->configurable_plugins, parser->user.cd->cfg_dict_item);
- parser->user.cd->configuration = NULL;
- } else if (parser->user.host != NULL && SERVING_STREAMING(parser) && parser->user.host != localhost){
- dictionary_flush(parser->user.host->configurable_plugins);
- }
-}
-
-void parser_destroy(PARSER *parser) {
- if (unlikely(!parser))
- return;
-
- parser_destroy_dyncfg(parser);
-
- dictionary_destroy(parser->inflight.functions);
- freez(parser);
-}
-
-int pluginsd_parser_unittest(void) {
- PARSER *p = parser_init(NULL, NULL, NULL, -1, PARSER_INPUT_SPLIT, NULL);
- pluginsd_keywords_init(p, PARSER_INIT_PLUGINSD | PARSER_INIT_STREAMING);
-
- char *lines[] = {
- "BEGIN2 abcdefghijklmnopqr 123",
- "SET2 abcdefg 0x12345678 0 0",
- "SET2 hijklmnoqr 0x12345678 0 0",
- "SET2 stuvwxyz 0x12345678 0 0",
- "END2",
- NULL,
- };
-
- char *words[PLUGINSD_MAX_WORDS];
- size_t iterations = 1000000;
- size_t count = 0;
- char input[PLUGINSD_LINE_MAX + 1];
-
- usec_t started = now_realtime_usec();
- while(--iterations) {
- for(size_t line = 0; lines[line] ;line++) {
- strncpyz(input, lines[line], PLUGINSD_LINE_MAX);
- size_t num_words = quoted_strings_splitter_pluginsd(input, words, PLUGINSD_MAX_WORDS);
- const char *command = get_word(words, num_words, 0);
- PARSER_KEYWORD *keyword = parser_find_keyword(p, command);
- if(unlikely(!keyword))
- fatal("Cannot parse the line '%s'", lines[line]);
- count++;
- }
- }
- usec_t ended = now_realtime_usec();
-
- netdata_log_info("Parsed %zu lines in %0.2f secs, %0.2f klines/sec", count,
- (double)(ended - started) / (double)USEC_PER_SEC,
- (double)count / ((double)(ended - started) / (double)USEC_PER_SEC) / 1000.0);
-
- parser_destroy(p);
- return 0;
-}
diff --git a/collectors/plugins.d/pluginsd_parser.h b/collectors/plugins.d/pluginsd_parser.h
deleted file mode 100644
index 1fce9a89a..000000000
--- a/collectors/plugins.d/pluginsd_parser.h
+++ /dev/null
@@ -1,245 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGINSD_PARSER_H
-#define NETDATA_PLUGINSD_PARSER_H
-
-#include "daemon/common.h"
-
-#define WORKER_PARSER_FIRST_JOB 3
-
-// this has to be in sync with the same definition in receiver.c
-#define WORKER_RECEIVER_JOB_REPLICATION_COMPLETION (WORKER_PARSER_FIRST_JOB - 3)
-
-// this controls the max response size of a function
-#define PLUGINSD_MAX_DEFERRED_SIZE (100 * 1024 * 1024)
-
-#define PLUGINSD_MIN_RRDSET_POINTERS_CACHE 1024
-
-#define HOST_LABEL_IS_EPHEMERAL "_is_ephemeral"
-// PARSER return codes
-typedef enum __attribute__ ((__packed__)) parser_rc {
- PARSER_RC_OK, // Callback was successful, go on
- PARSER_RC_STOP, // Callback says STOP
- PARSER_RC_ERROR // Callback failed (abort rest of callbacks)
-} PARSER_RC;
-
-typedef enum __attribute__ ((__packed__)) parser_input_type {
- PARSER_INPUT_SPLIT = (1 << 1),
- PARSER_DEFER_UNTIL_KEYWORD = (1 << 2),
-} PARSER_INPUT_TYPE;
-
-typedef enum __attribute__ ((__packed__)) {
- PARSER_INIT_PLUGINSD = (1 << 1),
- PARSER_INIT_STREAMING = (1 << 2),
- PARSER_REP_METADATA = (1 << 3),
-} PARSER_REPERTOIRE;
-
-struct parser;
-typedef PARSER_RC (*keyword_function)(char **words, size_t num_words, struct parser *parser);
-
-typedef struct parser_keyword {
- char *keyword;
- size_t id;
- PARSER_REPERTOIRE repertoire;
- size_t worker_job_id;
-} PARSER_KEYWORD;
-
-typedef struct parser_user_object {
- bool cleanup_slots;
- RRDSET *st;
- RRDHOST *host;
- void *opaque;
- struct plugind *cd;
- int trust_durations;
- RRDLABELS *new_host_labels;
- RRDLABELS *chart_rrdlabels_linked_temporarily;
- size_t data_collections_count;
- int enabled;
-
-#ifdef NETDATA_LOG_STREAM_RECEIVE
- FILE *stream_log_fp;
- PARSER_REPERTOIRE stream_log_repertoire;
-#endif
-
- STREAM_CAPABILITIES capabilities; // receiver capabilities
-
- struct {
- bool parsing_host;
- uuid_t machine_guid;
- char machine_guid_str[UUID_STR_LEN];
- STRING *hostname;
- RRDLABELS *rrdlabels;
- } host_define;
-
- struct parser_user_object_replay {
- time_t start_time;
- time_t end_time;
-
- usec_t start_time_ut;
- usec_t end_time_ut;
-
- time_t wall_clock_time;
-
- bool rset_enabled;
- } replay;
-
- struct parser_user_object_v2 {
- bool locked_data_collection;
- RRDSET_STREAM_BUFFER stream_buffer; // sender capabilities in this
- time_t update_every;
- time_t end_time;
- time_t wall_clock_time;
- bool ml_locked;
- } v2;
-} PARSER_USER_OBJECT;
-
-typedef struct parser {
- uint8_t version; // Parser version
- PARSER_REPERTOIRE repertoire;
- uint32_t flags;
- int fd; // Socket
- FILE *fp_input; // Input source e.g. stream
- FILE *fp_output; // Stream to send commands to plugin
-
-#ifdef ENABLE_HTTPS
- NETDATA_SSL *ssl_output;
-#endif
-#ifdef ENABLE_H2O
- void *h2o_ctx; // if set we use h2o_stream functions to send data
-#endif
-
- PARSER_USER_OBJECT user; // User defined structure to hold extra state between calls
-
- struct buffered_reader reader;
- struct line_splitter line;
- PARSER_KEYWORD *keyword;
-
- struct {
- const char *end_keyword;
- BUFFER *response;
- void (*action)(struct parser *parser, void *action_data);
- void *action_data;
- } defer;
-
- struct {
- DICTIONARY *functions;
- usec_t smaller_timeout;
- } inflight;
-
- struct {
- SPINLOCK spinlock;
- } writer;
-
-} PARSER;
-
-PARSER *parser_init(struct parser_user_object *user, FILE *fp_input, FILE *fp_output, int fd, PARSER_INPUT_TYPE flags, void *ssl);
-void parser_init_repertoire(PARSER *parser, PARSER_REPERTOIRE repertoire);
-void parser_destroy(PARSER *working_parser);
-void pluginsd_cleanup_v2(PARSER *parser);
-void inflight_functions_init(PARSER *parser);
-void pluginsd_keywords_init(PARSER *parser, PARSER_REPERTOIRE repertoire);
-PARSER_RC parser_execute(PARSER *parser, PARSER_KEYWORD *keyword, char **words, size_t num_words);
-
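-// Skips leading whitespace in src and copies its first whitespace-delimited
-// token into dst (bounded by dst_size), returning the number of bytes copied.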
-static inline int find_first_keyword(const char *src, char *dst, int dst_size, bool *isspace_map) {
- const char *s = src, *keyword_start;
-
- while (unlikely(isspace_map[(uint8_t)*s])) s++;
- keyword_start = s;
-
- while (likely(*s && !isspace_map[(uint8_t)*s]) && dst_size > 1) {
- *dst++ = *s++;
- dst_size--;
- }
- *dst = '\0';
- return dst_size == 0 ? 0 : (int) (s - keyword_start);
-}
-
-PARSER_KEYWORD *gperf_lookup_keyword(register const char *str, register size_t len);
-
-static inline PARSER_KEYWORD *parser_find_keyword(PARSER *parser, const char *command) {
- PARSER_KEYWORD *t = gperf_lookup_keyword(command, strlen(command));
- if(t && (t->repertoire & parser->repertoire))
- return t;
-
- return NULL;
-}
-
-bool parser_reconstruct_node(BUFFER *wb, void *ptr);
-bool parser_reconstruct_instance(BUFFER *wb, void *ptr);
-bool parser_reconstruct_context(BUFFER *wb, void *ptr);
-
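-// Processes a single input line: buffers deferred (multi-line) responses,
-// splits the line into words, dispatches the keyword via parser_execute(),
-// and returns non-zero when the caller should stop feeding this parser.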
-static inline int parser_action(PARSER *parser, char *input) {
-#ifdef NETDATA_LOG_STREAM_RECEIVE
- static __thread char line[PLUGINSD_LINE_MAX + 1];
- strncpyz(line, input, sizeof(line) - 1);
-#endif
-
- parser->line.count++;
-
- if(unlikely(parser->flags & PARSER_DEFER_UNTIL_KEYWORD)) {
- char command[100 + 1];
- bool has_keyword = find_first_keyword(input, command, 100, isspace_map_pluginsd);
-
- if(!has_keyword || strcmp(command, parser->defer.end_keyword) != 0) {
- if(parser->defer.response) {
- buffer_strcat(parser->defer.response, input);
- if(buffer_strlen(parser->defer.response) > PLUGINSD_MAX_DEFERRED_SIZE) {
- // more than PLUGINSD_MAX_DEFERRED_SIZE of data,
- // or a bad plugin that did not send the end_keyword
- internal_error(true, "PLUGINSD: deferred response is too big (%zu bytes). Stopping this plugin.", buffer_strlen(parser->defer.response));
- return 1;
- }
- }
- return 0;
- }
- else {
- // call the action
- parser->defer.action(parser, parser->defer.action_data);
-
- // empty everything
- parser->defer.action = NULL;
- parser->defer.action_data = NULL;
- parser->defer.end_keyword = NULL;
- parser->defer.response = NULL;
- parser->flags &= ~PARSER_DEFER_UNTIL_KEYWORD;
- }
- return 0;
- }
-
- parser->line.num_words = quoted_strings_splitter_pluginsd(input, parser->line.words, PLUGINSD_MAX_WORDS);
- const char *command = get_word(parser->line.words, parser->line.num_words, 0);
-
- if(unlikely(!command)) {
- line_splitter_reset(&parser->line);
- return 0;
- }
-
- PARSER_RC rc;
- parser->keyword = parser_find_keyword(parser, command);
- if(likely(parser->keyword)) {
- worker_is_busy(parser->keyword->worker_job_id);
-
-#ifdef NETDATA_LOG_STREAM_RECEIVE
- if(parser->user.stream_log_fp && parser->keyword->repertoire & parser->user.stream_log_repertoire)
- fprintf(parser->user.stream_log_fp, "%s", line);
-#endif
-
- rc = parser_execute(parser, parser->keyword, parser->line.words, parser->line.num_words);
- worker_is_idle();
- }
- else
- rc = PARSER_RC_ERROR;
-
- if(rc == PARSER_RC_ERROR) {
- CLEAN_BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
- line_splitter_reconstruct_line(wb, &parser->line);
- netdata_log_error("PLUGINSD: parser_action('%s') failed on line %zu: { %s } (quotes added to show parsing)",
- command, parser->line.count, buffer_tostring(wb));
- }
-
- line_splitter_reset(&parser->line);
- return (rc == PARSER_RC_ERROR || rc == PARSER_RC_STOP);
-}
-
-#endif //NETDATA_PLUGINSD_PARSER_H
diff --git a/collectors/proc.plugin/Makefile.am b/collectors/proc.plugin/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/collectors/proc.plugin/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md
deleted file mode 100644
index 62e46569f..000000000
--- a/collectors/proc.plugin/README.md
+++ /dev/null
@@ -1,639 +0,0 @@
-# OS provided metrics (proc.plugin)
-
-`proc.plugin` gathers metrics from the /proc and /sys folders in Linux systems, along with a few other endpoints, and is responsible for the bulk of the system metrics collected and visualized by Netdata.
-
-This plugin is not an external plugin, but one of Netdata's threads.
-
-In detail, it collects metrics from:
-
-- `/proc/net/dev` (all network interfaces for all their values)
-- `/proc/diskstats` (all disks for all their values)
-- `/proc/mdstat` (status of RAID arrays)
-- `/proc/net/snmp` (total IPv4, TCP and UDP usage)
-- `/proc/net/snmp6` (total IPv6 usage)
-- `/proc/net/netstat` (more IPv4 usage)
-- `/proc/net/wireless` (wireless extension)
-- `/proc/net/stat/nf_conntrack` (connection tracking performance)
-- `/proc/net/stat/synproxy` (synproxy performance)
-- `/proc/net/ip_vs/stats` (IPVS connection statistics)
-- `/proc/stat` (CPU utilization and attributes)
-- `/proc/meminfo` (memory information)
-- `/proc/vmstat` (system performance)
-- `/proc/net/rpc/nfsd` (NFS server statistics for both v3 and v4 NFS servers)
-- `/sys/fs/cgroup` (Control Groups - Linux Containers)
-- `/proc/self/mountinfo` (mount points)
-- `/proc/interrupts` (total and per core hardware interrupts)
-- `/proc/softirqs` (total and per core software interrupts)
-- `/proc/loadavg` (system load and total processes running)
-- `/proc/pressure/{cpu,memory,io}` (pressure stall information)
-- `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography)
-- `/proc/spl/kstat/zfs/arcstats` (status of ZFS adaptive replacement cache)
-- `/proc/spl/kstat/zfs/pool/state` (state of ZFS pools)
-- `/sys/class/power_supply` (power supply properties)
-- `/sys/class/infiniband` (infiniband interconnect)
-- `/sys/class/drm` (AMD GPUs)
-- `ipc` (IPC semaphores and message queues)
-- `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`).
-- `netdata` (internal Netdata resources utilization)
-
-- - -
-
-## Monitoring Disks
-
-> Live demo of disk monitoring at: **[http://london.netdata.rocks](https://registry.my-netdata.io/#menu_disk)**
-
-Performance monitoring for Linux disks is quite complicated. The main reason is the plethora of disk technologies available. There are many different hardware disk technologies, but there are even more **virtual disk** technologies that can provide additional storage features.
-
-Fortunately, the Linux kernel provides many metrics that can give deep insight into what our disks are doing. The kernel measures all these metrics on all layers of storage: **virtual disks**, **physical disks** and **partitions of disks**.
-
-### Monitored disk metrics
-
-- **I/O bandwidth/s (kb/s)**
- The amount of data transferred from and to the disk.
-- **Amount of discarded data (kb/s)**
-- **I/O operations/s**
- The number of I/O operations completed.
-- **Extended I/O operations/s**
- The number of extended I/O operations completed.
-- **Queued I/O operations**
- The number of currently queued I/O operations. For traditional disks that execute commands one after another, one of them is being run by the disk and the rest are just waiting in a queue.
-- **Backlog size (time in ms)**
- The expected duration of the currently queued I/O operations.
-- **Utilization (time percentage)**
-  The percentage of time the disk was busy with something. This is a very interesting metric, since for most disks that execute commands sequentially, **this is the key indication of congestion**. A sequential disk that is busy 100% of the available time has no capacity left, so even if its bandwidth or the number of operations it executes is low, it has reached its limit.
-  Of course, for newer disk technologies (like fusion cards) that can execute multiple commands in parallel, this metric is meaningless.
-- **Average I/O operation time (ms)**
- The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.
-- **Average I/O operation time for extended operations (ms)**
- The average time for extended I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.
-- **Average I/O operation size (kb)**
- The average amount of data of the completed I/O operations.
-- **Average amount of discarded data (kb)**
- The average amount of data of the completed discard operations.
-- **Average Service Time (ms)**
-  The average service time for completed I/O operations. This metric is calculated using the total busy time of the disk and the number of completed operations. If the disk is able to execute multiple operations in parallel, the reported average service time will be misleading.
-- **Average Service Time for extended I/O operations (ms)**
- The average service time for completed extended I/O operations.
-- **Merged I/O operations/s**
- The Linux kernel is capable of merging I/O operations. So, if two requests to read data from the disk are adjacent, the Linux kernel may merge them to one before giving them to disk. This metric measures the number of operations that have been merged by the Linux kernel.
-- **Merged discard operations/s**
-- **Total I/O time**
- The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute multiple I/O operations in parallel.
-- **Space usage**
- For mounted disks, Netdata will provide a chart for their space, with 3 dimensions:
- 1. free
- 2. used
- 3. reserved for root
-- **inode usage**
- For mounted disks, Netdata will provide a chart for their inodes (number of file and directories), with 3 dimensions:
- 1. free
- 2. used
- 3. reserved for root
-
-### disk names
-
-Netdata will automatically set the name of disks on the dashboard from the mount point where they are mounted; of course, only while they are mounted. Changes in mount points are not currently detected (you will have to restart Netdata to change the name of a disk). To use disk IDs provided by `/dev/disk/by-id`, enable the `name disks by id` option. The `preferred disk ids` simple pattern lets you choose which disk IDs are used first, as in the example below.
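-
-For example, to name disks from `/dev/disk/by-id` and prefer `ata-*` identifiers (the `ata-*` pattern here is only illustrative), you could set:
-
-```
-[plugin:proc:/proc/diskstats]
-    name disks by id = yes
-    preferred disk ids = ata-*
-```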
-
-### performance metrics
-
-By default, Netdata enables monitoring of metrics only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after Netdata is started will be detected, and charts will be added to the dashboard automatically (though a refresh of the dashboard is needed for them to appear). Set a chart to `yes` instead of `auto` to enable it permanently. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins, as sketched below.
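-
-A minimal sketch of enabling charts with zero metrics for all internal plugins:
-
-```
-[global]
-    enable zero metrics = yes
-```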
-
-Netdata categorizes all block devices in 3 categories:
-
-1. physical disks (i.e. block devices that do not have child devices and are not partitions)
-2. virtual disks (i.e. block devices that have child devices - like RAID devices)
-3. disk partitions (i.e. block devices that are part of a physical disk)
-
-Performance metrics are enabled by default for all disk devices, except partitions and unmounted virtual disks. Of course, you can enable/disable monitoring of any block device by editing the Netdata configuration file.
-
-### Netdata configuration
-
-You can get the running Netdata configuration using this:
-
-```sh
-cd /etc/netdata
-curl "http://localhost:19999/netdata.conf" >netdata.conf.new
-mv netdata.conf.new netdata.conf
-```
-
-Then edit `netdata.conf` and find the following section. This is the basic plugin configuration.
-
-```
-[plugin:proc:/proc/diskstats]
- # enable new disks detected at runtime = yes
- # performance metrics for physical disks = auto
- # performance metrics for virtual disks = auto
- # performance metrics for partitions = no
- # bandwidth for all disks = auto
- # operations for all disks = auto
- # merged operations for all disks = auto
- # i/o time for all disks = auto
- # queued operations for all disks = auto
- # utilization percentage for all disks = auto
- # extended operations for all disks = auto
- # backlog for all disks = auto
- # bcache for all disks = auto
- # bcache priority stats update every = 0
- # remove charts of removed disks = yes
- # path to get block device = /sys/block/%s
- # path to get block device bcache = /sys/block/%s/bcache
- # path to get virtual block device = /sys/devices/virtual/block/%s
- # path to get block device infos = /sys/dev/block/%lu:%lu/%s
- # path to device mapper = /dev/mapper
- # path to /dev/disk/by-label = /dev/disk/by-label
- # path to /dev/disk/by-id = /dev/disk/by-id
- # path to /dev/vx/dsk = /dev/vx/dsk
- # name disks by id = no
- # preferred disk ids = *
- # exclude disks = loop* ram*
- # filename to monitor = /proc/diskstats
- # performance metrics for disks with major 8 = yes
-```
-
-For each virtual disk, physical disk and partition you will have a section like this:
-
-```
-[plugin:proc:/proc/diskstats:sda]
- # enable = yes
- # enable performance metrics = auto
- # bandwidth = auto
- # operations = auto
- # merged operations = auto
- # i/o time = auto
- # queued operations = auto
- # utilization percentage = auto
- # extended operations = auto
- # backlog = auto
-```
-
-For all configuration options:
-
-- `auto` = enable monitoring if the collected values are not zero
-- `yes` = enable monitoring
-- `no` = disable monitoring
-
-Of course, to set options, you will have to uncomment them. The comments show the internal defaults.
-
-After saving `/etc/netdata/netdata.conf`, restart Netdata to apply the changes.
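-
-On systemd-based systems this is typically:
-
-```sh
-sudo systemctl restart netdata
-```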
-
-#### Disabling performance metrics for individual devices or for multiple devices by type
-
-You can easily disable performance metrics for an individual device, for example:
-
-```
-[plugin:proc:/proc/diskstats:sda]
- enable performance metrics = no
-```
-
-Sometimes, though, you need to disable performance metrics for all devices of the same type. To do this, find the device's major number in `/proc/diskstats`, for example:
-
-```
- 7 0 loop0 1651 0 3452 168 0 0 0 0 0 8 168
- 7 1 loop1 4955 0 11924 880 0 0 0 0 0 64 880
- 7 2 loop2 36 0 216 4 0 0 0 0 0 4 4
- 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0
- 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0
- 251 2 zram2 27487 0 219896 188 79953 0 639624 1640 0 1828 1828
- 251 3 zram3 27348 0 218784 152 79952 0 639616 1960 0 2060 2104
-```
-
-All zram devices have major number `251` and all loop devices have major number `7`.
-So, to disable performance metrics for all loop devices, you could add `performance metrics for disks with major 7 = no` to the `[plugin:proc:/proc/diskstats]` section.
-
-```
-[plugin:proc:/proc/diskstats]
- performance metrics for disks with major 7 = no
-```
-
-## Monitoring RAID arrays
-
-### Monitored RAID array metrics
-
-1. **Health** Number of failed disks in every array (aggregate chart).
-
-2. **Disks stats**
-
-- total (number of devices array ideally would have)
-- inuse (number of devices currently are in use)
-
-3. **Mismatch count**
-
-- unsynchronized blocks
-
-4. **Current status**
-
-- resync in percent
-- recovery in percent
-- reshape in percent
-- check in percent
-
-5. **Operation status** (if resync/recovery/reshape/check is active)
-
-- finish in minutes
-- speed in megabytes/s
-
-6. **Nonredundant array availability**
-
-#### configuration
-
-```
-[plugin:proc:/proc/mdstat]
- # faulty devices = yes
- # nonredundant arrays availability = yes
- # mismatch count = auto
- # disk stats = yes
- # operation status = yes
- # make charts obsolete = yes
- # filename to monitor = /proc/mdstat
- # mismatch_cnt filename to monitor = /sys/block/%s/md/mismatch_cnt
-```
-
-## Monitoring CPUs
-
-The `/proc/stat` module monitors CPU utilization, interrupts, context switches, processes started/running, thermal
-throttling, frequency, and idle states. It gathers this information from multiple files.
-
-If your system has more than 50 processors (`physical processors * cores per processor * threads per core`), the Agent
-automatically disables CPU thermal throttling, frequency, and idle state charts. To override this default, see the next
-section on configuration.
-
-### Configuration
-
-The settings for monitoring CPUs are in the `[plugin:proc:/proc/stat]` section of your `netdata.conf` file.
-
-The `keep per core files open` option lets you reduce file open/close operations by keeping the per-core files open.
-
-If your system has more than 50 processors and you would like to see the CPU thermal throttling, frequency, and idle
-state charts that are automatically disabled, you can set the following boolean options in the
-`[plugin:proc:/proc/stat]` section.
-
-```conf
- keep per core files open = yes
- keep cpuidle files open = yes
- core_throttle_count = yes
- package_throttle_count = yes
- cpu frequency = yes
- cpu idle states = yes
-```
-
-### CPU frequency
-
-The module shows the current CPU frequency as set by the `cpufreq` kernel
-module.
-
-**Requirement:**
-You need to have `CONFIG_CPU_FREQ` and (optionally) `CONFIG_CPU_FREQ_STAT`
-enabled in your kernel.
-
-The `cpufreq` interface provides two different ways of getting this information, through the `/sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq` and `/sys/devices/system/cpu/cpu*/cpufreq/stats/time_in_state` files. The latter is more accurate, so the module prefers it. `scaling_cur_freq` represents only the current CPU frequency and doesn't account for state changes that happen between updates. The module switches back and forth between these two methods if the governor is changed.
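-
-You can check which of these files your kernel exposes, for example:
-
-```sh
-cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq
-cat /sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state
-```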
-
-It produces one chart with multiple lines (one line per core).
-
-#### configuration
-
-`scaling_cur_freq filename to monitor` and `time_in_state filename to monitor` in the `[plugin:proc:/proc/stat]` configuration section
-
-### CPU idle states
-
-The module monitors the usage of CPU idle states.
-
-**Requirement:**
-Your kernel needs to have `CONFIG_CPU_IDLE` enabled.
-
-It produces one stacked chart per CPU, showing the percentage of time spent in
-each state.
-
-#### configuration
-
-`schedstat filename to monitor`, `cpuidle name filename to monitor`, and `cpuidle time filename to monitor` in the `[plugin:proc:/proc/stat]` configuration section
-
-## Monitoring memory
-
-### Monitored memory metrics
-
-- Amount of memory swapped in/out
-- Amount of memory paged from/to disk
-- Number of memory page faults
-- Number of out of memory kills
-- Number of NUMA events
-
-### Configuration
-
-```conf
-[plugin:proc:/proc/vmstat]
- filename to monitor = /proc/vmstat
- swap i/o = auto
- disk i/o = yes
- memory page faults = yes
- out of memory kills = yes
- system-wide numa metric summary = auto
-```
-
-## Monitoring Network Interfaces
-
-### Monitored network interface metrics
-
-- **Physical Network Interfaces Aggregated Bandwidth (kilobits/s)**
- The amount of data received and sent through all physical interfaces in the system. This is the source of data for the Net Inbound and Net Outbound dials in the System Overview section.
-
-- **Bandwidth (kilobits/s)**
- The amount of data received and sent through the interface.
-
-- **Packets (packets/s)**
- The number of packets received, packets sent, and multicast packets transmitted through the interface.
-
-- **Interface Errors (errors/s)**
- The number of errors for the inbound and outbound traffic on the interface.
-
-- **Interface Drops (drops/s)**
- The number of packets dropped for the inbound and outbound traffic on the interface.
-
-- **Interface FIFO Buffer Errors (errors/s)**
- The number of FIFO buffer errors encountered while receiving and transmitting data through the interface.
-
-- **Compressed Packets (packets/s)**
- The number of compressed packets transmitted or received by the device driver.
-
-- **Network Interface Events (events/s)**
- The number of packet framing errors, collisions detected on the interface, and carrier losses detected by the device driver.
-
-By default, Netdata enables monitoring of metrics only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after Netdata is started will be detected, and charts will be added to the dashboard automatically (though a refresh of the dashboard is needed for them to appear).
-
-### Monitoring wireless network interfaces
-
-The settings for monitoring wireless interfaces are in the `[plugin:proc:/proc/net/wireless]` section of your `netdata.conf` file.
-
-```conf
- status for all interfaces = yes
- quality for all interfaces = yes
- discarded packets for all interfaces = yes
- missed beacon for all interface = yes
-```
-
-You can set the following values for each configuration option:
-
-- `auto` = enable monitoring if the collected values are not zero
-- `yes` = enable monitoring
-- `no` = disable monitoring
-
-#### Monitored wireless interface metrics
-
-- **Status**
-  The current state of the interface. This is a device-dependent value.
-
-- **Link**
- Overall quality of the link.
-
-- **Level**
- Received signal strength (RSSI), which indicates how strong the received signal is.
-
-- **Noise**
- Background noise level.
-
-- **Discarded packets**
-  Discarded packets, by reason: packets received with a different NWID or ESSID (`nwid`), packets the hardware was unable to decrypt (`crypt`), link-layer fragments the hardware could not properly re-assemble (`frag`), packets that failed to be delivered (`retry`), and packets lost during other wireless operations (`misc`).
-
-- **Missed beacon**
- Number of periodic beacons from the cell or the access point the interface has missed.
-
-#### alerts
-
-There are several alerts defined in `health.d/net.conf`.
-
-The tricky ones are `inbound packets dropped` and `inbound packets dropped ratio`. They have quite a strict policy, so that they can warn users about possible issues. These alerts can be annoying for some network configurations. This is especially true for some bonding configurations, where an interface is either a child of a bond or the bonding interface itself. If a certain number of drops is expected on an interface for a given network configuration, you can create a separate alert with different triggering thresholds, or disable the existing one for that specific interface. This can be done with the help of the [families](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alert-line-families) line in the alert configuration. For example, if you want to disable the `inbound packets dropped` alert for `eth0`, set `families: !eth0 *` in the alert definition for `template: inbound_packets_dropped`, as sketched below.
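-
-A sketch of such an override in `health.d/net.conf`, keeping the rest of the template unchanged (only the `families` line differs from the stock alert):
-
-```
-template: inbound_packets_dropped
-families: !eth0 *
-```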
-
-#### configuration
-
-Module configuration:
-
-```
-[plugin:proc:/proc/net/dev]
- # filename to monitor = /proc/net/dev
- # path to get virtual interfaces = /sys/devices/virtual/net/%s
- # path to get net device speed = /sys/class/net/%s/speed
- # enable new interfaces detected at runtime = auto
- # bandwidth for all interfaces = auto
- # packets for all interfaces = auto
- # errors for all interfaces = auto
- # drops for all interfaces = auto
- # fifo for all interfaces = auto
- # compressed packets for all interfaces = auto
- # frames, collisions, carrier counters for all interfaces = auto
- # disable by default interfaces matching = lo fireqos* *-ifb
- # refresh interface speed every seconds = 10
-```
-
-Per interface configuration:
-
-```
-[plugin:proc:/proc/net/dev:enp0s3]
- # enabled = yes
- # virtual = no
- # bandwidth = auto
- # packets = auto
- # errors = auto
- # drops = auto
- # fifo = auto
- # compressed = auto
- # events = auto
-```
-
-## Linux Anti-DDoS
-
-![image6](https://cloud.githubusercontent.com/assets/2662304/14253733/53550b16-fa95-11e5-8d9d-4ed171df4735.gif)
-
----
-
-SYNPROXY is a TCP SYN packets proxy. It can be used to protect any TCP server (like a web server) from SYN floods and similar DDoS attacks.
-
-SYNPROXY is a netfilter module, in the Linux kernel (since version 3.12). It is optimized to handle millions of packets per second utilizing all CPUs available without any concurrency locking between the connections.
-
-The net effect is that the real servers will not notice any change during the attack. Valid TCP connections will pass through and be served, while the attack will be stopped at the firewall.
-
-Netdata does not enable SYNPROXY. It just uses the SYNPROXY metrics exposed by your kernel, so you will first need to configure it. The hard way is to run iptables SYNPROXY commands directly on the console. An easier way is to use [FireHOL](https://firehol.org/), which is a firewall manager for iptables. FireHOL can configure SYNPROXY using the following setup guides:
-
-- **[Working with SYNPROXY](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY)**
-- **[Working with SYNPROXY and traps](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY-and-traps)**
-
-### Real-time monitoring of Linux Anti-DDoS
-
-Netdata is able to monitor in real-time (per second updates) the operation of the Linux Anti-DDoS protection.
-
-It visualizes 4 charts:
-
-1. TCP SYN Packets received on ports operated by SYNPROXY
-2. TCP Cookies (valid, invalid, retransmits)
-3. Connections Reopened
-4. Entries used
-
-Example image:
-
-![ddos](https://cloud.githubusercontent.com/assets/2662304/14398891/6016e3fc-fdf0-11e5-942b-55de6a52cb66.gif)
-
-See Linux Anti-DDoS in action at: **[Netdata demo site (with SYNPROXY enabled)](https://registry.my-netdata.io/#menu_netfilter_submenu_synproxy)**
-
-## Linux power supply
-
-This module monitors various metrics reported by power supply drivers
-on Linux. This allows tracking and alerting on things like remaining
-battery capacity.
-
-Depending on the underlying driver, it may provide the following charts
-and metrics:
-
-1. Capacity: The power supply capacity expressed as a percentage.
-
- - capacity_now
-
-2. Charge: The charge for the power supply, expressed as amp-hours.
-
- - charge_full_design
- - charge_full
- - charge_now
- - charge_empty
- - charge_empty_design
-
-3. Energy: The energy for the power supply, expressed as watt-hours.
-
- - energy_full_design
- - energy_full
- - energy_now
- - energy_empty
- - energy_empty_design
-
-4. Voltage: The voltage for the power supply, expressed as volts.
-
- - voltage_max_design
- - voltage_max
- - voltage_now
- - voltage_min
- - voltage_min_design
-
-#### configuration
-
-```
-[plugin:proc:/sys/class/power_supply]
- # battery capacity = yes
- # battery charge = no
- # battery energy = no
- # power supply voltage = no
- # keep files open = auto
- # directory to monitor = /sys/class/power_supply
-```
-
-#### notes
-
-- Most drivers provide at least the first chart. Battery-powered, ACPI-compliant
-  systems (like most laptops) provide all but the third, but do not
-  provide all of the metrics for each chart.
-
-- Current, energy, and voltages are reported with a *very* high precision
-  by the power_supply framework. Usually, this is far higher than what the
-  actual hardware supports reporting, so expect values in these
-  charts to jump instead of scaling smoothly.
-
-- If a `max` or `full` attribute is defined by the driver, but not a
-  corresponding `min` or `empty` attribute, Netdata will still provide
-  the corresponding `min` or `empty` dimension, which will always read as zero.
-  This way, alerts which match on these will still work.
-
-## Infiniband interconnect
-
-This module monitors every active Infiniband port. It provides generic counter statistics, and per-vendor hardware counters (if the vendor is supported).
-
-### Monitored interface metrics
-
-Each port will have its counters metrics monitored, grouped in the following charts:
-
-- **Bandwidth usage**
- Sent/Received data, in KB/s
-
-- **Packets Statistics**
- Sent/Received packets, in 3 categories: total, unicast and multicast.
-
-- **Errors Statistics**
-  Many error counters are provided, presenting statistics for:
- - Packets: malformed, sent/received discarded by card/switch, missing resource
- - Link: downed, recovered, integrity error, minor error
- - Other events: Tick Wait to send, buffer overrun
-
-If your vendor is supported, you'll also get HW-counter statistics. As these are vendor-specific, please refer to the vendor's documentation.
-
-- Mellanox: [see statistics documentation](https://community.mellanox.com/s/article/understanding-mlx5-linux-counters-and-status-parameters)
-
-### configuration
-
-The default configuration monitors only enabled Infiniband ports, and refreshes newly activated or created ports every 30 seconds.
-
-```
-[plugin:proc:/sys/class/infiniband]
- # dirname to monitor = /sys/class/infiniband
- # bandwidth counters = yes
- # packets counters = yes
- # errors counters = yes
- # hardware packets counters = auto
- # hardware errors counters = auto
- # monitor only ports being active = auto
- # disable by default interfaces matching =
- # refresh ports state every seconds = 30
-```
-
-## AMD GPUs
-
-This module monitors every AMD GPU card discovered at agent startup.
-
-### Monitored GPU metrics
-
-The following charts will be provided:
-
-- **GPU utilization**
-- **GPU memory utilization**
-- **GPU clock frequency**
-- **GPU memory clock frequency**
-- **VRAM memory usage percentage**
-- **VRAM memory usage**
-- **visible VRAM memory usage percentage**
-- **visible VRAM memory usage**
-- **GTT memory usage percentage**
-- **GTT memory usage**
-
-### configuration
-
-The `drm` path can be configured if it differs from the default:
-
-```
-[plugin:proc:/sys/class/drm]
- # directory to monitor = /sys/class/drm
-```
-
-> [!NOTE]
-> Temperature, fan speed, voltage and power metrics for AMD GPUs can be monitored using the [Sensors](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/sensors/README.md) plugin.
-
-## IPC
-
-### Monitored IPC metrics
-
-- **number of messages in message queues**
-- **amount of memory used by message queues**
-- **number of semaphores**
-- **number of semaphore arrays**
-- **number of shared memory segments**
-- **amount of memory used by shared memory segments**
-
-Since the message queue charts are dynamic, sane limits are applied to the number of dimensions per chart (the limit is configurable).
-
-### configuration
-
-```
-[plugin:proc:ipc]
- # message queues = yes
- # semaphore totals = yes
- # shared memory totals = yes
- # msg filename to monitor = /proc/sysvipc/msg
- # shm filename to monitor = /proc/sysvipc/shm
- # max dimensions in memory allowed = 50
-```
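-
-As an illustration, a Python sketch of deriving the per-queue dimensions from `/proc/sysvipc/msg` (assuming the standard column layout of that file):
-
-```
-from pathlib import Path
-
-MAX_DIMENSIONS = 50  # mirrors "max dimensions in memory allowed"
-
-lines = Path("/proc/sysvipc/msg").read_text().splitlines()
-header = lines[0].split()
-msqid_i = header.index("msqid")
-cbytes_i = header.index("cbytes")
-qnum_i = header.index("qnum")
-
-for line in lines[1:MAX_DIMENSIONS + 1]:
-    fields = line.split()
-    # one dimension per queue: messages queued and bytes used
-    print(f"queue {fields[msqid_i]}: "
-          f"{fields[qnum_i]} messages, {fields[cbytes_i]} bytes")
-```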
-
-
diff --git a/collectors/proc.plugin/integrations/amd_gpu.md b/collectors/proc.plugin/integrations/amd_gpu.md
deleted file mode 100644
index e85cce221..000000000
--- a/collectors/proc.plugin/integrations/amd_gpu.md
+++ /dev/null
@@ -1,110 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/amd_gpu.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "AMD GPU"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Hardware Devices and Sensors"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# AMD GPU
-
-
-<img src="https://netdata.cloud/img/amd.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /sys/class/drm
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration monitors AMD GPU metrics, such as utilization, clock frequency and memory usage.
-
-It reads `/sys/class/drm` to collect metrics for every AMD GPU card instance it encounters.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per gpu
-
-These metrics refer to the GPU.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| product_name | GPU product name (e.g. AMD RX 6600) |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| amdgpu.gpu_utilization | utilization | percentage |
-| amdgpu.gpu_mem_utilization | utilization | percentage |
-| amdgpu.gpu_clk_frequency | frequency | MHz |
-| amdgpu.gpu_mem_clk_frequency | frequency | MHz |
-| amdgpu.gpu_mem_vram_usage_perc | usage | percentage |
-| amdgpu.gpu_mem_vram_usage | free, used | bytes |
-| amdgpu.gpu_mem_vis_vram_usage_perc | usage | percentage |
-| amdgpu.gpu_mem_vis_vram_usage | free, used | bytes |
-| amdgpu.gpu_mem_gtt_usage_perc | usage | percentage |
-| amdgpu.gpu_mem_gtt_usage | free, used | bytes |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/btrfs.md b/collectors/proc.plugin/integrations/btrfs.md
deleted file mode 100644
index 5f994c841..000000000
--- a/collectors/proc.plugin/integrations/btrfs.md
+++ /dev/null
@@ -1,137 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/btrfs.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "BTRFS"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Filesystem/BTRFS"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# BTRFS
-
-
-<img src="https://netdata.cloud/img/filesystem.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /sys/fs/btrfs
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration provides usage and error statistics from the BTRFS filesystem.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per btrfs filesystem
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| filesystem_uuid | TBD |
-| filesystem_label | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| btrfs.disk | unallocated, data_free, data_used, meta_free, meta_used, sys_free, sys_used | MiB |
-| btrfs.data | free, used | MiB |
-| btrfs.metadata | free, used, reserved | MiB |
-| btrfs.system | free, used | MiB |
-| btrfs.commits | commits | commits |
-| btrfs.commits_perc_time | commits | percentage |
-| btrfs.commit_timings | last, max | ms |
-
-### Per btrfs device
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| device_id | TBD |
-| filesystem_uuid | TBD |
-| filesystem_label | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| btrfs.device_errors | write_errs, read_errs, flush_errs, corruption_errs, generation_errs | errors |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ btrfs_allocated ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.disk | percentage of allocated BTRFS physical disk space |
-| [ btrfs_data ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.data | utilization of BTRFS data space |
-| [ btrfs_metadata ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.metadata | utilization of BTRFS metadata space |
-| [ btrfs_system ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.system | utilization of BTRFS system space |
-| [ btrfs_device_read_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS read errors |
-| [ btrfs_device_write_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS write errors |
-| [ btrfs_device_flush_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS flush errors |
-| [ btrfs_device_corruption_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS corruption errors |
-| [ btrfs_device_generation_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS generation errors |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/conntrack.md b/collectors/proc.plugin/integrations/conntrack.md
deleted file mode 100644
index b38f6b508..000000000
--- a/collectors/proc.plugin/integrations/conntrack.md
+++ /dev/null
@@ -1,105 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/conntrack.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Conntrack"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Firewall"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Conntrack
-
-
-<img src="https://netdata.cloud/img/firewall.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/net/stat/nf_conntrack
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration monitors the connection tracking mechanism of Netfilter in the Linux kernel.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Conntrack instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| netfilter.conntrack_sockets | connections | active connections |
-| netfilter.conntrack_new | new, ignore, invalid | connections/s |
-| netfilter.conntrack_changes | inserted, deleted, delete_list | changes/s |
-| netfilter.conntrack_expect | created, deleted, new | expectations/s |
-| netfilter.conntrack_search | searched, restarted, found | searches/s |
-| netfilter.conntrack_errors | icmp_error, error_failed, drop, early_drop | events/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ netfilter_conntrack_full ](https://github.com/netdata/netdata/blob/master/health/health.d/netfilter.conf) | netfilter.conntrack_sockets | netfilter connection tracker table size utilization |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/disk_statistics.md b/collectors/proc.plugin/integrations/disk_statistics.md
deleted file mode 100644
index 8f7448c39..000000000
--- a/collectors/proc.plugin/integrations/disk_statistics.md
+++ /dev/null
@@ -1,149 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/disk_statistics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Disk Statistics"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Disk"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Disk Statistics
-
-
-<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/diskstats
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration provides detailed statistics for each of your system's disk devices and partitions.
-The data is reported by the kernel and can be used to monitor disk activity on a Linux system.
-
-Get valuable insight into how your disks are performing and where potential bottlenecks might be.
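-
-A minimal Python sketch of turning two `/proc/diskstats` samples into throughput (sector counts in this file are always in 512-byte units):
-
-```
-import time
-
-def sample():
-    stats = {}
-    with open("/proc/diskstats") as f:
-        for line in f:
-            fields = line.split()
-            # fields[5] = sectors read, fields[9] = sectors written
-            stats[fields[2]] = (int(fields[5]), int(fields[9]))
-    return stats
-
-before = sample()
-time.sleep(1)
-after = sample()
-for name, (rd, wr) in after.items():
-    rd0, wr0 = before.get(name, (rd, wr))
-    # one 512-byte sector = 0.5 KiB, hence the division by 2
-    print(f"{name}: read {(rd - rd0) / 2:.1f} KiB/s, "
-          f"write {(wr - wr0) / 2:.1f} KiB/s")
-```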
-
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Disk Statistics instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.io | in, out | KiB/s |
-
-### Per disk
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| device | TBD |
-| mount_point | TBD |
-| device_type | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| disk.io | reads, writes | KiB/s |
-| disk_ext.io | discards | KiB/s |
-| disk.ops | reads, writes | operations/s |
-| disk_ext.ops | discards, flushes | operations/s |
-| disk.qops | operations | operations |
-| disk.backlog | backlog | milliseconds |
-| disk.busy | busy | milliseconds |
-| disk.util | utilization | % of time working |
-| disk.mops | reads, writes | merged operations/s |
-| disk_ext.mops | discards | merged operations/s |
-| disk.iotime | reads, writes | milliseconds/s |
-| disk_ext.iotime | discards, flushes | milliseconds/s |
-| disk.await | reads, writes | milliseconds/operation |
-| disk_ext.await | discards, flushes | milliseconds/operation |
-| disk.avgsz | reads, writes | KiB/operation |
-| disk_ext.avgsz | discards | KiB/operation |
-| disk.svctm | svctm | milliseconds/operation |
-| disk.bcache_cache_alloc | ununsed, dirty, clean, metadata, undefined | percentage |
-| disk.bcache_hit_ratio | 5min, 1hour, 1day, ever | percentage |
-| disk.bcache_rates | congested, writeback | KiB/s |
-| disk.bcache_size | dirty | MiB |
-| disk.bcache_usage | avail | percentage |
-| disk.bcache_cache_read_races | races, errors | operations/s |
-| disk.bcache | hits, misses, collisions, readaheads | operations/s |
-| disk.bcache_bypass | hits, misses | operations/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ 10min_disk_backlog ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.backlog | average backlog size of the ${label:device} disk over the last 10 minutes |
-| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |
-| [ bcache_cache_dirty ](https://github.com/netdata/netdata/blob/master/health/health.d/bcache.conf) | disk.bcache_cache_alloc | percentage of cache space used for dirty data and metadata (this usually means your SSD cache is too small) |
-| [ bcache_cache_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/bcache.conf) | disk.bcache_cache_read_races | number of times data was read from the cache, the bucket was reused and invalidated in the last 10 minutes (when this occurs the data is reread from the backing device) |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/entropy.md b/collectors/proc.plugin/integrations/entropy.md
deleted file mode 100644
index 8432a1f96..000000000
--- a/collectors/proc.plugin/integrations/entropy.md
+++ /dev/null
@@ -1,133 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/entropy.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Entropy"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/System"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Entropy
-
-
-<img src="https://netdata.cloud/img/syslog.png" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/sys/kernel/random/entropy_avail
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Entropy is a measure of the randomness or unpredictability of data.
-
-In the context of cryptography, entropy is used to generate random numbers or keys that are essential for
-secure communication and encryption. Without a good source of entropy, cryptographic protocols can become
-vulnerable to attacks that exploit the predictability of the generated keys.
-
-In most operating systems, entropy is generated by collecting random events from various sources, such as
-hardware interrupts, mouse movements, keyboard presses, and disk activity. These events are fed into a pool
-of entropy, which is then used to generate random numbers when needed.
-
-The `/dev/random` device in Linux is one such source of entropy, and it provides an interface for programs
-to access the pool of entropy. When a program requests random numbers, it reads from the `/dev/random` device,
-which blocks until enough entropy is available to generate the requested numbers. This ensures that the
-generated numbers are truly random and not predictable.
-
-However, if the pool of entropy gets depleted, the `/dev/random` device may block indefinitely, causing
-programs that rely on random numbers to slow down or even freeze. This is especially problematic for
-cryptographic protocols that require a continuous stream of random numbers, such as SSL/TLS and SSH.
-
-To avoid this issue, some systems use a hardware random number generator (RNG) to generate high-quality
-entropy. A hardware RNG generates random numbers by measuring physical phenomena, such as thermal noise or
-radioactive decay. These sources of randomness are considered to be more reliable and unpredictable than
-software-based sources.
-
-One such hardware RNG is the Trusted Platform Module (TPM), which is a dedicated hardware chip that is used
-for cryptographic operations and secure boot. The TPM contains a built-in hardware RNG that generates
-high-quality entropy, which can be used to seed the pool of entropy in the operating system.
-
-Alternatively, software-based solutions such as `Haveged` can be used to generate additional entropy by
-exploiting sources of randomness in the system, such as CPU utilization and network traffic. These solutions
-can help to mitigate the risk of entropy depletion, but they may not be as reliable as hardware-based solutions.
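-
-Reading the value this collector charts is straightforward; a Python sketch (assuming the standard procfs paths):
-
-```
-# current entropy estimate, in bits
-with open("/proc/sys/kernel/random/entropy_avail") as f:
-    entropy_bits = int(f.read())
-
-# upper bound of the pool (4096 bits on older kernels, 256 on newer ones)
-with open("/proc/sys/kernel/random/poolsize") as f:
-    pool_bits = int(f.read())
-
-print(f"entropy available: {entropy_bits}/{pool_bits} bits")
-```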
-
-
-
-
-This collector is only supported on the following platforms:
-
-- linux
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Entropy instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.entropy | entropy | entropy |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ lowest_entropy ](https://github.com/netdata/netdata/blob/master/health/health.d/entropy.conf) | system.entropy | minimum number of bits of entropy available for the kernel’s random number generator |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/infiniband.md b/collectors/proc.plugin/integrations/infiniband.md
deleted file mode 100644
index 6cb5fdc53..000000000
--- a/collectors/proc.plugin/integrations/infiniband.md
+++ /dev/null
@@ -1,99 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/infiniband.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "InfiniBand"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Network"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# InfiniBand
-
-
-<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /sys/class/infiniband
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration monitors InfiniBand network interface statistics.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per infiniband port
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ib.bytes | Received, Sent | kilobits/s |
-| ib.packets | Received, Sent, Mcast_rcvd, Mcast_sent, Ucast_rcvd, Ucast_sent | packets/s |
-| ib.errors | Pkts_malformated, Pkts_rcvd_discarded, Pkts_sent_discarded, Tick_Wait_to_send, Pkts_missed_resource, Buffer_overrun, Link_Downed, Link_recovered, Link_integrity_err, Link_minor_errors, Pkts_rcvd_with_EBP, Pkts_rcvd_discarded_by_switch, Pkts_sent_discarded_by_switch | errors/s |
-| ib.hwerrors | Duplicated_packets, Pkt_Seq_Num_gap, Ack_timer_expired, Drop_missing_buffer, Drop_out_of_sequence, NAK_sequence_rcvd, CQE_err_Req, CQE_err_Resp, CQE_Flushed_err_Req, CQE_Flushed_err_Resp, Remote_access_err_Req, Remote_access_err_Resp, Remote_invalid_req, Local_length_err_Resp, RNR_NAK_Packets, CNP_Pkts_ignored, RoCE_ICRC_Errors | errors/s |
-| ib.hwpackets | RoCEv2_Congestion_sent, RoCEv2_Congestion_rcvd, IB_Congestion_handled, ATOMIC_req_rcvd, Connection_req_rcvd, Read_req_rcvd, Write_req_rcvd, RoCE_retrans_adaptive, RoCE_retrans_timeout, RoCE_slow_restart, RoCE_slow_restart_congestion, RoCE_slow_restart_count | packets/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/inter_process_communication.md b/collectors/proc.plugin/integrations/inter_process_communication.md
deleted file mode 100644
index 55708a4b0..000000000
--- a/collectors/proc.plugin/integrations/inter_process_communication.md
+++ /dev/null
@@ -1,120 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/inter_process_communication.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Inter Process Communication"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/IPC"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Inter Process Communication
-
-
-<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: ipc
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-IPC stands for Inter-Process Communication. It is a mechanism that allows processes to communicate with each
-other and synchronize their actions.
-
-This collector exposes information about:
-
-- Message Queues: These allow messages to be exchanged between processes. This is a flexible method,
- since messages can be placed onto a queue and read at a later time.
-
-- Shared Memory: This method allows for the fastest form of IPC because processes can exchange data by
- reading/writing into shared memory segments.
-
-- Semaphores: They are used to synchronize the operations performed by independent processes. So, if multiple
- processes are trying to access a single shared resource, semaphores can ensure that only one process
- accesses the resource at a given time.
-
-
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Inter Process Communication instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.ipc_semaphores | semaphores | semaphores |
-| system.ipc_semaphore_arrays | arrays | arrays |
-| system.message_queue_message | a dimension per queue | messages |
-| system.message_queue_bytes | a dimension per queue | bytes |
-| system.shared_memory_segments | segments | segments |
-| system.shared_memory_bytes | bytes | bytes |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |
-| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/interrupts.md b/collectors/proc.plugin/integrations/interrupts.md
deleted file mode 100644
index 1b85fb767..000000000
--- a/collectors/proc.plugin/integrations/interrupts.md
+++ /dev/null
@@ -1,141 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/interrupts.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Interrupts"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/CPU"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Interrupts
-
-
-<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/interrupts
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitors `/proc/interrupts`, a file organized by CPU and then by the type of interrupt.
-The numbers reported are the counts of interrupts of each type that have occurred.
-
-An interrupt is a signal to the processor emitted by hardware or software indicating an event that needs
-immediate attention. The processor then interrupts its current activities and executes the interrupt handler
-to deal with the event. This is part of the way a computer multitasks and handles concurrent processing.
-
-The types of interrupts include:
-
-- **I/O interrupts**: These are caused by I/O devices like the keyboard, mouse, printer, etc. For example, when
- you type something on the keyboard, an interrupt is triggered so the processor can handle the new input.
-
-- **Timer interrupts**: These are generated at regular intervals by the system's timer circuit. It's primarily
- used to switch the CPU among different tasks.
-
-- **Software interrupts**: These are generated by a program requiring disk I/O operations, or other system resources.
-
-- **Hardware interrupts**: These are caused by hardware conditions such as power failure, overheating, etc.
-
-Monitoring `/proc/interrupts` is useful for:
-
-- **Performance tuning**: If an interrupt is happening very frequently, it could be a sign that a device is not
- configured correctly, or there is a software bug causing unnecessary interrupts. This could lead to system
- performance degradation.
-
-- **System troubleshooting**: If you're seeing a lot of unexpected interrupts, it could be a sign of a hardware problem.
-
-- **Understanding system behavior**: More generally, keeping an eye on what interrupts are occurring can help you
- understand what your system is doing. It can provide insights into the system's interaction with hardware,
- drivers, and other parts of the kernel.
-
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Interrupts instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.interrupts | a dimension per device | interrupts/s |
-
-### Per cpu core
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| cpu | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cpu.interrupts | a dimension per device | interrupts/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/ip_virtual_server.md b/collectors/proc.plugin/integrations/ip_virtual_server.md
deleted file mode 100644
index 5c7afd2eb..000000000
--- a/collectors/proc.plugin/integrations/ip_virtual_server.md
+++ /dev/null
@@ -1,97 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/ip_virtual_server.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "IP Virtual Server"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Network"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# IP Virtual Server
-
-
-<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/net/ip_vs_stats
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration monitors IP Virtual Server statistics.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per IP Virtual Server instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipvs.sockets | connections | connections/s |
-| ipvs.packets | received, sent | packets/s |
-| ipvs.net | received, sent | kilobits/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/ipv6_socket_statistics.md b/collectors/proc.plugin/integrations/ipv6_socket_statistics.md
deleted file mode 100644
index 2c1ee2721..000000000
--- a/collectors/proc.plugin/integrations/ipv6_socket_statistics.md
+++ /dev/null
@@ -1,99 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/ipv6_socket_statistics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "IPv6 Socket Statistics"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Network"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# IPv6 Socket Statistics
-
-
-<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/net/sockstat6
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration provides IPv6 socket statistics.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per IPv6 Socket Statistics instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipv6.sockstat6_tcp_sockets | inuse | sockets |
-| ipv6.sockstat6_udp_sockets | inuse | sockets |
-| ipv6.sockstat6_udplite_sockets | inuse | sockets |
-| ipv6.sockstat6_raw_sockets | inuse | sockets |
-| ipv6.sockstat6_frag_sockets | inuse | fragments |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/kernel_same-page_merging.md b/collectors/proc.plugin/integrations/kernel_same-page_merging.md
deleted file mode 100644
index 336f0feaf..000000000
--- a/collectors/proc.plugin/integrations/kernel_same-page_merging.md
+++ /dev/null
@@ -1,103 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/kernel_same-page_merging.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Kernel Same-Page Merging"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Memory"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Kernel Same-Page Merging
-
-
-<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /sys/kernel/mm/ksm
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Kernel Samepage Merging (KSM) is a memory-saving feature in Linux that enables the kernel to examine the
-memory of different processes and identify identical pages. It then merges these identical pages into a
-single page that the processes share. This is particularly useful for virtualization, where multiple virtual
-machines might be running the same operating system or applications and have many identical pages.
-
-The collector provides information about the operation and effectiveness of KSM on your system.
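-
-A Python sketch of estimating the savings from the raw counters (assuming the default `/sys/kernel/mm/ksm` path; all counters are in pages):
-
-```
-import os
-from pathlib import Path
-
-KSM = Path("/sys/kernel/mm/ksm")
-PAGE_SIZE = os.sysconf("SC_PAGE_SIZE")
-
-shared = int((KSM / "pages_shared").read_text())    # physical pages kept
-sharing = int((KSM / "pages_sharing").read_text())  # references to them
-
-# every "sharing" reference is a page that no longer needs its own copy
-saved_mib = sharing * PAGE_SIZE / 1024 / 1024
-print(f"KSM: {shared} shared pages backing {sharing} references, "
-      f"~{saved_mib:.1f} MiB saved")
-```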
-
-
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Kernel Same-Page Merging instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.ksm | shared, unshared, sharing, volatile | MiB |
-| mem.ksm_savings | savings, offered | MiB |
-| mem.ksm_ratios | savings | percentage |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/md_raid.md b/collectors/proc.plugin/integrations/md_raid.md
deleted file mode 100644
index 34a4840bb..000000000
--- a/collectors/proc.plugin/integrations/md_raid.md
+++ /dev/null
@@ -1,125 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/md_raid.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "MD RAID"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Disk"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# MD RAID
-
-
-<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/mdstat
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration monitors the status of MD RAID devices.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per MD RAID instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| md.health | a dimension per md array | failed disks |
-
-### Per md array
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| device | TBD |
-| raid_level | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| md.disks | inuse, down | disks |
-| md.mismatch_cnt | count | unsynchronized blocks |
-| md.status | check, resync, recovery, reshape | percent |
-| md.expected_time_until_operation_finish | finish_in | seconds |
-| md.operation_speed | speed | KiB/s |
-| md.nonredundant | available | boolean |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ mdstat_last_collected ](https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf) | md.disks | number of seconds since the last successful data collection |
-| [ mdstat_disks ](https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf) | md.disks | number of devices in the down state for the ${label:device} ${label:raid_level} array. Any number > 0 indicates that the array is degraded. |
-| [ mdstat_mismatch_cnt ](https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf) | md.mismatch_cnt | number of unsynchronized blocks for the ${label:device} ${label:raid_level} array |
-| [ mdstat_nonredundant_last_collected ](https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf) | md.nonredundant | number of seconds since the last successful data collection |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/memory_modules_dimms.md b/collectors/proc.plugin/integrations/memory_modules_dimms.md
deleted file mode 100644
index 351c6fcd7..000000000
--- a/collectors/proc.plugin/integrations/memory_modules_dimms.md
+++ /dev/null
@@ -1,146 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/memory_modules_dimms.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Memory modules (DIMMs)"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Memory"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Memory modules (DIMMs)
-
-
-<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /sys/devices/system/edac/mc
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-The Error Detection and Correction (EDAC) subsystem detects and reports errors in the system's memory,
-primarily ECC (Error-Correcting Code) memory errors.
-
-The collector provides data for:
-
-- Per memory controller (MC): correctable and uncorrectable errors. These can be of two kinds:
- - errors related to a DIMM
- - errors that cannot be associated with a DIMM
-
-- Per memory DIMM: correctable and uncorrectable errors. There are two kinds of memory controllers:
- - memory controllers that can identify the physical DIMMs and report errors directly for them,
- - memory controllers that report errors for memory address ranges that can be linked to DIMMs.
-   In this case, the number of DIMMs reported may exceed the number of physical DIMMs installed.
-
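-A Python sketch of the per-controller counters involved (assuming the default `/sys/devices/system/edac/mc` path; the counters are lifetime totals, which the collector turns into rates):
-
-```
-from pathlib import Path
-
-for mc in sorted(Path("/sys/devices/system/edac/mc").glob("mc*")):
-    counts = {}
-    for name in ("ce_count", "ue_count",
-                 "ce_noinfo_count", "ue_noinfo_count"):
-        counter = mc / name
-        if counter.exists():
-            counts[name] = int(counter.read_text())
-    # ce = correctable, ue = uncorrectable, noinfo = no DIMM attribution
-    print(mc.name, counts)
-```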
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per memory controller
-
-These metrics refer to the memory controller.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |
-| mc_name | Memory controller type. |
-| size_mb | The amount of memory in megabytes that this memory controller manages. |
-| max_location | Last available memory slot in this memory controller. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.edac_mc | correctable, uncorrectable, correctable_noinfo, uncorrectable_noinfo | errors/s |
-
-### Per memory module
-
-These metrics refer to the memory module (or rank, [depends on the memory controller](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#f5)).
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |
-| dimm | [dimmX or rankX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#dimmx-or-rankx-directories) directory name of this memory module. |
-| dimm_dev_type | Type of DRAM device used in this memory module. For example, x1, x2, x4, x8. |
-| dimm_edac_mode | Used type of error detection and correction. For example, S4ECD4ED would mean a Chipkill with x4 DRAM. |
-| dimm_label | Label assigned to this memory module. |
-| dimm_location | Location of the memory module. |
-| dimm_mem_type | Type of the memory module. |
-| size | The amount of memory in megabytes that this memory module manages. |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.edac_mc | correctable, uncorrectable | errors/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ ecc_memory_mc_noinfo_correctable ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.edac_mc | memory controller ${label:controller} ECC correctable errors (unknown DIMM slot) in the last 10 minutes |
-| [ ecc_memory_mc_noinfo_uncorrectable ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.edac_mc | memory controller ${label:controller} ECC uncorrectable errors (unknown DIMM slot) in the last 10 minutes |
-| [ ecc_memory_dimm_correctable ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.edac_mc_dimm | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC correctable errors in the last 10 minutes |
-| [ ecc_memory_dimm_uncorrectable ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.edac_mc_dimm | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC uncorrectable errors in the last 10 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/memory_statistics.md b/collectors/proc.plugin/integrations/memory_statistics.md
deleted file mode 100644
index 52f1bf530..000000000
--- a/collectors/proc.plugin/integrations/memory_statistics.md
+++ /dev/null
@@ -1,138 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/memory_statistics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Memory Statistics"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Memory"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Memory Statistics
-
-
-<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/vmstat
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-The Linux virtual memory subsystem.
-
-This integration provides information about memory management, indicating how effectively the kernel
-allocates and frees memory resources in response to system demands.
-
-Monitors page faults, which occur when a process requests a portion of its memory that isn't
-immediately available. Monitoring these events can help diagnose inefficiencies in memory management and
-provide insights into application behavior.
-
-Tracks swapping activity — a vital aspect of memory management where the kernel moves data from RAM to
-swap space, and vice versa, based on memory demand and usage. It also monitors the utilization of zswap,
-a compressed cache for swap pages, and provides insights into its usage and performance implications.
-
-In the context of virtualized environments, it tracks the ballooning mechanism which is used to balance
-memory resources between host and guest systems.
-
-For systems using NUMA architecture, it provides insights into the local and remote memory accesses, which
-can impact the performance based on the memory access times.
-
-The collector also watches for 'Out of Memory' kills, a drastic measure taken by the system when it runs out
-of memory resources.
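-
-A Python sketch of reading the `/proc/vmstat` fields behind the swap, fault and OOM charts (field availability depends on kernel version and configuration):
-
-```
-fields = {}
-with open("/proc/vmstat") as f:
-    for line in f:
-        key, value = line.split()
-        fields[key] = int(value)
-
-print("major faults:", fields.get("pgmajfault"))
-print("swap in/out :", fields.get("pswpin"), "/", fields.get("pswpout"))
-print("oom kills   :", fields.get("oom_kill"))  # absent on older kernels
-```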
-
-
-
-
-This collector is only supported on the following platforms:
-
-- linux
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Memory Statistics instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.swapio | in, out | KiB/s |
-| system.pgpgio | in, out | KiB/s |
-| system.pgfaults | minor, major | faults/s |
-| mem.balloon | inflate, deflate, migrate | KiB/s |
-| mem.zswapio | in, out | KiB/s |
-| mem.ksm_cow | swapin, write | KiB/s |
-| mem.thp_faults | alloc, fallback, fallback_charge | events/s |
-| mem.thp_file | alloc, fallback, mapped, fallback_charge | events/s |
-| mem.thp_zero | alloc, failed | events/s |
-| mem.thp_collapse | alloc, failed | events/s |
-| mem.thp_split | split, failed, split_pmd, split_deferred | events/s |
-| mem.thp_swapout | swapout, fallback | events/s |
-| mem.thp_compact | success, fail, stall | events/s |
-| mem.oom_kill | kills | kills/s |
-| mem.numa | local, foreign, interleave, other, pte_updates, huge_pte_updates, hint_faults, hint_faults_local, pages_migrated | events/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |
-| [ oom_kill ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | mem.oom_kill | number of out of memory kills in the last 30 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-
-#### Options
-
-There are no configuration options.
-
-#### Examples
-
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/memory_usage.md b/collectors/proc.plugin/integrations/memory_usage.md
deleted file mode 100644
index 141bd29ad..000000000
--- a/collectors/proc.plugin/integrations/memory_usage.md
+++ /dev/null
@@ -1,135 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/memory_usage.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Memory Usage"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Memory"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Memory Usage
-
-
-<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/meminfo
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-`/proc/meminfo` provides detailed information about the system's current memory usage. It includes information
-about different types of memory: RAM, Swap, ZSwap, HugePages, Transparent HugePages (THP), kernel memory,
-SLAB memory, memory mappings, and more.
-
-Monitoring `/proc/meminfo` can be useful for:
-
-- **Performance Tuning**: Understanding your system's memory usage can help you make decisions about system
- tuning and optimization. For example, if your system is frequently low on free memory, it might benefit
- from more RAM.
-
-- **Troubleshooting**: If your system is experiencing problems, `/proc/meminfo` can provide clues about
- whether memory usage is a factor. For example, if your system is slow and cached swap is high, it could
- mean that your system is swapping out a lot of memory to disk, which can degrade performance.
-
-- **Capacity Planning**: By monitoring memory usage over time, you can understand trends and make informed
- decisions about future capacity needs.
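-
-A Python sketch of parsing the file and deriving the available-memory percentage used by the `ram_available` alert (falling back to `MemFree` on kernels without `MemAvailable`):
-
-```
-meminfo = {}
-with open("/proc/meminfo") as f:
-    for line in f:
-        key, rest = line.split(":", 1)
-        meminfo[key] = int(rest.split()[0])  # values are in KiB
-
-total = meminfo["MemTotal"]
-available = meminfo.get("MemAvailable", meminfo["MemFree"])
-print(f"RAM available: {available / total * 100:.1f}% "
-      f"({available / 1024:.0f} MiB of {total / 1024:.0f} MiB)")
-```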
-
-
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Memory Usage instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.ram | free, used, cached, buffers | MiB |
-| mem.available | avail | MiB |
-| mem.swap | free, used | MiB |
-| mem.swap_cached | cached | MiB |
-| mem.zswap | in-ram, on-disk | MiB |
-| mem.hwcorrupt | HardwareCorrupted | MiB |
-| mem.commited | Commited_AS | MiB |
-| mem.writeback | Dirty, Writeback, FuseWriteback, NfsWriteback, Bounce | MiB |
-| mem.kernel | Slab, KernelStack, PageTables, VmallocUsed, Percpu | MiB |
-| mem.slab | reclaimable, unreclaimable | MiB |
-| mem.hugepages | free, used, surplus, reserved | MiB |
-| mem.thp | anonymous, shmem | MiB |
-| mem.thp_details | ShmemPmdMapped, FileHugePages, FilePmdMapped | MiB |
-| mem.reclaiming | Active, Inactive, Active(anon), Inactive(anon), Active(file), Inactive(file), Unevictable, Mlocked | MiB |
-| mem.high_low | high_used, low_used, high_free, low_free | MiB |
-| mem.cma | used, free | MiB |
-| mem.directmaps | 4k, 2m, 4m, 1g | MiB |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | system.ram | system memory utilization |
-| [ ram_available ](https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |
-| [ used_swap ](https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf) | mem.swap | swap memory utilization |
-| [ 1hour_memory_hw_corrupted ](https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf) | mem.hwcorrupt | amount of memory corrupted due to a hardware failure |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/network_interfaces.md b/collectors/proc.plugin/integrations/network_interfaces.md
deleted file mode 100644
index 0cfd56fae..000000000
--- a/collectors/proc.plugin/integrations/network_interfaces.md
+++ /dev/null
@@ -1,137 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/network_interfaces.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Network interfaces"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Network"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Network interfaces
-
-
-<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/net/dev
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor network interface metrics about bandwidth, state, errors and more.
-
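-The raw counters come from `/proc/net/dev`; a minimal sketch for inspecting them directly:
-
-```bash
-# Per-interface receive/transmit bytes, packets, errors and drops
-cat /proc/net/dev
-```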
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Network interfaces instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.net | received, sent | kilobits/s |
-
-### Per network device
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| interface_type | TBD |
-| device | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| net.net | received, sent | kilobits/s |
-| net.speed | speed | kilobits/s |
-| net.duplex | full, half, unknown | state |
-| net.operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |
-| net.carrier | up, down | state |
-| net.mtu | mtu | octets |
-| net.packets | received, sent, multicast | packets/s |
-| net.errors | inbound, outbound | errors/s |
-| net.drops | inbound, outbound | drops/s |
-| net.fifo | receive, transmit | errors |
-| net.compressed | received, sent | packets/s |
-| net.events | frames, collisions, carrier | events/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ interface_speed ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |
-| [ 1m_received_traffic_overflow ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | average inbound utilization for the network interface ${label:device} over the last minute |
-| [ 1m_sent_traffic_overflow ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.net | average outbound utilization for the network interface ${label:device} over the last minute |
-| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |
-| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |
-| [ wifi_inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |
-| [ wifi_outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |
-| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |
-| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |
-| [ 10min_fifo_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/net.conf) | net.fifo | number of FIFO errors for the network interface ${label:device} in the last 10 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/network_statistics.md b/collectors/proc.plugin/integrations/network_statistics.md
deleted file mode 100644
index 726fd9d61..000000000
--- a/collectors/proc.plugin/integrations/network_statistics.md
+++ /dev/null
@@ -1,161 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/network_statistics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Network statistics"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Network"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Network statistics
-
-
-<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/net/netstat
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration provides metrics from the `netstat`, `snmp` and `snmp6` modules.
-
-
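-A minimal sketch for inspecting the kernel files these modules are built from:
-
-```bash
-# Extended and base statistics (paired header/value rows)
-head -4 /proc/net/netstat
-head -4 /proc/net/snmp
-# IPv6 statistics (one counter per line)
-head -4 /proc/net/snmp6
-```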
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Network statistics instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.ip | received, sent | kilobits/s |
-| ip.tcpmemorypressures | pressures | events/s |
-| ip.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger, failed | connections/s |
-| ip.tcpreorders | timestamp, sack, fack, reno | packets/s |
-| ip.tcpofo | inqueue, dropped, merged, pruned | packets/s |
-| ip.tcpsyncookies | received, sent, failed | packets/s |
-| ip.tcp_syn_queue | drops, cookies | packets/s |
-| ip.tcp_accept_queue | overflows, drops | packets/s |
-| ip.tcpsock | connections | active connections |
-| ip.tcppackets | received, sent | packets/s |
-| ip.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |
-| ip.tcpopens | active, passive | connections/s |
-| ip.tcphandshake | EstabResets, OutRsts, AttemptFails, SynRetrans | events/s |
-| ipv4.packets | received, sent, forwarded, delivered | packets/s |
-| ipv4.errors | InDiscards, OutDiscards, InNoRoutes, OutNoRoutes, InHdrErrors, InAddrErrors, InTruncatedPkts, InCsumErrors | packets/s |
-| ipv4.bcast | received, sent | kilobits/s |
-| ipv4.bcastpkts | received, sent | packets/s |
-| ipv4.mcast | received, sent | kilobits/s |
-| ipv4.mcastpkts | received, sent | packets/s |
-| ipv4.icmp | received, sent | packets/s |
-| ipv4.icmpmsg | InEchoReps, OutEchoReps, InDestUnreachs, OutDestUnreachs, InRedirects, OutRedirects, InEchos, OutEchos, InRouterAdvert, OutRouterAdvert, InRouterSelect, OutRouterSelect, InTimeExcds, OutTimeExcds, InParmProbs, OutParmProbs, InTimestamps, OutTimestamps, InTimestampReps, OutTimestampReps | packets/s |
-| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |
-| ipv4.udppackets | received, sent | packets/s |
-| ipv4.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |
-| ipv4.udplite | received, sent | packets/s |
-| ipv4.udplite_errors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | packets/s |
-| ipv4.ecnpkts | CEP, NoECTP, ECTP0, ECTP1 | packets/s |
-| ipv4.fragsin | ok, failed, all | packets/s |
-| ipv4.fragsout | ok, failed, created | packets/s |
-| system.ipv6 | received, sent | kilobits/s |
-| ipv6.packets | received, sent, forwarded, delivers | packets/s |
-| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InUnknownProtos, InTooBigErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |
-| ipv6.bcast | received, sent | kilobits/s |
-| ipv6.mcast | received, sent | kilobits/s |
-| ipv6.mcastpkts | received, sent | packets/s |
-| ipv6.udppackets | received, sent | packets/s |
-| ipv6.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |
-| ipv6.udplitepackets | received, sent | packets/s |
-| ipv6.udpliteerrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors | events/s |
-| ipv6.icmp | received, sent | messages/s |
-| ipv6.icmpredir | received, sent | redirects/s |
-| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutPktTooBigs, OutTimeExcds, OutParmProblems | errors/s |
-| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |
-| ipv6.groupmemb | InQueries, OutQueries, InResponses, OutResponses, InReductions, OutReductions | messages/s |
-| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |
-| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |
-| ipv6.icmpmldv2 | received, sent | reports/s |
-| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |
-| ipv6.ect | InNoECTPkts, InECT1Pkts, InECT0Pkts, InCEPkts | packets/s |
-| ipv6.fragsin | ok, failed, timeout, all | packets/s |
-| ipv6.fragsout | ok, failed, all | packets/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ 1m_tcp_syn_queue_drops ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of SYN requests was dropped due to the full TCP SYN queue over the last minute (SYN cookies were not enabled) |
-| [ 1m_tcp_syn_queue_cookies ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of sent SYN cookies due to the full TCP SYN queue over the last minute |
-| [ 1m_tcp_accept_queue_overflows ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of overflows in the TCP accept queue over the last minute |
-| [ 1m_tcp_accept_queue_drops ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of dropped packets in the TCP accept queue over the last minute |
-| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_conn.conf) | ip.tcpsock | TCP connections utilization |
-| [ 1m_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last minute |
-| [ 10s_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |
-| [ 1m_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last minute |
-| [ 10s_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |
-| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |
-| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/nfs_client.md b/collectors/proc.plugin/integrations/nfs_client.md
deleted file mode 100644
index db5847714..000000000
--- a/collectors/proc.plugin/integrations/nfs_client.md
+++ /dev/null
@@ -1,99 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/nfs_client.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "NFS Client"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Filesystem/NFS"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# NFS Client
-
-
-<img src="https://netdata.cloud/img/nfs.png" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/net/rpc/nfs
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration provides statistics from the Linux kernel's NFS Client.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per NFS Client instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| nfs.net | udp, tcp | operations/s |
-| nfs.rpc | calls, retransmits, auth_refresh | calls/s |
-| nfs.proc2 | a dimension per proc2 call | calls/s |
-| nfs.proc3 | a dimension per proc3 call | calls/s |
-| nfs.proc4 | a dimension per proc4 call | calls/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/nfs_server.md b/collectors/proc.plugin/integrations/nfs_server.md
deleted file mode 100644
index 0c906b4d8..000000000
--- a/collectors/proc.plugin/integrations/nfs_server.md
+++ /dev/null
@@ -1,104 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/nfs_server.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "NFS Server"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Filesystem/NFS"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# NFS Server
-
-
-<img src="https://netdata.cloud/img/nfs.png" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/net/rpc/nfsd
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration provides statistics from the Linux kernel's NFS Server.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per NFS Server instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| nfsd.readcache | hits, misses, nocache | reads/s |
-| nfsd.filehandles | stale | handles/s |
-| nfsd.io | read, write | kilobytes/s |
-| nfsd.threads | threads | threads |
-| nfsd.net | udp, tcp | packets/s |
-| nfsd.rpc | calls, bad_format, bad_auth | calls/s |
-| nfsd.proc2 | a dimension per proc2 call | calls/s |
-| nfsd.proc3 | a dimension per proc3 call | calls/s |
-| nfsd.proc4 | a dimension per proc4 call | calls/s |
-| nfsd.proc4ops | a dimension per proc4 operation | operations/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/non-uniform_memory_access.md b/collectors/proc.plugin/integrations/non-uniform_memory_access.md
deleted file mode 100644
index 6f495fb79..000000000
--- a/collectors/proc.plugin/integrations/non-uniform_memory_access.md
+++ /dev/null
@@ -1,111 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/non-uniform_memory_access.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Non-Uniform Memory Access"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Memory"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Non-Uniform Memory Access
-
-
-<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /sys/devices/system/node
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Information about NUMA (Non-Uniform Memory Access) nodes on the system.
-
-NUMA is a method of configuring a cluster of microprocessors in a multiprocessing system so that they can
-share memory locally, improving performance and the ability of the system to be expanded. NUMA is used in
-symmetric multiprocessing (SMP) systems.
-
-In a NUMA system, processors, memory, and I/O devices are grouped together into cells, also known as nodes.
-Each node has its own memory and set of I/O devices, and one or more processors. While a processor can access
-memory in any of the nodes, it does so faster when accessing memory within its own node.
-
-The collector provides statistics on memory allocations for processes running on the NUMA nodes, revealing the
-efficiency of memory allocations in multi-node systems.
-
-
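-A minimal sketch, assuming a sysfs layout with at least one NUMA node, for viewing the per-node counters behind `mem.numa_nodes`:
-
-```bash
-# numa_hit, numa_miss, numa_foreign, interleave_hit, local_node, other_node per node
-for f in /sys/devices/system/node/node*/numastat; do
-    echo "== $f"
-    cat "$f"
-done
-```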
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per numa node
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| numa_node | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.numa_nodes | hit, miss, local, foreign, interleave, other | events/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/page_types.md b/collectors/proc.plugin/integrations/page_types.md
deleted file mode 100644
index b228629b6..000000000
--- a/collectors/proc.plugin/integrations/page_types.md
+++ /dev/null
@@ -1,113 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/page_types.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Page types"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Memory"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Page types
-
-
-<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/pagetypeinfo
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration provides metrics about the system's memory page types.
-
-
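-A minimal sketch for viewing the raw data (on recent kernels `/proc/pagetypeinfo` may be readable by root only):
-
-```bash
-# Free page counts per node, zone, migrate type and page order
-sudo head -n 20 /proc/pagetypeinfo
-```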
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Page types instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.pagetype_global | a dimension per pagesize | B |
-
-### Per node, zone, type
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| node_id | TBD |
-| node_zone | TBD |
-| node_type | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.pagetype | a dimension per pagesize | B |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/power_supply.md b/collectors/proc.plugin/integrations/power_supply.md
deleted file mode 100644
index 9a474e82a..000000000
--- a/collectors/proc.plugin/integrations/power_supply.md
+++ /dev/null
@@ -1,107 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/power_supply.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Power Supply"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Power Supply"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Power Supply
-
-
-<img src="https://netdata.cloud/img/powersupply.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /sys/class/power_supply
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration monitors power supply metrics, such as battery status, AC power status, and more.
-
-
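-A minimal sketch, assuming at least one device under `/sys/class/power_supply`, for checking the attributes the collector reads (not every device exposes every file):
-
-```bash
-# Capacity and status per power supply device
-for d in /sys/class/power_supply/*; do
-    echo "$d: $(cat "$d/capacity" 2>/dev/null) $(cat "$d/status" 2>/dev/null)"
-done
-```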
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per power device
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| device | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| powersupply.capacity | capacity | percentage |
-| powersupply.charge | empty_design, empty, now, full, full_design | Ah |
-| powersupply.energy | empty_design, empty, now, full, full_design | Wh |
-| powersupply.voltage | min_design, min, now, max, max_design | V |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ linux_power_supply_capacity ](https://github.com/netdata/netdata/blob/master/health/health.d/linux_power_supply.conf) | powersupply.capacity | percentage of remaining power supply capacity |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/pressure_stall_information.md b/collectors/proc.plugin/integrations/pressure_stall_information.md
deleted file mode 100644
index 53f4aa050..000000000
--- a/collectors/proc.plugin/integrations/pressure_stall_information.md
+++ /dev/null
@@ -1,129 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/pressure_stall_information.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Pressure Stall Information"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Pressure"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Pressure Stall Information
-
-
-<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/pressure
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Introduced in Linux kernel 4.20, `/proc/pressure` exposes pressure stall information (PSI). PSI is a
-feature that tracks the amount of time tasks are stalled due to resource contention, such as CPU,
-memory, or I/O.
-
-The collector monitors a separate pressure file per resource:
-
-- **cpu**: Tracks the amount of time tasks are stalled due to CPU contention.
-- **memory**: Tracks the amount of time tasks are stalled due to memory contention.
-- **io**: Tracks the amount of time tasks are stalled due to I/O contention.
-- **irq**: Tracks the amount of time tasks are stalled due to IRQ contention.
-
-Each file reports average stall percentages over the last 10 seconds, 1 minute, and 5 minutes, as well as a
-cumulative total stall time.
-
-Monitoring the `/proc/pressure` files can provide important insights into system performance and capacity planning:
-
-- **Identifying resource contention**: If these metrics are consistently high, it indicates that tasks are
- frequently being stalled due to lack of resources, which can significantly degrade system performance.
-
-- **Troubleshooting performance issues**: If a system is experiencing performance issues, these metrics can
- help identify whether resource contention is the cause.
-
-- **Capacity planning**: By monitoring these metrics over time, you can understand trends in resource
- utilization and make informed decisions about when to add more resources to your system.
-
-
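-A minimal sketch for reading the pressure files directly, assuming a kernel with PSI enabled (the irq file only exists on newer kernels):
-
-```bash
-# avg10/avg60/avg300 are stall percentages; total is cumulative stall time in microseconds
-for r in cpu memory io irq; do
-    echo "== $r"
-    cat /proc/pressure/$r 2>/dev/null
-done
-```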
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Pressure Stall Information instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.cpu_some_pressure | some10, some60, some300 | percentage |
-| system.cpu_some_pressure_stall_time | time | ms |
-| system.cpu_full_pressure | some10, some60, some300 | percentage |
-| system.cpu_full_pressure_stall_time | time | ms |
-| system.memory_some_pressure | some10, some60, some300 | percentage |
-| system.memory_some_pressure_stall_time | time | ms |
-| system.memory_full_pressure | some10, some60, some300 | percentage |
-| system.memory_full_pressure_stall_time | time | ms |
-| system.io_some_pressure | some10, some60, some300 | percentage |
-| system.io_some_pressure_stall_time | time | ms |
-| system.io_full_pressure | some10, some60, some300 | percentage |
-| system.io_full_pressure_stall_time | time | ms |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/sctp_statistics.md b/collectors/proc.plugin/integrations/sctp_statistics.md
deleted file mode 100644
index 15c0d424d..000000000
--- a/collectors/proc.plugin/integrations/sctp_statistics.md
+++ /dev/null
@@ -1,99 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/sctp_statistics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "SCTP Statistics"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Network"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# SCTP Statistics
-
-
-<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/net/sctp/snmp
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration provides statistics about the Stream Control Transmission Protocol (SCTP).
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per SCTP Statistics instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| sctp.established | established | associations |
-| sctp.transitions | active, passive, aborted, shutdown | transitions/s |
-| sctp.packets | received, sent | packets/s |
-| sctp.packet_errors | invalid, checksum | packets/s |
-| sctp.fragmentation | reassembled, fragmented | packets/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/socket_statistics.md b/collectors/proc.plugin/integrations/socket_statistics.md
deleted file mode 100644
index d8ef26647..000000000
--- a/collectors/proc.plugin/integrations/socket_statistics.md
+++ /dev/null
@@ -1,109 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/socket_statistics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Socket statistics"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Network"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Socket statistics
-
-
-<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/net/sockstat
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration provides socket statistics.
-
-
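-A minimal sketch for inspecting the source file directly:
-
-```bash
-# Socket counts and memory usage per protocol
-cat /proc/net/sockstat
-```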
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Socket statistics instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ip.sockstat_sockets | used | sockets |
-| ipv4.sockstat_tcp_sockets | alloc, orphan, inuse, timewait | sockets |
-| ipv4.sockstat_tcp_mem | mem | KiB |
-| ipv4.sockstat_udp_sockets | inuse | sockets |
-| ipv4.sockstat_udp_mem | mem | KiB |
-| ipv4.sockstat_udplite_sockets | inuse | sockets |
-| ipv4.sockstat_raw_sockets | inuse | sockets |
-| ipv4.sockstat_frag_sockets | inuse | fragments |
-| ipv4.sockstat_frag_mem | mem | KiB |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ tcp_orphans ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_orphans.conf) | ipv4.sockstat_tcp_sockets | orphan IPv4 TCP sockets utilization |
-| [ tcp_memory ](https://github.com/netdata/netdata/blob/master/health/health.d/tcp_mem.conf) | ipv4.sockstat_tcp_mem | TCP memory utilization |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/softirq_statistics.md b/collectors/proc.plugin/integrations/softirq_statistics.md
deleted file mode 100644
index f966cf971..000000000
--- a/collectors/proc.plugin/integrations/softirq_statistics.md
+++ /dev/null
@@ -1,133 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/softirq_statistics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "SoftIRQ statistics"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/CPU"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# SoftIRQ statistics
-
-
-<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/softirqs
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-In the Linux kernel, handling of hardware interrupts is split into two halves: the top half and the bottom half.
-The top half is the routine that responds immediately to an interrupt, while the bottom half is deferred to be processed later.
-
-Softirqs are a mechanism in the Linux kernel used to handle the bottom halves of interrupts, which can be
-deferred and processed later in a context where it's safe to enable interrupts.
-
-The actual work of handling the interrupt is offloaded to a softirq and executed later, when the system
-decides it's a good time to process it. This helps to keep the system responsive by not blocking the top
-half for too long, which could lead to missed interrupts.
-
-Monitoring `/proc/softirqs` is useful for:
-
-- **Performance tuning**: A high rate of softirqs could indicate a performance issue. For instance, a high
- rate of network softirqs (`NET_RX` and `NET_TX`) could indicate a network performance issue.
-
-- **Troubleshooting**: If a system is behaving unexpectedly, checking the softirqs could provide clues about
- what is going on. For example, a sudden increase in block device softirqs (BLOCK) might indicate a problem
- with a disk.
-
-- **Understanding system behavior**: Knowing what types of softirqs are happening can help you understand what
- your system is doing, particularly in terms of how it's interacting with hardware and how it's handling
- interrupts.
-
-
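-A minimal sketch for inspecting the per-CPU counters directly:
-
-```bash
-# Cumulative softirq counts per CPU; NET_RX/NET_TX rows reflect network load
-grep -E 'NET_RX|NET_TX|BLOCK|TIMER' /proc/softirqs
-```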
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per SoftIRQ statistics instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.softirqs | a dimension per softirq | softirqs/s |
-
-### Per cpu core
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| cpu | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cpu.softirqs | a dimension per softirq | softirqs/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/softnet_statistics.md b/collectors/proc.plugin/integrations/softnet_statistics.md
deleted file mode 100644
index 58e6cf6e5..000000000
--- a/collectors/proc.plugin/integrations/softnet_statistics.md
+++ /dev/null
@@ -1,135 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/softnet_statistics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Softnet Statistics"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Network"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Softnet Statistics
-
-
-<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/net/softnet_stat
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-`/proc/net/softnet_stat` provides statistics that relate to the handling of network packets by softirq.
-
-It provides information about:
-
-- The total number of processed packets (`processed`).
-- The number of packets dropped because the backlog queue was full (`dropped`).
-- The number of times net_rx_action ran out of quota while packets were still pending (`squeezed`).
-- The number of packets steered to this CPU via Receive Packet Steering (`received_rps`).
-- The number of times the flow limit was reached (`flow_limit_count`).
-
-Monitoring the `/proc/net/softnet_stat` file can be useful for:
-
-- **Network performance monitoring**: By tracking the total number of processed packets and how many packets
- were dropped, you can gain insights into your system's network performance.
-
-- **Troubleshooting**: If you're experiencing network-related issues, this collector can provide valuable clues.
- For instance, a high number of dropped packets may indicate a network problem.
-
-- **Capacity planning**: If your system is consistently processing near its maximum capacity of network
- packets, it might be time to consider upgrading your network infrastructure.
-
-
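-A minimal sketch, assuming GNU awk (for `strtonum`), for decoding the hexadecimal per-CPU counters:
-
-```bash
-# One row per CPU; the first three columns are processed, dropped and squeezed
-awk '{printf "cpu%d processed=%d dropped=%d squeezed=%d\n", NR-1, strtonum("0x" $1), strtonum("0x" $2), strtonum("0x" $3)}' /proc/net/softnet_stat
-```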
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Softnet Statistics instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |
-
-### Per cpu core
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cpu.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |
-| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/synproxy.md b/collectors/proc.plugin/integrations/synproxy.md
deleted file mode 100644
index 2db17ef6f..000000000
--- a/collectors/proc.plugin/integrations/synproxy.md
+++ /dev/null
@@ -1,97 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/synproxy.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Synproxy"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Firewall"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Synproxy
-
-
-<img src="https://netdata.cloud/img/firewall.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/net/stat/synproxy
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration provides statistics about the Synproxy netfilter module.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Synproxy instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| netfilter.synproxy_syn_received | received | packets/s |
-| netfilter.synproxy_conn_reopened | reopened | connections/s |
-| netfilter.synproxy_cookies | valid, invalid, retransmits | cookies/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/system_load_average.md b/collectors/proc.plugin/integrations/system_load_average.md
deleted file mode 100644
index 6e986d90c..000000000
--- a/collectors/proc.plugin/integrations/system_load_average.md
+++ /dev/null
@@ -1,128 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/system_load_average.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "System Load Average"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/System"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# System Load Average
-
-
-<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/loadavg
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-The `/proc/loadavg` file provides information about the system load average.
-
-The load average is a measure of the amount of computational work that a system performs. It is a
-representation of the average system load over a period of time.
-
-This file contains three numbers representing the system load averages for the last 1, 5, and 15 minutes,
-respectively. It also includes the number of currently runnable processes and the total number of processes.
-
-Monitoring the load average can be used for:
-
-- **System performance**: If the load average is too high, it may indicate that your system is overloaded.
- On a system with a single CPU, if the load average is 1, it means the single CPU is fully utilized. If the
- load averages are consistently higher than the number of CPUs/cores, it may indicate that your system is
- overloaded and tasks are waiting for CPU time.
-
-- **Troubleshooting**: If the load average is unexpectedly high, it can be a sign of a problem. This could be
- due to a runaway process, a software bug, or a hardware issue.
-
-- **Capacity planning**: By monitoring the load average over time, you can understand the trends in your
- system's workload. This can help with capacity planning and scaling decisions.
-
-Remember that load average not only considers CPU usage, but also includes processes waiting for disk I/O.
-Therefore, high load averages could be due to I/O contention as well as CPU contention.
-
-
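-A minimal sketch for reading the file and judging it against the core count:
-
-```bash
-# Fields: 1/5/15-minute averages, runnable/total scheduling entities, last PID
-cat /proc/loadavg
-# A sustained load average well above this core count suggests saturation
-nproc
-```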
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per System Load Average instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.load | load1, load5, load15 | load |
-| system.active_processes | active | processes |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | number of active CPU cores in the system |
-| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system fifteen-minute load average |
-| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system five-minute load average |
-| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/health/health.d/load.conf) | system.load | system one-minute load average |
-| [ active_processes ](https://github.com/netdata/netdata/blob/master/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/system_statistics.md b/collectors/proc.plugin/integrations/system_statistics.md
deleted file mode 100644
index f3df1a19a..000000000
--- a/collectors/proc.plugin/integrations/system_statistics.md
+++ /dev/null
@@ -1,169 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/system_statistics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "System statistics"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/System"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# System statistics
-
-
-<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/stat
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-CPU utilization, states, frequencies, and other key Linux system performance metrics.
-
-The `/proc/stat` file provides various types of system statistics:
-
-- The overall system CPU usage statistics
-- Per CPU core statistics
-- The total context switching of the system
-- The total number of processes running
-- The total CPU interrupts
-- The total CPU softirqs
-
-The collector also reads:
-
-- `/proc/schedstat` for statistics about the process scheduler in the Linux kernel.
-- `/sys/devices/system/cpu/[X]/thermal_throttle/core_throttle_count` to get the count of thermal throttling events for a specific CPU core on Linux systems.
-- `/sys/devices/system/cpu/[X]/thermal_throttle/package_throttle_count` to get the count of thermal throttling events for a specific CPU package on a Linux system.
-- `/sys/devices/system/cpu/[X]/cpufreq/scaling_cur_freq` to get the current operating frequency of a specific CPU core.
-- `/sys/devices/system/cpu/[X]/cpufreq/stats/time_in_state` to get the amount of time the CPU has spent in each of its available frequency states.
-- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/name` to get the names of the idle states for each CPU core in a Linux system.
-- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/time` to get the total time each specific CPU core has spent in each idle state since the system was started.
-
-
-
-
-This collector is only supported on the following platforms:
-
-- linux
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-The collector auto-detects all metrics. No configuration is needed.
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The collector disables CPU frequency and idle state monitoring when there are more than 128 CPU cores available.
-
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per System statistics instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |
-| system.intr | interrupts | interrupts/s |
-| system.ctxt | switches | context switches/s |
-| system.forks | started | processes/s |
-| system.processes | running, blocked | processes |
-| cpu.core_throttling | a dimension per cpu core | events/s |
-| cpu.package_throttling | a dimension per package | events/s |
-| cpu.cpufreq | a dimension per cpu core | MHz |
-
-### Per cpu core
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| cpu | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| cpu.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |
-| cpuidle.cpu_cstate_residency_time | a dimension per c-state | percentage |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |
-| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |
-| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/system_uptime.md b/collectors/proc.plugin/integrations/system_uptime.md
deleted file mode 100644
index 0954c0642..000000000
--- a/collectors/proc.plugin/integrations/system_uptime.md
+++ /dev/null
@@ -1,108 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/system_uptime.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "System Uptime"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/System"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# System Uptime
-
-
-<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/uptime
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-The amount of time the system has been up (running).
-
-Uptime is a critical aspect of overall system performance:
-
-- **Availability**: Uptime monitoring can show whether a server is consistently available or experiences frequent downtimes.
-- **Performance Monitoring**: While server uptime alone doesn't provide detailed performance data, analyzing the duration and frequency of downtimes can help identify patterns or trends.
-- **Proactive problem detection**: If server uptime monitoring reveals unexpected downtimes or a decreasing uptime trend, it can serve as an early warning sign of potential problems.
-- **Root cause analysis**: When investigating server downtime, the uptime metric alone may not provide enough information to pinpoint the exact cause.
-- **Load balancing**: Uptime data can indirectly indicate load balancing issues if certain servers have significantly lower uptimes than others.
-- **Optimize maintenance efforts**: Servers with consistently low uptimes or frequent downtimes may require more attention.
-- **Compliance requirements**: Server uptime data can be used to demonstrate compliance with regulatory requirements or SLAs that mandate a minimum level of server availability.
-
-
-
-
-This collector is only supported on the following platforms:
-
-- linux
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per System Uptime instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.uptime | uptime | seconds |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/wireless_network_interfaces.md b/collectors/proc.plugin/integrations/wireless_network_interfaces.md
deleted file mode 100644
index a8d2406ee..000000000
--- a/collectors/proc.plugin/integrations/wireless_network_interfaces.md
+++ /dev/null
@@ -1,100 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/wireless_network_interfaces.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "Wireless network interfaces"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Network"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Wireless network interfaces
-
-
-<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/net/wireless
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor wireless devices with metrics about status, link quality, signal level, noise level and more.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per wireless device
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| wireless.status | status | status |
-| wireless.link_quality | link_quality | value |
-| wireless.signal_level | signal_level | dBm |
-| wireless.noise_level | noise_level | dBm |
-| wireless.discarded_packets | nwid, crypt, frag, retry, misc | packets/s |
-| wireless.missed_beacons | missed_beacons | frames/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md b/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md
deleted file mode 100644
index c200ba673..000000000
--- a/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md
+++ /dev/null
@@ -1,125 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/zfs_adaptive_replacement_cache.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "ZFS Adaptive Replacement Cache"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Filesystem/ZFS"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# ZFS Adaptive Replacement Cache
-
-
-<img src="https://netdata.cloud/img/filesystem.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/spl/kstat/zfs/arcstats
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration monitors ZFS Adaptive Replacement Cache (ARC) statistics.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per ZFS Adaptive Replacement Cache instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| zfs.arc_size | arcsz, target, min, max | MiB |
-| zfs.l2_size | actual, size | MiB |
-| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |
-| zfs.bytes | read, write | KiB/s |
-| zfs.hits | hits, misses | percentage |
-| zfs.hits_rate | hits, misses | events/s |
-| zfs.dhits | hits, misses | percentage |
-| zfs.dhits_rate | hits, misses | events/s |
-| zfs.phits | hits, misses | percentage |
-| zfs.phits_rate | hits, misses | events/s |
-| zfs.mhits | hits, misses | percentage |
-| zfs.mhits_rate | hits, misses | events/s |
-| zfs.l2hits | hits, misses | percentage |
-| zfs.l2hits_rate | hits, misses | events/s |
-| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |
-| zfs.arc_size_breakdown | recent, frequent | percentage |
-| zfs.memory_ops | direct, throttled, indirect | operations/s |
-| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |
-| zfs.actual_hits | hits, misses | percentage |
-| zfs.actual_hits_rate | hits, misses | events/s |
-| zfs.demand_data_hits | hits, misses | percentage |
-| zfs.demand_data_hits_rate | hits, misses | events/s |
-| zfs.prefetch_data_hits | hits, misses | percentage |
-| zfs.prefetch_data_hits_rate | hits, misses | events/s |
-| zfs.hash_elements | current, max | elements |
-| zfs.hash_chains | current, max | chains |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/zfs_pools.md b/collectors/proc.plugin/integrations/zfs_pools.md
deleted file mode 100644
index 2985d39b0..000000000
--- a/collectors/proc.plugin/integrations/zfs_pools.md
+++ /dev/null
@@ -1,105 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/zfs_pools.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "ZFS Pools"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Filesystem/ZFS"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# ZFS Pools
-
-
-<img src="https://netdata.cloud/img/filesystem.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /proc/spl/kstat/zfs
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This integration provides metrics about the state of ZFS pools.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per zfs pool
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| pool | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| zfspool.state | online, degraded, faulted, offline, removed, unavail, suspended | boolean |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ zfs_pool_state_warn ](https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is degraded |
-| [ zfs_pool_state_crit ](https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is faulted or unavail |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/integrations/zram.md b/collectors/proc.plugin/integrations/zram.md
deleted file mode 100644
index 111b17c62..000000000
--- a/collectors/proc.plugin/integrations/zram.md
+++ /dev/null
@@ -1,106 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/integrations/zram.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/proc.plugin/metadata.yaml"
-sidebar_label: "ZRAM"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Memory"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# ZRAM
-
-
-<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
-
-
-Plugin: proc.plugin
-Module: /sys/block/zram
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-zRAM, or compressed RAM, is a block device that uses a portion of your system's RAM as its backing store.
-The data written to this block device is compressed and stored in memory.
-
-The collector provides information about the operation and the effectiveness of zRAM on your system.
-
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per zram device
-
-
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| device | TBD |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.zram_usage | compressed, metadata | MiB |
-| mem.zram_savings | savings, original | MiB |
-| mem.zram_ratio | ratio | ratio |
-| mem.zram_efficiency | percent | percentage |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-
-
-There are no configuration options.
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/proc.plugin/ipc.c b/collectors/proc.plugin/ipc.c
deleted file mode 100644
index 204977bdf..000000000
--- a/collectors/proc.plugin/ipc.c
+++ /dev/null
@@ -1,554 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#include <sys/sem.h>
-#include <sys/msg.h>
-#include <sys/shm.h>
-
-
-#ifndef SEMVMX
-#define SEMVMX 32767 /* <= 32767 semaphore maximum value */
-#endif
-
-/* Some versions of libc only define IPC_INFO when __USE_GNU is defined. */
-#ifndef IPC_INFO
-#define IPC_INFO 3
-#endif
-
-struct ipc_limits {
- uint64_t shmmni; /* max number of segments */
- uint64_t shmmax; /* max segment size */
- uint64_t shmall; /* max total shared memory */
- uint64_t shmmin; /* min segment size */
-
- int semmni; /* max number of arrays */
- int semmsl; /* max semaphores per array */
- int semmns; /* max semaphores system wide */
- int semopm; /* max ops per semop call */
- unsigned int semvmx; /* semaphore max value (constant) */
-
- int msgmni; /* max queues system wide */
- size_t msgmax; /* max size of message */
- int msgmnb; /* default max size of queue */
-};
-
-struct ipc_status {
- int semusz; /* current number of arrays */
- int semaem; /* current semaphores system wide */
-};
-
-/*
- * The last arg of semctl is a union semun, but where is it defined? X/OPEN
- * tells us to define it ourselves, but until recently Linux include files
- * would also define it.
- */
-#ifndef HAVE_UNION_SEMUN
-/* according to X/OPEN we have to define it ourselves */
-union semun {
- int val;
- struct semid_ds *buf;
- unsigned short int *array;
- struct seminfo *__buf;
-};
-#endif
-
-struct message_queue {
- unsigned long long id;
- int found;
-
- RRDDIM *rd_messages;
- RRDDIM *rd_bytes;
- unsigned long long messages;
- unsigned long long bytes;
-
- struct message_queue * next;
-};
-
-struct shm_stats {
- unsigned long long segments;
- unsigned long long bytes;
-};
-
-static inline int ipc_sem_get_limits(struct ipc_limits *lim) {
- static procfile *ff = NULL;
- static int error_shown = 0;
- static char filename[FILENAME_MAX + 1] = "";
-
- if(unlikely(!filename[0]))
- snprintfz(filename, FILENAME_MAX, "%s/proc/sys/kernel/sem", netdata_configured_host_prefix);
-
- if(unlikely(!ff)) {
- ff = procfile_open(filename, NULL, PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) {
- if(unlikely(!error_shown)) {
- collector_error("IPC: Cannot open file '%s'.", filename);
- error_shown = 1;
- }
- goto ipc;
- }
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) {
- if(unlikely(!error_shown)) {
- collector_error("IPC: Cannot read file '%s'.", filename);
- error_shown = 1;
- }
- goto ipc;
- }
-
- if(procfile_lines(ff) >= 1 && procfile_linewords(ff, 0) >= 4) {
- lim->semvmx = SEMVMX;
- lim->semmsl = str2i(procfile_lineword(ff, 0, 0));
- lim->semmns = str2i(procfile_lineword(ff, 0, 1));
- lim->semopm = str2i(procfile_lineword(ff, 0, 2));
- lim->semmni = str2i(procfile_lineword(ff, 0, 3));
- return 0;
- }
- else {
- if(unlikely(!error_shown)) {
- collector_error("IPC: Invalid content in file '%s'.", filename);
- error_shown = 1;
- }
- goto ipc;
- }
-
-ipc:
- // cannot do it from the file
- // query IPC
- {
- struct seminfo seminfo = {.semmni = 0};
- union semun arg = {.array = (ushort *) &seminfo};
-
- if(unlikely(semctl(0, 0, IPC_INFO, arg) < 0)) {
- collector_error("IPC: Failed to read '%s' and request IPC_INFO with semctl().", filename);
- goto error;
- }
-
- lim->semvmx = SEMVMX;
- lim->semmni = seminfo.semmni;
- lim->semmsl = seminfo.semmsl;
- lim->semmns = seminfo.semmns;
- lim->semopm = seminfo.semopm;
- return 0;
- }
-
-error:
- lim->semvmx = 0;
- lim->semmni = 0;
- lim->semmsl = 0;
- lim->semmns = 0;
- lim->semopm = 0;
- return -1;
-}
-
-/*
-printf ("------ Semaphore Limits --------\n");
-printf ("max number of arrays = %d\n", limits.semmni);
-printf ("max semaphores per array = %d\n", limits.semmsl);
-printf ("max semaphores system wide = %d\n", limits.semmns);
-printf ("max ops per semop call = %d\n", limits.semopm);
-printf ("semaphore max value = %u\n", limits.semvmx);
-
-printf ("------ Semaphore Status --------\n");
-printf ("used arrays = %d\n", status.semusz);
-printf ("allocated semaphores = %d\n", status.semaem);
-*/
-
-static inline int ipc_sem_get_status(struct ipc_status *st) {
- struct seminfo seminfo;
- union semun arg;
-
- arg.array = (ushort *) (void *) &seminfo;
-
- if(unlikely(semctl (0, 0, SEM_INFO, arg) < 0)) {
- /* kernel not configured for semaphores */
- static int error_shown = 0;
- if(unlikely(!error_shown)) {
- collector_error("IPC: kernel is not configured for semaphores");
- error_shown = 1;
- }
- st->semusz = 0;
- st->semaem = 0;
- return -1;
- }
-
- st->semusz = seminfo.semusz;
- st->semaem = seminfo.semaem;
- return 0;
-}
-
-int ipc_msq_get_info(char *msg_filename, struct message_queue **message_queue_root) {
- static procfile *ff;
- struct message_queue *msq;
-
- if(unlikely(!ff)) {
- ff = procfile_open(msg_filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 1;
-
- size_t lines = procfile_lines(ff);
- size_t words = 0;
-
- if(unlikely(lines < 2)) {
- collector_error("Cannot read %s. Expected 2 or more lines, read %zu.", procfile_filename(ff), lines);
- return 1;
- }
-
- // loop through all lines except the first and the last ones
- size_t l;
- for(l = 1; l < lines - 1; l++) {
- words = procfile_linewords(ff, l);
- if(unlikely(words < 2)) continue;
- if(unlikely(words < 14)) {
- collector_error("Cannot read %s line. Expected 14 params, read %zu.", procfile_filename(ff), words);
- continue;
- }
-
- // find the id in the linked list or create a new structure
- int found = 0;
-
- unsigned long long id = str2ull(procfile_lineword(ff, l, 1), NULL);
- for(msq = *message_queue_root; msq ; msq = msq->next) {
- if(unlikely(id == msq->id)) {
- found = 1;
- break;
- }
- }
-
- if(unlikely(!found)) {
- msq = callocz(1, sizeof(struct message_queue));
- msq->next = *message_queue_root;
- *message_queue_root = msq;
- msq->id = id;
- }
-
- msq->messages = str2ull(procfile_lineword(ff, l, 4), NULL);
- msq->bytes = str2ull(procfile_lineword(ff, l, 3), NULL);
- msq->found = 1;
- }
-
- return 0;
-}
-
-int ipc_shm_get_info(char *shm_filename, struct shm_stats *shm) {
- static procfile *ff;
-
- if(unlikely(!ff)) {
- ff = procfile_open(shm_filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 1;
-
- size_t lines = procfile_lines(ff);
- size_t words = 0;
-
- if(unlikely(lines < 2)) {
- collector_error("Cannot read %s. Expected 2 or more lines, read %zu.", procfile_filename(ff), lines);
- return 1;
- }
-
- shm->segments = 0;
- shm->bytes = 0;
-
- // loop through all lines except the first and the last ones
- size_t l;
- for(l = 1; l < lines - 1; l++) {
- words = procfile_linewords(ff, l);
- if(unlikely(words < 2)) continue;
- if(unlikely(words < 16)) {
- collector_error("Cannot read %s line. Expected 16 params, read %zu.", procfile_filename(ff), words);
- continue;
- }
-
- shm->segments++;
- shm->bytes += str2ull(procfile_lineword(ff, l, 3), NULL);
- }
-
- return 0;
-}
-
-int do_ipc(int update_every, usec_t dt) {
- (void)dt;
-
- static int do_sem = -1, do_msg = -1, do_shm = -1;
- static int read_limits_next = -1;
- static struct ipc_limits limits;
- static struct ipc_status status;
- static const RRDVAR_ACQUIRED *arrays_max = NULL, *semaphores_max = NULL;
- static RRDSET *st_semaphores = NULL, *st_arrays = NULL;
- static RRDDIM *rd_semaphores = NULL, *rd_arrays = NULL;
- static char *msg_filename = NULL;
- static struct message_queue *message_queue_root = NULL;
- static long long dimensions_limit;
- static char *shm_filename = NULL;
-
- if(unlikely(do_sem == -1)) {
- do_msg = config_get_boolean("plugin:proc:ipc", "message queues", CONFIG_BOOLEAN_YES);
- do_sem = config_get_boolean("plugin:proc:ipc", "semaphore totals", CONFIG_BOOLEAN_YES);
- do_shm = config_get_boolean("plugin:proc:ipc", "shared memory totals", CONFIG_BOOLEAN_YES);
-
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sysvipc/msg");
- msg_filename = config_get("plugin:proc:ipc", "msg filename to monitor", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sysvipc/shm");
- shm_filename = config_get("plugin:proc:ipc", "shm filename to monitor", filename);
-
- dimensions_limit = config_get_number("plugin:proc:ipc", "max dimensions in memory allowed", 50);
-
- // make sure it works
- if(ipc_sem_get_limits(&limits) == -1) {
- collector_error("unable to fetch semaphore limits");
- do_sem = CONFIG_BOOLEAN_NO;
- }
- else if(ipc_sem_get_status(&status) == -1) {
- collector_error("unable to fetch semaphore statistics");
- do_sem = CONFIG_BOOLEAN_NO;
- }
- else {
- // create the charts
- if(unlikely(!st_semaphores)) {
- st_semaphores = rrdset_create_localhost(
- "system"
- , "ipc_semaphores"
- , NULL
- , "ipc semaphores"
- , NULL
- , "IPC Semaphores"
- , "semaphores"
- , PLUGIN_PROC_NAME
- , "ipc"
- , NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES
- , localhost->rrd_update_every
- , RRDSET_TYPE_AREA
- );
- rd_semaphores = rrddim_add(st_semaphores, "semaphores", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- if(unlikely(!st_arrays)) {
- st_arrays = rrdset_create_localhost(
- "system"
- , "ipc_semaphore_arrays"
- , NULL
- , "ipc semaphores"
- , NULL
- , "IPC Semaphore Arrays"
- , "arrays"
- , PLUGIN_PROC_NAME
- , "ipc"
- , NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS
- , localhost->rrd_update_every
- , RRDSET_TYPE_AREA
- );
- rd_arrays = rrddim_add(st_arrays, "arrays", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- // variables
- semaphores_max = rrdvar_custom_host_variable_add_and_acquire(localhost, "ipc_semaphores_max");
- arrays_max = rrdvar_custom_host_variable_add_and_acquire(localhost, "ipc_semaphores_arrays_max");
- }
-
- struct stat stbuf;
- if (stat(msg_filename, &stbuf)) {
- do_msg = CONFIG_BOOLEAN_NO;
- }
-
- if(unlikely(do_sem == CONFIG_BOOLEAN_NO && do_msg == CONFIG_BOOLEAN_NO)) {
- collector_error("ipc module disabled");
- return 1;
- }
- }
-
- if(likely(do_sem != CONFIG_BOOLEAN_NO)) {
- if(unlikely(read_limits_next < 0)) {
- if(unlikely(ipc_sem_get_limits(&limits) == -1)) {
- collector_error("Unable to fetch semaphore limits.");
- }
- else {
- if(semaphores_max) rrdvar_custom_host_variable_set(localhost, semaphores_max, limits.semmns);
- if(arrays_max) rrdvar_custom_host_variable_set(localhost, arrays_max, limits.semmni);
-
- st_arrays->red = limits.semmni;
- st_semaphores->red = limits.semmns;
-
- read_limits_next = 60 / update_every;
- }
- }
- else
- read_limits_next--;
-
- if(unlikely(ipc_sem_get_status(&status) == -1)) {
- collector_error("Unable to get semaphore statistics");
- return 0;
- }
-
- rrddim_set_by_pointer(st_semaphores, rd_semaphores, status.semaem);
- rrdset_done(st_semaphores);
-
- rrddim_set_by_pointer(st_arrays, rd_arrays, status.semusz);
- rrdset_done(st_arrays);
- }
-
- if(likely(do_msg != CONFIG_BOOLEAN_NO)) {
- static RRDSET *st_msq_messages = NULL, *st_msq_bytes = NULL;
-
- int ret = ipc_msq_get_info(msg_filename, &message_queue_root);
-
- if(!ret && message_queue_root) {
- if(unlikely(!st_msq_messages))
- st_msq_messages = rrdset_create_localhost(
- "system"
- , "message_queue_messages"
- , NULL
- , "ipc message queues"
- , NULL
- , "IPC Message Queue Number of Messages"
- , "messages"
- , PLUGIN_PROC_NAME
- , "ipc"
- , NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- if(unlikely(!st_msq_bytes))
- st_msq_bytes = rrdset_create_localhost(
- "system"
- , "message_queue_bytes"
- , NULL
- , "ipc message queues"
- , NULL
- , "IPC Message Queue Used Bytes"
- , "bytes"
- , PLUGIN_PROC_NAME
- , "ipc"
- , NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- struct message_queue *msq = message_queue_root, *msq_prev = NULL;
- while(likely(msq)){
- if(likely(msq->found)) {
- if(unlikely(!msq->rd_messages || !msq->rd_bytes)) {
- char id[RRD_ID_LENGTH_MAX + 1];
- snprintfz(id, RRD_ID_LENGTH_MAX, "%llu", msq->id);
- if(likely(!msq->rd_messages)) msq->rd_messages = rrddim_add(st_msq_messages, id, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- if(likely(!msq->rd_bytes)) msq->rd_bytes = rrddim_add(st_msq_bytes, id, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_msq_messages, msq->rd_messages, msq->messages);
- rrddim_set_by_pointer(st_msq_bytes, msq->rd_bytes, msq->bytes);
-
- msq->found = 0;
- }
- else {
- rrddim_is_obsolete___safe_from_collector_thread(st_msq_messages, msq->rd_messages);
- rrddim_is_obsolete___safe_from_collector_thread(st_msq_bytes, msq->rd_bytes);
-
- // remove message queue from the linked list
- if(!msq_prev)
- message_queue_root = msq->next;
- else
- msq_prev->next = msq->next;
- freez(msq);
- msq = NULL;
- }
- if(likely(msq)) {
- msq_prev = msq;
- msq = msq->next;
- }
- else if(!msq_prev)
- msq = message_queue_root;
- else
- msq = msq_prev->next;
- }
-
- rrdset_done(st_msq_messages);
- rrdset_done(st_msq_bytes);
-
- long long dimensions_num = rrdset_number_of_dimensions(st_msq_messages);
-
- if(unlikely(dimensions_num > dimensions_limit)) {
- collector_info("Message queue statistics has been disabled");
- collector_info("There are %lld dimensions in memory but limit was set to %lld", dimensions_num, dimensions_limit);
- rrdset_is_obsolete___safe_from_collector_thread(st_msq_messages);
- rrdset_is_obsolete___safe_from_collector_thread(st_msq_bytes);
- st_msq_messages = NULL;
- st_msq_bytes = NULL;
- do_msg = CONFIG_BOOLEAN_NO;
- }
- else if(unlikely(!message_queue_root)) {
- collector_info("Making chart %s (%s) obsolete since it does not have any dimensions", rrdset_name(st_msq_messages), rrdset_id(st_msq_messages));
- rrdset_is_obsolete___safe_from_collector_thread(st_msq_messages);
- st_msq_messages = NULL;
-
- collector_info("Making chart %s (%s) obsolete since it does not have any dimensions", rrdset_name(st_msq_bytes), rrdset_id(st_msq_bytes));
- rrdset_is_obsolete___safe_from_collector_thread(st_msq_bytes);
- st_msq_bytes = NULL;
- }
- }
- }
-
- if(likely(do_shm != CONFIG_BOOLEAN_NO)) {
- static RRDSET *st_shm_segments = NULL, *st_shm_bytes = NULL;
- static RRDDIM *rd_shm_segments = NULL, *rd_shm_bytes = NULL;
- struct shm_stats shm;
-
- if(!ipc_shm_get_info(shm_filename, &shm)) {
- if(unlikely(!st_shm_segments)) {
- st_shm_segments = rrdset_create_localhost(
- "system"
- , "shared_memory_segments"
- , NULL
- , "ipc shared memory"
- , NULL
- , "IPC Shared Memory Number of Segments"
- , "segments"
- , PLUGIN_PROC_NAME
- , "ipc"
- , NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_shm_segments = rrddim_add(st_shm_segments, "segments", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_shm_segments, rd_shm_segments, shm.segments);
- rrdset_done(st_shm_segments);
-
- if(unlikely(!st_shm_bytes)) {
- st_shm_bytes = rrdset_create_localhost(
- "system"
- , "shared_memory_bytes"
- , NULL
- , "ipc shared memory"
- , NULL
- , "IPC Shared Memory Used Bytes"
- , "bytes"
- , PLUGIN_PROC_NAME
- , "ipc"
- , NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_shm_bytes = rrddim_add(st_shm_bytes, "bytes", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_shm_bytes, rd_shm_bytes, shm.bytes);
- rrdset_done(st_shm_bytes);
- }
- }
-
- return 0;
-}
diff --git a/collectors/proc.plugin/metadata.yaml b/collectors/proc.plugin/metadata.yaml
deleted file mode 100644
index 45351b36f..000000000
--- a/collectors/proc.plugin/metadata.yaml
+++ /dev/null
@@ -1,5299 +0,0 @@
-plugin_name: proc.plugin
-modules:
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/stat
- monitored_instance:
- name: System statistics
- link: ""
- categories:
- - data-collection.linux-systems.system-metrics
- icon_filename: "linuxserver.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - cpu utilization
- - process counts
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
-          CPU utilization, states, frequencies, and other key Linux system performance metrics.
-
- The `/proc/stat` file provides various types of system statistics:
-
- - The overall system CPU usage statistics
- - Per CPU core statistics
- - The total context switching of the system
- - The total number of processes running
- - The total CPU interrupts
- - The total CPU softirqs
-
- The collector also reads:
-
- - `/proc/schedstat` for statistics about the process scheduler in the Linux kernel.
- - `/sys/devices/system/cpu/[X]/thermal_throttle/core_throttle_count` to get the count of thermal throttling events for a specific CPU core on Linux systems.
- - `/sys/devices/system/cpu/[X]/thermal_throttle/package_throttle_count` to get the count of thermal throttling events for a specific CPU package on a Linux system.
- - `/sys/devices/system/cpu/[X]/cpufreq/scaling_cur_freq` to get the current operating frequency of a specific CPU core.
- - `/sys/devices/system/cpu/[X]/cpufreq/stats/time_in_state` to get the amount of time the CPU has spent in each of its available frequency states.
- - `/sys/devices/system/cpu/[X]/cpuidle/state[X]/name` to get the names of the idle states for each CPU core in a Linux system.
- - `/sys/devices/system/cpu/[X]/cpuidle/state[X]/time` to get the total time each specific CPU core has spent in each idle state since the system was started.
- method_description: ""
- supported_platforms:
- include: ["linux"]
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: |
- The collector auto-detects all metrics. No configuration is needed.
- limits:
- description: ""
- performance_impact:
- description: |
-          The collector disables CPU frequency and idle state monitoring when there are more than 128 CPU cores available.
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- section_name: "plugin:proc:/proc/stat"
- name: "netdata.conf"
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: 10min_cpu_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf
- metric: system.cpu
- info: average CPU utilization over the last 10 minutes (excluding iowait, nice and steal)
- os: "linux"
- - name: 10min_cpu_iowait
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf
- metric: system.cpu
- info: average CPU iowait time over the last 10 minutes
- os: "linux"
- - name: 20min_steal_cpu
- link: https://github.com/netdata/netdata/blob/master/health/health.d/cpu.conf
- metric: system.cpu
- info: average CPU steal time over the last 20 minutes
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.cpu
- description: Total CPU utilization
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: guest_nice
- - name: guest
- - name: steal
- - name: softirq
- - name: irq
- - name: user
- - name: system
- - name: nice
- - name: iowait
- - name: idle
- - name: system.intr
- description: CPU Interrupts
- unit: "interrupts/s"
- chart_type: line
- dimensions:
- - name: interrupts
- - name: system.ctxt
- description: CPU Context Switches
- unit: "context switches/s"
- chart_type: line
- dimensions:
- - name: switches
- - name: system.forks
- description: Started Processes
- unit: "processes/s"
- chart_type: line
- dimensions:
- - name: started
- - name: system.processes
- description: System Processes
- unit: "processes"
- chart_type: line
- dimensions:
- - name: running
- - name: blocked
- - name: cpu.core_throttling
- description: Core Thermal Throttling Events
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: a dimension per cpu core
- - name: cpu.package_throttling
- description: Package Thermal Throttling Events
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: a dimension per package
- - name: cpu.cpufreq
- description: Current CPU Frequency
- unit: "MHz"
- chart_type: line
- dimensions:
- - name: a dimension per cpu core
- - name: cpu core
- description: ""
- labels:
- - name: cpu
- description: TBD
- metrics:
- - name: cpu.cpu
- description: Core utilization
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: guest_nice
- - name: guest
- - name: steal
- - name: softirq
- - name: irq
- - name: user
- - name: system
- - name: nice
- - name: iowait
- - name: idle
- - name: cpuidle.cpu_cstate_residency_time
- description: C-state residency time
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: a dimension per c-state
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/sys/kernel/random/entropy_avail
- monitored_instance:
- name: Entropy
- link: ""
- categories:
- - data-collection.linux-systems.system-metrics
- icon_filename: "syslog.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - entropy
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
-          Entropy is a measure of the randomness or unpredictability of data.
-
- In the context of cryptography, entropy is used to generate random numbers or keys that are essential for
- secure communication and encryption. Without a good source of entropy, cryptographic protocols can become
- vulnerable to attacks that exploit the predictability of the generated keys.
-
- In most operating systems, entropy is generated by collecting random events from various sources, such as
- hardware interrupts, mouse movements, keyboard presses, and disk activity. These events are fed into a pool
- of entropy, which is then used to generate random numbers when needed.
-
- The `/dev/random` device in Linux is one such source of entropy, and it provides an interface for programs
- to access the pool of entropy. When a program requests random numbers, it reads from the `/dev/random` device,
- which blocks until enough entropy is available to generate the requested numbers. This ensures that the
- generated numbers are truly random and not predictable.
-
- However, if the pool of entropy gets depleted, the `/dev/random` device may block indefinitely, causing
- programs that rely on random numbers to slow down or even freeze. This is especially problematic for
- cryptographic protocols that require a continuous stream of random numbers, such as SSL/TLS and SSH.
-
- To avoid this issue, some systems use a hardware random number generator (RNG) to generate high-quality
- entropy. A hardware RNG generates random numbers by measuring physical phenomena, such as thermal noise or
- radioactive decay. These sources of randomness are considered to be more reliable and unpredictable than
- software-based sources.
-
- One such hardware RNG is the Trusted Platform Module (TPM), which is a dedicated hardware chip that is used
- for cryptographic operations and secure boot. The TPM contains a built-in hardware RNG that generates
- high-quality entropy, which can be used to seed the pool of entropy in the operating system.
-
- Alternatively, software-based solutions such as `Haveged` can be used to generate additional entropy by
- exploiting sources of randomness in the system, such as CPU utilization and network traffic. These solutions
- can help to mitigate the risk of entropy depletion, but they may not be as reliable as hardware-based solutions.
- method_description: ""
- supported_platforms:
- include: ["linux"]
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: lowest_entropy
- link: https://github.com/netdata/netdata/blob/master/health/health.d/entropy.conf
- metric: system.entropy
- info: minimum number of bits of entropy available for the kernel’s random number generator
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.entropy
- description: Available Entropy
- unit: "entropy"
- chart_type: line
- dimensions:
- - name: entropy
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/uptime
- monitored_instance:
- name: System Uptime
- link: ""
- categories:
- - data-collection.linux-systems.system-metrics
- icon_filename: "linuxserver.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - uptime
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- The amount of time the system has been up (running).
-
- Uptime is a critical aspect of overall system performance:
-
- - **Availability**: Uptime monitoring can show whether a server is consistently available or experiences frequent downtimes.
- - **Performance Monitoring**: While server uptime alone doesn't provide detailed performance data, analyzing the duration and frequency of downtimes can help identify patterns or trends.
- - **Proactive problem detection**: If server uptime monitoring reveals unexpected downtimes or a decreasing uptime trend, it can serve as an early warning sign of potential problems.
- - **Root cause analysis**: When investigating server downtime, the uptime metric alone may not provide enough information to pinpoint the exact cause.
- - **Load balancing**: Uptime data can indirectly indicate load balancing issues if certain servers have significantly lower uptimes than others.
- - **Optimize maintenance efforts**: Servers with consistently low uptimes or frequent downtimes may require more attention.
- - **Compliance requirements**: Server uptime data can be used to demonstrate compliance with regulatory requirements or SLAs that mandate a minimum level of server availability.
- method_description: ""
- supported_platforms:
- include: ["linux"]
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.uptime
- description: System Uptime
- unit: "seconds"
- chart_type: line
- dimensions:
- - name: uptime
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/vmstat
- monitored_instance:
- name: Memory Statistics
- link: ""
- categories:
- - data-collection.linux-systems.memory-metrics
- icon_filename: "linuxserver.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - swap
- - page faults
- - oom
- - numa
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
-          The Linux virtual memory subsystem.
-
-          This integration reports information about memory management, indicating how effectively the kernel
-          allocates and frees memory resources in response to system demands.
-
- Monitors page faults, which occur when a process requests a portion of its memory that isn't
- immediately available. Monitoring these events can help diagnose inefficiencies in memory management and
- provide insights into application behavior.
-
- Tracks swapping activity — a vital aspect of memory management where the kernel moves data from RAM to
- swap space, and vice versa, based on memory demand and usage. It also monitors the utilization of zswap,
- a compressed cache for swap pages, and provides insights into its usage and performance implications.
-
- In the context of virtualized environments, it tracks the ballooning mechanism which is used to balance
- memory resources between host and guest systems.
-
- For systems using NUMA architecture, it provides insights into the local and remote memory accesses, which
- can impact the performance based on the memory access times.
-
- The collector also watches for 'Out of Memory' kills, a drastic measure taken by the system when it runs out
- of memory resources.
- method_description: ""
- supported_platforms:
- include: ["linux"]
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: 30min_ram_swapped_out
- link: https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf
- metric: mem.swapio
- info: percentage of the system RAM swapped in the last 30 minutes
- os: "linux freebsd"
- - name: oom_kill
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf
- metric: mem.oom_kill
- info: number of out of memory kills in the last 30 minutes
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: mem.swapio
- description: Swap I/O
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: in
- - name: out
- - name: system.pgpgio
- description: Memory Paged from/to disk
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: in
- - name: out
- - name: system.pgfaults
- description: Memory Page Faults
- unit: "faults/s"
- chart_type: line
- dimensions:
- - name: minor
- - name: major
- - name: mem.balloon
- description: Memory Ballooning Operations
- unit: "KiB/s"
- chart_type: line
- dimensions:
- - name: inflate
- - name: deflate
- - name: migrate
- - name: mem.zswapio
- description: ZSwap I/O
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: in
- - name: out
- - name: mem.ksm_cow
- description: KSM Copy On Write Operations
- unit: "KiB/s"
- chart_type: line
- dimensions:
- - name: swapin
- - name: write
- - name: mem.thp_faults
- description: Transparent Huge Page Fault Allocations
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: alloc
- - name: fallback
- - name: fallback_charge
- - name: mem.thp_file
- description: Transparent Huge Page File Allocations
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: alloc
- - name: fallback
- - name: mapped
- - name: fallback_charge
- - name: mem.thp_zero
- description: Transparent Huge Zero Page Allocations
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: alloc
- - name: failed
- - name: mem.thp_collapse
- description: Transparent Huge Pages Collapsed by khugepaged
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: alloc
- - name: failed
- - name: mem.thp_split
- description: Transparent Huge Page Splits
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: split
- - name: failed
- - name: split_pmd
- - name: split_deferred
- - name: mem.thp_swapout
- description: Transparent Huge Pages Swap Out
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: swapout
- - name: fallback
- - name: mem.thp_compact
- description: Transparent Huge Pages Compaction
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: success
- - name: fail
- - name: stall
- - name: mem.oom_kill
- description: Out of Memory Kills
- unit: "kills/s"
- chart_type: line
- dimensions:
- - name: kills
- - name: mem.numa
- description: NUMA events
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: local
- - name: foreign
- - name: interleave
- - name: other
- - name: pte_updates
- - name: huge_pte_updates
- - name: hint_faults
- - name: hint_faults_local
- - name: pages_migrated
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/interrupts
- monitored_instance:
- name: Interrupts
- link: ""
- categories:
- - data-collection.linux-systems.cpu-metrics
- icon_filename: "linuxserver.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - interrupts
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- Monitors `/proc/interrupts`, a file organized by CPU and then by the type of interrupt.
-          The numbers reported are per-CPU counts of each type of interrupt that has occurred.
-
- An interrupt is a signal to the processor emitted by hardware or software indicating an event that needs
- immediate attention. The processor then interrupts its current activities and executes the interrupt handler
- to deal with the event. This is part of the way a computer multitasks and handles concurrent processing.
-
- The types of interrupts include:
-
- - **I/O interrupts**: These are caused by I/O devices like the keyboard, mouse, printer, etc. For example, when
- you type something on the keyboard, an interrupt is triggered so the processor can handle the new input.
-
- - **Timer interrupts**: These are generated at regular intervals by the system's timer circuit. It's primarily
- used to switch the CPU among different tasks.
-
- - **Software interrupts**: These are generated by a program requiring disk I/O operations, or other system resources.
-
- - **Hardware interrupts**: These are caused by hardware conditions such as power failure, overheating, etc.
-
- Monitoring `/proc/interrupts` can be used for:
-
- - **Performance tuning**: If an interrupt is happening very frequently, it could be a sign that a device is not
- configured correctly, or there is a software bug causing unnecessary interrupts. This could lead to system
- performance degradation.
-
- - **System troubleshooting**: If you're seeing a lot of unexpected interrupts, it could be a sign of a hardware problem.
-
- - **Understanding system behavior**: More generally, keeping an eye on what interrupts are occurring can help you
- understand what your system is doing. It can provide insights into the system's interaction with hardware,
- drivers, and other parts of the kernel.
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.interrupts
- description: System interrupts
- unit: "interrupts/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per device
- - name: cpu core
- description: ""
- labels:
- - name: cpu
- description: TBD
- metrics:
- - name: cpu.interrupts
- description: CPU interrupts
- unit: "interrupts/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per device
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/loadavg
- monitored_instance:
- name: System Load Average
- link: ""
- categories:
- - data-collection.linux-systems.system-metrics
- icon_filename: "linuxserver.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - load
- - load average
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- The `/proc/loadavg` file provides information about the system load average.
-
- The load average is a measure of the amount of computational work that a system performs. It is a
- representation of the average system load over a period of time.
-
- This file contains three numbers representing the system load averages for the last 1, 5, and 15 minutes,
- respectively. It also includes the currently running processes and the total number of processes.
-
- Monitoring the load average can be used for:
-
- - **System performance**: If the load average is too high, it may indicate that your system is overloaded.
- On a system with a single CPU, if the load average is 1, it means the single CPU is fully utilized. If the
- load averages are consistently higher than the number of CPUs/cores, it may indicate that your system is
- overloaded and tasks are waiting for CPU time.
-
- - **Troubleshooting**: If the load average is unexpectedly high, it can be a sign of a problem. This could be
- due to a runaway process, a software bug, or a hardware issue.
-
- - **Capacity planning**: By monitoring the load average over time, you can understand the trends in your
- system's workload. This can help with capacity planning and scaling decisions.
-
- Remember that load average not only considers CPU usage, but also includes processes waiting for disk I/O.
- Therefore, high load averages could be due to I/O contention as well as CPU contention.
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: load_cpu_number
- link: https://github.com/netdata/netdata/blob/master/health/health.d/load.conf
- metric: system.load
- info: number of active CPU cores in the system
- os: "linux"
- - name: load_average_15
- link: https://github.com/netdata/netdata/blob/master/health/health.d/load.conf
- metric: system.load
- info: system fifteen-minute load average
- os: "linux"
- - name: load_average_5
- link: https://github.com/netdata/netdata/blob/master/health/health.d/load.conf
- metric: system.load
- info: system five-minute load average
- os: "linux"
- - name: load_average_1
- link: https://github.com/netdata/netdata/blob/master/health/health.d/load.conf
- metric: system.load
- info: system one-minute load average
- os: "linux"
- - name: active_processes
- link: https://github.com/netdata/netdata/blob/master/health/health.d/processes.conf
- metric: system.active_processes
- info: system process IDs (PID) space utilization
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.load
- description: System Load Average
- unit: "load"
- chart_type: line
- dimensions:
- - name: load1
- - name: load5
- - name: load15
- - name: system.active_processes
- description: System Active Processes
- unit: "processes"
- chart_type: line
- dimensions:
- - name: active
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/pressure
- monitored_instance:
- name: Pressure Stall Information
- link: ""
- categories:
- - data-collection.linux-systems.pressure-metrics
- icon_filename: "linuxserver.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - pressure
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- Introduced in Linux kernel 4.20, `/proc/pressure` exposes Pressure Stall Information (PSI). PSI is a
- feature that allows the system to track the amount of time the system is stalled due to resource
- contention, such as CPU, memory, or I/O.
-
- The collector monitors a separate file for each resource:
-
- - **cpu**: Tracks the amount of time tasks are stalled due to CPU contention.
- - **memory**: Tracks the amount of time tasks are stalled due to memory contention.
- - **io**: Tracks the amount of time tasks are stalled due to I/O contention.
- - **irq**: Tracks the amount of time tasks are stalled due to IRQ contention.
-
- Each of these files provides the percentage of time tasks were stalled, averaged over the last 10 seconds, 1 minute, and 5 minutes, together with a cumulative total stall time.
-
- Monitoring the /proc/pressure files can provide important insights into system performance and capacity planning:
-
- - **Identifying resource contention**: If these metrics are consistently high, it indicates that tasks are
- frequently being stalled due to lack of resources, which can significantly degrade system performance.
-
- - **Troubleshooting performance issues**: If a system is experiencing performance issues, these metrics can
- help identify whether resource contention is the cause.
-
- - **Capacity planning**: By monitoring these metrics over time, you can understand trends in resource
- utilization and make informed decisions about when to add more resources to your system.
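- 
- For illustration, each pressure file is a small text file with `avg10`/`avg60`/`avg300` percentages and a
- cumulative `total` in microseconds (a minimal sketch, assuming a kernel with PSI enabled):
- 
- ```python
- # Print "some" and "full" pressure averages for each resource.
- for resource in ("cpu", "memory", "io"):
-     with open(f"/proc/pressure/{resource}") as f:
-         for line in f:
-             # e.g. "some avg10=0.00 avg60=0.00 avg300=0.00 total=0"
-             kind, *pairs = line.split()
-             stats = dict(pair.split("=") for pair in pairs)
-             print(resource, kind, f"avg10={stats['avg10']}%", f"total={stats['total']}us")
- ```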
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.cpu_some_pressure
- description: CPU some pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: system.cpu_some_pressure_stall_time
- description: CPU some pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: system.cpu_full_pressure
- description: CPU full pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: full10
- - name: full60
- - name: full300
- - name: system.cpu_full_pressure_stall_time
- description: CPU full pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: system.memory_some_pressure
- description: Memory some pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: system.memory_some_pressure_stall_time
- description: Memory some pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: system.memory_full_pressure
- description: Memory full pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: full10
- - name: full60
- - name: full300
- - name: system.memory_full_pressure_stall_time
- description: Memory full pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: system.io_some_pressure
- description: I/O some pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: some10
- - name: some60
- - name: some300
- - name: system.io_some_pressure_stall_time
- description: I/O some pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - name: system.io_full_pressure
- description: I/O full pressure
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: full10
- - name: full60
- - name: full300
- - name: system.io_full_pressure_stall_time
- description: I/O full pressure stall time
- unit: "ms"
- chart_type: line
- dimensions:
- - name: time
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/softirqs
- monitored_instance:
- name: SoftIRQ statistics
- link: ""
- categories:
- - data-collection.linux-systems.cpu-metrics
- icon_filename: "linuxserver.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - softirqs
- - interrupts
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- In the Linux kernel, handling of hardware interrupts is split into two halves: the top half and the bottom half.
- The top half is the routine that responds immediately to an interrupt, while the bottom half is deferred to be processed later.
-
- Softirqs are a mechanism in the Linux kernel used to handle the bottom halves of interrupts, which can be
- deferred and processed later in a context where it's safe to enable interrupts.
-
- The actual work of handling the interrupt is offloaded to a softirq and executed later, when the system
- decides it's a good time to process it. This helps to keep the system responsive by not blocking the top
- half for too long, which could lead to missed interrupts.
-
- Monitoring `/proc/softirqs` is useful for:
-
- - **Performance tuning**: A high rate of softirqs could indicate a performance issue. For instance, a high
- rate of network softirqs (`NET_RX` and `NET_TX`) could indicate a network performance issue.
-
- - **Troubleshooting**: If a system is behaving unexpectedly, checking the softirqs could provide clues about
- what is going on. For example, a sudden increase in block device softirqs (`BLOCK`) might indicate a problem
- with a disk.
-
- - **Understanding system behavior**: Knowing what types of softirqs are happening can help you understand what
- your system is doing, particularly in terms of how it's interacting with hardware and how it's handling
- interrupts.
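- 
- For illustration, `/proc/softirqs` is a table with one row per softirq type and one column per CPU
- (a minimal sketch, not the collector's actual implementation):
- 
- ```python
- # Total softirq counts per type (HI, TIMER, NET_TX, NET_RX, BLOCK, ...) across all CPUs.
- with open("/proc/softirqs") as f:
-     next(f)  # skip the per-CPU header row
-     for line in f:
-         name, *counts = line.split()
-         print(name.rstrip(":"), sum(int(c) for c in counts))
- ```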
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.softirqs
- description: System softirqs
- unit: "softirqs/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per softirq
- - name: cpu core
- description: ""
- labels:
- - name: cpu
- description: TBD
- metrics:
- - name: cpu.softirqs
- description: CPU softirqs
- unit: "softirqs/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per softirq
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/net/softnet_stat
- monitored_instance:
- name: Softnet Statistics
- link: ""
- categories:
- - data-collection.linux-systems.network-metrics
- icon_filename: "linuxserver.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - softnet
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- `/proc/net/softnet_stat` provides statistics that relate to the handling of network packets by softirq.
-
- It provides information about:
-
- - Total number of processed packets (`processed`).
- - Number of packets dropped because the `net.core.netdev_max_backlog` queue was full (`dropped`).
- - Number of times `net_rx_action` had to stop with work remaining because its budget or time slice was exhausted (`squeezed`).
- - Number of packets steered to this CPU by Receive Packet Steering (`received_rps`).
- - Number of times the flow limit was reached (`flow_limit_count`).
-
- Monitoring the /proc/net/softnet_stat file can be useful for:
-
- - **Network performance monitoring**: By tracking the total number of processed packets and how many packets
- were dropped, you can gain insights into your system's network performance.
-
- - **Troubleshooting**: If you're experiencing network-related issues, this collector can provide valuable clues.
- For instance, a high number of dropped packets may indicate a network problem.
-
- - **Capacity planning**: If your system is consistently processing near its maximum capacity of network
- packets, it might be time to consider upgrading your network infrastructure.
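- 
- For illustration, the file contains one row per CPU with hexadecimal counters (a minimal sketch; the
- exact number of columns varies between kernel versions):
- 
- ```python
- # The first three columns are the processed, dropped and squeezed counters.
- with open("/proc/net/softnet_stat") as f:
-     for cpu, line in enumerate(f):
-         cols = [int(v, 16) for v in line.split()]
-         processed, dropped, squeezed = cols[0], cols[1], cols[2]
-         print(f"cpu{cpu}: processed={processed} dropped={dropped} squeezed={squeezed}")
- ```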
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: 1min_netdev_backlog_exceeded
- link: https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf
- metric: system.softnet_stat
- info: average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog
- os: "linux"
- - name: 1min_netdev_budget_ran_outs
- link: https://github.com/netdata/netdata/blob/master/health/health.d/softnet.conf
- metric: system.softnet_stat
- info:
- average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last
- minute (this can be a cause for dropped packets)
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.softnet_stat
- description: System softnet_stat
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: processed
- - name: dropped
- - name: squeezed
- - name: received_rps
- - name: flow_limit_count
- - name: cpu core
- description: ""
- labels: []
- metrics:
- - name: cpu.softnet_stat
- description: CPU softnet_stat
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: processed
- - name: dropped
- - name: squeezed
- - name: received_rps
- - name: flow_limit_count
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/meminfo
- monitored_instance:
- name: Memory Usage
- link: ""
- categories:
- - data-collection.linux-systems.memory-metrics
- icon_filename: "linuxserver.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - memory
- - ram
- - available
- - committed
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- `/proc/meminfo` provides detailed information about the system's current memory usage. It includes information
- about different types of memory: RAM, Swap, ZSwap, HugePages, Transparent HugePages (THP), kernel memory,
- SLAB memory, memory mappings, and more.
-
- Monitoring /proc/meminfo can be useful for:
-
- - **Performance Tuning**: Understanding your system's memory usage can help you make decisions about system
- tuning and optimization. For example, if your system is frequently low on free memory, it might benefit
- from more RAM.
-
- - **Troubleshooting**: If your system is experiencing problems, `/proc/meminfo` can provide clues about
- whether memory usage is a factor. For example, if your system is slow and swap usage is high, it could
- mean that your system is swapping a lot of memory out to disk, which can degrade performance.
-
- - **Capacity Planning**: By monitoring memory usage over time, you can understand trends and make informed
- decisions about future capacity needs.
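- 
- For illustration, here is a minimal sketch of parsing `/proc/meminfo` and deriving numbers similar to the
- ones charted below (the collector's exact accounting may differ slightly):
- 
- ```python
- # Parse /proc/meminfo into a dict of kB values.
- meminfo = {}
- with open("/proc/meminfo") as f:
-     for line in f:
-         key, value = line.split(":")
-         meminfo[key] = int(value.split()[0])  # most values are in kB
- 
- total, free = meminfo["MemTotal"], meminfo["MemFree"]
- cached = meminfo["Cached"] + meminfo.get("SReclaimable", 0)
- used = total - free - cached - meminfo["Buffers"]
- print(f"used={used / 1024:.1f} MiB available={meminfo.get('MemAvailable', 0) / 1024:.1f} MiB")
- ```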
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: ram_in_use
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf
- metric: system.ram
- info: system memory utilization
- os: "linux"
- - name: ram_available
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ram.conf
- metric: mem.available
- info: percentage of estimated amount of RAM available for userspace processes, without causing swapping
- os: "linux"
- - name: used_swap
- link: https://github.com/netdata/netdata/blob/master/health/health.d/swap.conf
- metric: mem.swap
- info: swap memory utilization
- os: "linux freebsd"
- - name: 1hour_memory_hw_corrupted
- link: https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf
- metric: mem.hwcorrupt
- info: amount of memory corrupted due to a hardware failure
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.ram
- description: System RAM
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: used
- - name: cached
- - name: buffers
- - name: mem.available
- description: Available RAM for applications
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: avail
- - name: mem.swap
- description: System Swap
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: used
- - name: mem.swap_cached
- description: Swap Memory Cached in RAM
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: cached
- - name: mem.zswap
- description: Zswap Usage
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: in-ram
- - name: on-disk
- - name: mem.hwcorrupt
- description: Corrupted Memory detected by ECC
- unit: "MiB"
- chart_type: line
- dimensions:
- - name: HardwareCorrupted
- - name: mem.commited
- description: Committed (Allocated) Memory
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: Commited_AS
- - name: mem.writeback
- description: Writeback Memory
- unit: "MiB"
- chart_type: line
- dimensions:
- - name: Dirty
- - name: Writeback
- - name: FuseWriteback
- - name: NfsWriteback
- - name: Bounce
- - name: mem.kernel
- description: Memory Used by Kernel
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: Slab
- - name: KernelStack
- - name: PageTables
- - name: VmallocUsed
- - name: Percpu
- - name: mem.slab
- description: Reclaimable Kernel Memory
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: reclaimable
- - name: unreclaimable
- - name: mem.hugepages
- description: Dedicated HugePages Memory
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: used
- - name: surplus
- - name: reserved
- - name: mem.thp
- description: Transparent HugePages Memory
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: anonymous
- - name: shmem
- - name: mem.thp_details
- description: Details of Transparent HugePages Usage
- unit: "MiB"
- chart_type: line
- dimensions:
- - name: ShmemPmdMapped
- - name: FileHugePages
- - name: FilePmdMapped
- - name: mem.reclaiming
- description: Memory Reclaiming
- unit: "MiB"
- chart_type: line
- dimensions:
- - name: Active
- - name: Inactive
- - name: Active(anon)
- - name: Inactive(anon)
- - name: Active(file)
- - name: Inactive(file)
- - name: Unevictable
- - name: Mlocked
- - name: mem.high_low
- description: High and Low Used and Free Memory Areas
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: high_used
- - name: low_used
- - name: high_free
- - name: low_free
- - name: mem.cma
- description: Contiguous Memory Allocator (CMA) Memory
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: used
- - name: free
- - name: mem.directmaps
- description: Direct Memory Mappings
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: 4k
- - name: 2m
- - name: 4m
- - name: 1g
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/pagetypeinfo
- monitored_instance:
- name: Page types
- link: ""
- categories:
- - data-collection.linux-systems.memory-metrics
- icon_filename: "microchip.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - memory page types
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration provides metrics about the system's memory page types"
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: mem.pagetype_global
- description: System orders available
- unit: "B"
- chart_type: stacked
- dimensions:
- - name: a dimension per pagesize
- - name: node, zone, type
- description: ""
- labels:
- - name: node_id
- description: TBD
- - name: node_zone
- description: TBD
- - name: node_type
- description: TBD
- metrics:
- - name: mem.pagetype
- description: pagetype_Node{node}_{zone}_{type}
- unit: "B"
- chart_type: stacked
- dimensions:
- - name: a dimension per pagesize
- - meta:
- plugin_name: proc.plugin
- module_name: /sys/devices/system/edac/mc
- monitored_instance:
- name: Memory modules (DIMMs)
- link: ""
- categories:
- - data-collection.linux-systems.memory-metrics
- icon_filename: "microchip.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - edac
- - ecc
- - dimm
- - ram
- - hardware
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- The Error Detection and Correction (EDAC) subsystem detects and reports errors in the system's memory,
- primarily ECC (Error-Correcting Code) memory errors.
-
- The collector provides data for:
-
- - Per memory controller (MC): correctable and uncorrectable errors. These can be of 2 kinds:
- - errors related to a DIMM
- - errors that cannot be associated with a DIMM
-
- - Per memory DIMM: correctable and uncorrectable errors. There are 2 kinds of memory controllers:
- - memory controllers that can identify the physical DIMMs and report errors directly for them,
- - memory controllers that report errors for memory address ranges that can be linked to DIMMs.
- In this case the DIMMs reported may be more than the physical DIMMs installed.
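- 
- For illustration, these counters are plain sysfs files (a minimal sketch, assuming at least one EDAC
- memory controller is present):
- 
- ```python
- import glob, os
- 
- # Correctable (ce_*) and uncorrectable (ue_*) error counters per memory controller.
- for mc in sorted(glob.glob("/sys/devices/system/edac/mc/mc*")):
-     counters = {}
-     for name in ("ce_count", "ue_count", "ce_noinfo_count", "ue_noinfo_count"):
-         path = os.path.join(mc, name)
-         if os.path.exists(path):
-             with open(path) as f:
-                 counters[name] = int(f.read())
-     print(os.path.basename(mc), counters)
- ```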
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: ecc_memory_mc_noinfo_correctable
- metric: mem.edac_mc
- info: memory controller ${label:controller} ECC correctable errors (unknown DIMM slot) in the last 10 minutes
- link: https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf
- - name: ecc_memory_mc_noinfo_uncorrectable
- metric: mem.edac_mc
- info: memory controller ${label:controller} ECC uncorrectable errors (unknown DIMM slot) in the last 10 minutes
- link: https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf
- - name: ecc_memory_dimm_correctable
- metric: mem.edac_mc_dimm
- info: DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC correctable errors in the last 10 minutes
- link: https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf
- - name: ecc_memory_dimm_uncorrectable
- metric: mem.edac_mc_dimm
- info: DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC uncorrectable errors in the last 10 minutes
- link: https://github.com/netdata/netdata/blob/master/health/health.d/memory.conf
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: memory controller
- description: These metrics refer to the memory controller.
- labels:
- - name: controller
- description: "[mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller."
- - name: mc_name
- description: Memory controller type.
- - name: size_mb
- description: The amount of memory in megabytes that this memory controller manages.
- - name: max_location
- description: Last available memory slot in this memory controller.
- metrics:
- - name: mem.edac_mc
- description: Memory Controller (MC) Error Detection And Correction (EDAC) Errors
- unit: errors/s
- chart_type: line
- dimensions:
- - name: correctable
- - name: uncorrectable
- - name: correctable_noinfo
- - name: uncorrectable_noinfo
- - name: memory module
- description: These metrics refer to the memory module (or rank, [depends on the memory controller](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#f5)).
- labels:
- - name: controller
- description: "[mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller."
- - name: dimm
- description: "[dimmX or rankX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#dimmx-or-rankx-directories) directory name of this memory module."
- - name: dimm_dev_type
- description: Type of DRAM device used in this memory module. For example, x1, x2, x4, x8.
- - name: dimm_edac_mode
- description: Used type of error detection and correction. For example, S4ECD4ED would mean a Chipkill with x4 DRAM.
- - name: dimm_label
- description: Label assigned to this memory module.
- - name: dimm_location
- description: Location of the memory module.
- - name: dimm_mem_type
- description: Type of the memory module.
- - name: size
- description: The amount of memory in megabytes that this memory module manages.
- metrics:
- - name: mem.edac_mc
- description: DIMM Error Detection And Correction (EDAC) Errors
- unit: errors/s
- chart_type: line
- dimensions:
- - name: correctable
- - name: uncorrectable
- - meta:
- plugin_name: proc.plugin
- module_name: /sys/devices/system/node
- monitored_instance:
- name: Non-Uniform Memory Access
- link: ""
- categories:
- - data-collection.linux-systems.memory-metrics
- icon_filename: "linuxserver.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - numa
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- Information about NUMA (Non-Uniform Memory Access) nodes on the system.
-
- NUMA is a method of configuring a cluster of microprocessors in a multiprocessing system so that they can
- share memory locally, improving performance and the system's ability to be expanded. NUMA is used in
- symmetric multiprocessing (SMP) systems.
-
- In a NUMA system, processors, memory, and I/O devices are grouped together into cells, also known as nodes.
- Each node has its own memory and set of I/O devices, and one or more processors. While a processor can access
- memory in any of the nodes, it does so faster when accessing memory within its own node.
-
- The collector provides statistics on memory allocations for processes running on the NUMA nodes, revealing the
- efficiency of memory allocations in multi-node systems.
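- 
- For illustration, each node directory exposes a `numastat` file with cumulative per-node allocation
- counters, in pages (a minimal sketch):
- 
- ```python
- import glob, os
- 
- # numa_hit/numa_miss/local_node/other_node etc., one file per NUMA node.
- for node in sorted(glob.glob("/sys/devices/system/node/node[0-9]*")):
-     with open(os.path.join(node, "numastat")) as f:
-         stats = dict(line.split() for line in f)
-     print(os.path.basename(node), "hit:", stats["numa_hit"], "miss:", stats["numa_miss"])
- ```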
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: numa node
- description: ""
- labels:
- - name: numa_node
- description: TBD
- metrics:
- - name: mem.numa_nodes
- description: NUMA events
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: hit
- - name: miss
- - name: local
- - name: foreign
- - name: interleave
- - name: other
- - meta:
- plugin_name: proc.plugin
- module_name: /sys/kernel/mm/ksm
- monitored_instance:
- name: Kernel Same-Page Merging
- link: ""
- categories:
- - data-collection.linux-systems.memory-metrics
- icon_filename: "microchip.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - ksm
- - samepage
- - merging
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- Kernel Samepage Merging (KSM) is a memory-saving feature in Linux that enables the kernel to examine the
- memory of different processes and identify identical pages. It then merges these identical pages into a
- single page that the processes share. This is particularly useful for virtualization, where multiple virtual
- machines might be running the same operating system or applications and have many identical pages.
-
- The collector provides information about the operation and effectiveness of KSM on your system.
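- 
- For illustration, KSM exposes page counters under `/sys/kernel/mm/ksm/` (a minimal sketch; multiply by
- the page size to get bytes):
- 
- ```python
- import os
- 
- page_size = os.sysconf("SC_PAGE_SIZE")
- ksm = {}
- for name in ("pages_shared", "pages_sharing", "pages_unshared", "pages_volatile"):
-     with open(f"/sys/kernel/mm/ksm/{name}") as f:
-         ksm[name] = int(f.read())
- 
- # pages_sharing counts mappings into shared pages; the difference approximates the savings
- saved_mib = (ksm["pages_sharing"] - ksm["pages_shared"]) * page_size / 1024 / 1024
- print(f"KSM is saving roughly {saved_mib:.1f} MiB")
- ```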
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: mem.ksm
- description: Kernel Same Page Merging
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: shared
- - name: unshared
- - name: sharing
- - name: volatile
- - name: mem.ksm_savings
- description: Kernel Same Page Merging Savings
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: savings
- - name: offered
- - name: mem.ksm_ratios
- description: Kernel Same Page Merging Effectiveness
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: savings
- - meta:
- plugin_name: proc.plugin
- module_name: /sys/block/zram
- monitored_instance:
- name: ZRAM
- link: ""
- categories:
- - data-collection.linux-systems.memory-metrics
- icon_filename: "microchip.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - zram
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- zRAM, or compressed RAM, uses a portion of your system's RAM as a compressed block device: the data
- written to this block device is compressed and stored in memory.
- 
- The collector provides information about the operation and the effectiveness of zRAM on your system.
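- 
- For illustration, each zram device exposes its counters in `/sys/block/zramX/mm_stat` (a minimal sketch,
- assuming a `zram0` device exists):
- 
- ```python
- # mm_stat columns start with: orig_data_size compr_data_size mem_used_total (all in bytes)
- with open("/sys/block/zram0/mm_stat") as f:
-     orig, compressed, mem_used = (int(v) for v in f.read().split()[:3])
- 
- if compressed:
-     print(f"compression ratio: {orig / compressed:.2f}")
- print(f"memory saved: {(orig - mem_used) / 1024 / 1024:.1f} MiB")
- ```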
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: zram device
- description: ""
- labels:
- - name: device
- description: TBD
- metrics:
- - name: mem.zram_usage
- description: ZRAM Memory Usage
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: compressed
- - name: metadata
- - name: mem.zram_savings
- description: ZRAM Memory Savings
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: savings
- - name: original
- - name: mem.zram_ratio
- description: ZRAM Compression Ratio (original to compressed)
- unit: "ratio"
- chart_type: line
- dimensions:
- - name: ratio
- - name: mem.zram_efficiency
- description: ZRAM Efficiency
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: percent
- - meta:
- plugin_name: proc.plugin
- module_name: ipc
- monitored_instance:
- name: Inter Process Communication
- link: ""
- categories:
- - data-collection.linux-systems.ipc-metrics
- icon_filename: "network-wired.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - ipc
- - semaphores
- - shared memory
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- IPC stands for Inter-Process Communication. It is a mechanism that allows processes to communicate with each
- other and synchronize their actions.
-
- This collector exposes information about:
-
- - Message Queues: This allows messages to be exchanged between processes. It is a more flexible method, as
- messages can be placed onto a queue and read at a later time.
-
- - Shared Memory: This method allows for the fastest form of IPC because processes can exchange data by
- reading/writing into shared memory segments.
-
- - Semaphores: They are used to synchronize the operations performed by independent processes. So, if multiple
- processes are trying to access a single shared resource, semaphores can ensure that only one process
- accesses the resource at a given time.
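- 
- For illustration, one way to see the same objects is via `/proc/sysvipc/` (a minimal sketch; the
- collector itself obtains this information through IPC system calls):
- 
- ```python
- # Each file has a header row followed by one row per IPC object.
- def count_objects(path):
-     with open(path) as f:
-         next(f)  # skip the header
-         return sum(1 for _ in f)
- 
- print("shared memory segments:", count_objects("/proc/sysvipc/shm"))
- print("message queues:", count_objects("/proc/sysvipc/msg"))
- print("semaphore arrays:", count_objects("/proc/sysvipc/sem"))
- ```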
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: semaphores_used
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf
- metric: system.ipc_semaphores
- info: IPC semaphore utilization
- os: "linux"
- - name: semaphore_arrays_used
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ipc.conf
- metric: system.ipc_semaphore_arrays
- info: IPC semaphore arrays utilization
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.ipc_semaphores
- description: IPC Semaphores
- unit: "semaphores"
- chart_type: area
- dimensions:
- - name: semaphores
- - name: system.ipc_semaphore_arrays
- description: IPC Semaphore Arrays
- unit: "arrays"
- chart_type: area
- dimensions:
- - name: arrays
- - name: system.message_queue_message
- description: IPC Message Queue Number of Messages
- unit: "messages"
- chart_type: stacked
- dimensions:
- - name: a dimension per queue
- - name: system.message_queue_bytes
- description: IPC Message Queue Used Bytes
- unit: "bytes"
- chart_type: stacked
- dimensions:
- - name: a dimension per queue
- - name: system.shared_memory_segments
- description: IPC Shared Memory Number of Segments
- unit: "segments"
- chart_type: stacked
- dimensions:
- - name: segments
- - name: system.shared_memory_bytes
- description: IPC Shared Memory Used Bytes
- unit: "bytes"
- chart_type: stacked
- dimensions:
- - name: bytes
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/diskstats
- monitored_instance:
- name: Disk Statistics
- link: ""
- categories:
- - data-collection.linux-systems.disk-metrics
- icon_filename: "hard-drive.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - disk
- - disks
- - io
- - bcache
- - block devices
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- Detailed statistics for each of your system's disk devices and partitions.
- The data is reported by the kernel and can be used to monitor disk activity on a Linux system.
-
- Get valuable insight into how your disks are performing and where potential bottlenecks might be.
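- 
- For illustration, `/proc/diskstats` has one row per device; a minimal sketch of reading the I/O counters
- (sectors in this file are always 512 bytes, regardless of the device's real sector size):
- 
- ```python
- # Columns: major minor name reads reads_merged sectors_read ms_reading writes writes_merged sectors_written ...
- with open("/proc/diskstats") as f:
-     for line in f:
-         cols = line.split()
-         name = cols[2]
-         reads, sectors_read = int(cols[3]), int(cols[5])
-         writes, sectors_written = int(cols[7]), int(cols[9])
-         print(f"{name}: read {sectors_read * 512 // 1024} KiB in {reads} ops, "
-               f"wrote {sectors_written * 512 // 1024} KiB in {writes} ops")
- ```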
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: 10min_disk_backlog
- link: https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf
- metric: disk.backlog
- info: average backlog size of the ${label:device} disk over the last 10 minutes
- os: "linux"
- - name: 10min_disk_utilization
- link: https://github.com/netdata/netdata/blob/master/health/health.d/disks.conf
- metric: disk.util
- info: average percentage of time ${label:device} disk was busy over the last 10 minutes
- os: "linux freebsd"
- - name: bcache_cache_dirty
- link: https://github.com/netdata/netdata/blob/master/health/health.d/bcache.conf
- metric: disk.bcache_cache_alloc
- info: percentage of cache space used for dirty data and metadata (this usually means your SSD cache is too small)
- - name: bcache_cache_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/bcache.conf
- metric: disk.bcache_cache_read_races
- info:
- number of times data was read from the cache, the bucket was reused and invalidated in the last 10 minutes (when this occurs the data is
- reread from the backing device)
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.io
- description: Disk I/O
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: in
- - name: out
- - name: disk
- description: ""
- labels:
- - name: device
- description: TBD
- - name: mount_point
- description: TBD
- - name: device_type
- description: TBD
- metrics:
- - name: disk.io
- description: Disk I/O Bandwidth
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: reads
- - name: writes
- - name: disk_ext.io
- description: Amount of Discarded Data
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: discards
- - name: disk.ops
- description: Disk Completed I/O Operations
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
- - name: disk_ext.ops
- description: Disk Completed Extended I/O Operations
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: discards
- - name: flushes
- - name: disk.qops
- description: Disk Current I/O Operations
- unit: "operations"
- chart_type: line
- dimensions:
- - name: operations
- - name: disk.backlog
- description: Disk Backlog
- unit: "milliseconds"
- chart_type: area
- dimensions:
- - name: backlog
- - name: disk.busy
- description: Disk Busy Time
- unit: "milliseconds"
- chart_type: area
- dimensions:
- - name: busy
- - name: disk.util
- description: Disk Utilization Time
- unit: "% of time working"
- chart_type: area
- dimensions:
- - name: utilization
- - name: disk.mops
- description: Disk Merged Operations
- unit: "merged operations/s"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
- - name: disk_ext.mops
- description: Disk Merged Discard Operations
- unit: "merged operations/s"
- chart_type: line
- dimensions:
- - name: discards
- - name: disk.iotime
- description: Disk Total I/O Time
- unit: "milliseconds/s"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
- - name: disk_ext.iotime
- description: Disk Total I/O Time for Extended Operations
- unit: "milliseconds/s"
- chart_type: line
- dimensions:
- - name: discards
- - name: flushes
- - name: disk.await
- description: Average Completed I/O Operation Time
- unit: "milliseconds/operation"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
- - name: disk_ext.await
- description: Average Completed Extended I/O Operation Time
- unit: "milliseconds/operation"
- chart_type: line
- dimensions:
- - name: discards
- - name: flushes
- - name: disk.avgsz
- description: Average Completed I/O Operation Bandwidth
- unit: "KiB/operation"
- chart_type: area
- dimensions:
- - name: reads
- - name: writes
- - name: disk_ext.avgsz
- description: Average Amount of Discarded Data
- unit: "KiB/operation"
- chart_type: area
- dimensions:
- - name: discards
- - name: disk.svctm
- description: Average Service Time
- unit: "milliseconds/operation"
- chart_type: line
- dimensions:
- - name: svctm
- - name: disk.bcache_cache_alloc
- description: BCache Cache Allocations
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: unused
- - name: dirty
- - name: clean
- - name: metadata
- - name: undefined
- - name: disk.bcache_hit_ratio
- description: BCache Cache Hit Ratio
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: 5min
- - name: 1hour
- - name: 1day
- - name: ever
- - name: disk.bcache_rates
- description: BCache Rates
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: congested
- - name: writeback
- - name: disk.bcache_size
- description: BCache Cache Sizes
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: dirty
- - name: disk.bcache_usage
- description: BCache Cache Usage
- unit: "percentage"
- chart_type: area
- dimensions:
- - name: avail
- - name: disk.bcache_cache_read_races
- description: BCache Cache Read Races
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: races
- - name: errors
- - name: disk.bcache
- description: BCache Cache I/O Operations
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: hits
- - name: misses
- - name: collisions
- - name: readaheads
- - name: disk.bcache_bypass
- description: BCache Cache Bypass I/O Operations
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: hits
- - name: misses
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/mdstat
- monitored_instance:
- name: MD RAID
- link: ""
- categories:
- - data-collection.linux-systems.disk-metrics
- icon_filename: "hard-drive.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - raid
- - mdadm
- - mdstat
- - raid
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration monitors the status of MD RAID devices."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: mdstat_last_collected
- link: https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf
- metric: md.disks
- info: number of seconds since the last successful data collection
- - name: mdstat_disks
- link: https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf
- metric: md.disks
- info:
- number of devices in the down state for the ${label:device} ${label:raid_level} array. Any number > 0 indicates that the array is degraded.
- - name: mdstat_mismatch_cnt
- link: https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf
- metric: md.mismatch_cnt
- info: number of unsynchronized blocks for the ${label:device} ${label:raid_level} array
- - name: mdstat_nonredundant_last_collected
- link: https://github.com/netdata/netdata/blob/master/health/health.d/mdstat.conf
- metric: md.nonredundant
- info: number of seconds since the last successful data collection
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: md.health
- description: Faulty Devices In MD
- unit: "failed disks"
- chart_type: line
- dimensions:
- - name: a dimension per md array
- - name: md array
- description: ""
- labels:
- - name: device
- description: TBD
- - name: raid_level
- description: TBD
- metrics:
- - name: md.disks
- description: Disks Stats
- unit: "disks"
- chart_type: stacked
- dimensions:
- - name: inuse
- - name: down
- - name: md.mismatch_cnt
- description: Mismatch Count
- unit: "unsynchronized blocks"
- chart_type: line
- dimensions:
- - name: count
- - name: md.status
- description: Current Status
- unit: "percent"
- chart_type: line
- dimensions:
- - name: check
- - name: resync
- - name: recovery
- - name: reshape
- - name: md.expected_time_until_operation_finish
- description: Approximate Time Until Finish
- unit: "seconds"
- chart_type: line
- dimensions:
- - name: finish_in
- - name: md.operation_speed
- description: Operation Speed
- unit: "KiB/s"
- chart_type: line
- dimensions:
- - name: speed
- - name: md.nonredundant
- description: Nonredundant Array Availability
- unit: "boolean"
- chart_type: line
- dimensions:
- - name: available
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/net/dev
- monitored_instance:
- name: Network interfaces
- link: ""
- categories:
- - data-collection.linux-systems.network-metrics
- icon_filename: "network-wired.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - network interfaces
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor network interface metrics about bandwidth, state, errors and more."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: interface_speed
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.net
- info: network interface ${label:device} current speed
- os: "*"
- - name: 1m_received_traffic_overflow
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.net
- info: average inbound utilization for the network interface ${label:device} over the last minute
- os: "linux"
- - name: 1m_sent_traffic_overflow
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.net
- info: average outbound utilization for the network interface ${label:device} over the last minute
- os: "linux"
- - name: inbound_packets_dropped_ratio
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.drops
- info: ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes
- os: "linux"
- - name: outbound_packets_dropped_ratio
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.drops
- info: ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes
- os: "linux"
- - name: wifi_inbound_packets_dropped_ratio
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.drops
- info: ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes
- os: "linux"
- - name: wifi_outbound_packets_dropped_ratio
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.drops
- info: ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes
- os: "linux"
- - name: 1m_received_packets_rate
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.packets
- info: average number of packets received by the network interface ${label:device} over the last minute
- os: "linux freebsd"
- - name: 10s_received_packets_storm
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.packets
- info: ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute
- os: "linux freebsd"
- - name: 10min_fifo_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/net.conf
- metric: net.fifo
- info: number of FIFO errors for the network interface ${label:device} in the last 10 minutes
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.net
- description: Physical Network Interfaces Aggregated Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: network device
- description: ""
- labels:
- - name: interface_type
- description: TBD
- - name: device
- description: TBD
- metrics:
- - name: net.net
- description: Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: net.speed
- description: Interface Speed
- unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: speed
- - name: net.duplex
- description: Interface Duplex State
- unit: "state"
- chart_type: line
- dimensions:
- - name: full
- - name: half
- - name: unknown
- - name: net.operstate
- description: Interface Operational State
- unit: "state"
- chart_type: line
- dimensions:
- - name: up
- - name: down
- - name: notpresent
- - name: lowerlayerdown
- - name: testing
- - name: dormant
- - name: unknown
- - name: net.carrier
- description: Interface Physical Link State
- unit: "state"
- chart_type: line
- dimensions:
- - name: up
- - name: down
- - name: net.mtu
- description: Interface MTU
- unit: "octets"
- chart_type: line
- dimensions:
- - name: mtu
- - name: net.packets
- description: Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: multicast
- - name: net.errors
- description: Interface Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: inbound
- - name: outbound
- - name: net.drops
- description: Interface Drops
- unit: "drops/s"
- chart_type: line
- dimensions:
- - name: inbound
- - name: outbound
- - name: net.fifo
- description: Interface FIFO Buffer Errors
- unit: "errors"
- chart_type: line
- dimensions:
- - name: receive
- - name: transmit
- - name: net.compressed
- description: Compressed Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: net.events
- description: Network Interface Events
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: frames
- - name: collisions
- - name: carrier
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/net/wireless
- monitored_instance:
- name: Wireless network interfaces
- link: ""
- categories:
- - data-collection.linux-systems.network-metrics
- icon_filename: "network-wired.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - wireless devices
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor wireless devices with metrics about status, link quality, signal level, noise level and more."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: wireless device
- description: ""
- labels: []
- metrics:
- - name: wireless.status
- description: Internal status reported by interface.
- unit: "status"
- chart_type: line
- dimensions:
- - name: status
- - name: wireless.link_quality
- description: Overall quality of the link. This is an aggregate value, and depends on the driver and hardware.
- unit: "value"
- chart_type: line
- dimensions:
- - name: link_quality
- - name: wireless.signal_level
- description:
- The signal level is the wireless signal power level received by the wireless client. The closer the value is to 0, the stronger the
- signal.
- unit: "dBm"
- chart_type: line
- dimensions:
- - name: signal_level
- - name: wireless.noise_level
- description:
- The noise level indicates the amount of background noise in your environment. The closer the value is to 0, the greater the noise level.
- unit: "dBm"
- chart_type: line
- dimensions:
- - name: noise_level
- - name: wireless.discarded_packets
- description: Packets discarded by the wireless adapter due to wireless-specific problems.
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: nwid
- - name: crypt
- - name: frag
- - name: retry
- - name: misc
- - name: wireless.missed_beacons
- description: Number of missed beacons.
- unit: "frames/s"
- chart_type: line
- dimensions:
- - name: missed_beacons
- - meta:
- plugin_name: proc.plugin
- module_name: /sys/class/infiniband
- monitored_instance:
- name: InfiniBand
- link: ""
- categories:
- - data-collection.linux-systems.network-metrics
- icon_filename: "network-wired.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - infiniband
- - rdma
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration monitors InfiniBand network inteface statistics."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: infiniband port
- description: ""
- labels: []
- metrics:
- - name: ib.bytes
- description: Bandwidth usage
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: Received
- - name: Sent
- - name: ib.packets
- description: Packets Statistics
- unit: "packets/s"
- chart_type: area
- dimensions:
- - name: Received
- - name: Sent
- - name: Mcast_rcvd
- - name: Mcast_sent
- - name: Ucast_rcvd
- - name: Ucast_sent
- - name: ib.errors
- description: Error Counters
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: Pkts_malformated
- - name: Pkts_rcvd_discarded
- - name: Pkts_sent_discarded
- - name: Tick_Wait_to_send
- - name: Pkts_missed_resource
- - name: Buffer_overrun
- - name: Link_Downed
- - name: Link_recovered
- - name: Link_integrity_err
- - name: Link_minor_errors
- - name: Pkts_rcvd_with_EBP
- - name: Pkts_rcvd_discarded_by_switch
- - name: Pkts_sent_discarded_by_switch
- - name: ib.hwerrors
- description: Hardware Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: Duplicated_packets
- - name: Pkt_Seq_Num_gap
- - name: Ack_timer_expired
- - name: Drop_missing_buffer
- - name: Drop_out_of_sequence
- - name: NAK_sequence_rcvd
- - name: CQE_err_Req
- - name: CQE_err_Resp
- - name: CQE_Flushed_err_Req
- - name: CQE_Flushed_err_Resp
- - name: Remote_access_err_Req
- - name: Remote_access_err_Resp
- - name: Remote_invalid_req
- - name: Local_length_err_Resp
- - name: RNR_NAK_Packets
- - name: CNP_Pkts_ignored
- - name: RoCE_ICRC_Errors
- - name: ib.hwpackets
- description: Hardware Packets Statistics
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: RoCEv2_Congestion_sent
- - name: RoCEv2_Congestion_rcvd
- - name: IB_Congestion_handled
- - name: ATOMIC_req_rcvd
- - name: Connection_req_rcvd
- - name: Read_req_rcvd
- - name: Write_req_rcvd
- - name: RoCE_retrans_adaptive
- - name: RoCE_retrans_timeout
- - name: RoCE_slow_restart
- - name: RoCE_slow_restart_congestion
- - name: RoCE_slow_restart_count
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/net/netstat
- monitored_instance:
- name: Network statistics
- link: ""
- categories:
- - data-collection.linux-systems.network-metrics
- icon_filename: "network-wired.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - ip
- - udp
- - udplite
- - icmp
- - netstat
- - snmp
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration provides metrics from the `netstat`, `snmp` and `snmp6` modules."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: 1m_tcp_syn_queue_drops
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf
- metric: ip.tcp_syn_queue
- info: average number of SYN requests dropped due to the full TCP SYN queue over the last minute (SYN cookies were not enabled)
- os: "linux"
- - name: 1m_tcp_syn_queue_cookies
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf
- metric: ip.tcp_syn_queue
- info: average number of sent SYN cookies due to the full TCP SYN queue over the last minute
- os: "linux"
- - name: 1m_tcp_accept_queue_overflows
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf
- metric: ip.tcp_accept_queue
- info: average number of overflows in the TCP accept queue over the last minute
- os: "linux"
- - name: 1m_tcp_accept_queue_drops
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_listen.conf
- metric: ip.tcp_accept_queue
- info: average number of dropped packets in the TCP accept queue over the last minute
- os: "linux"
- - name: tcp_connections
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_conn.conf
- metric: ip.tcpsock
- info: TCP connections utilization
- os: "linux"
- - name: 1m_ip_tcp_resets_sent
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf
- metric: ip.tcphandshake
- info: average number of sent TCP RESETS over the last minute
- os: "linux"
- - name: 10s_ip_tcp_resets_sent
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf
- metric: ip.tcphandshake
- info:
- average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has
- crashed. Netdata will not send a clear notification for this alarm.
- os: "linux"
- - name: 1m_ip_tcp_resets_received
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf
- metric: ip.tcphandshake
- info: average number of received TCP RESETS over the last minute
- os: "linux freebsd"
- - name: 10s_ip_tcp_resets_received
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_resets.conf
- metric: ip.tcphandshake
- info:
- average number of received TCP RESETS over the last 10 seconds. This can indicate that a service this host depends on has crashed.
- Netdata will not send a clear notification for this alarm.
- os: "linux freebsd"
- - name: 1m_ipv4_udp_receive_buffer_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf
- metric: ipv4.udperrors
- info: average number of UDP receive buffer errors over the last minute
- os: "linux freebsd"
- - name: 1m_ipv4_udp_send_buffer_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/udp_errors.conf
- metric: ipv4.udperrors
- info: average number of UDP send buffer errors over the last minute
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.ip
- description: IPv4 Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: ip.tcpmemorypressures
- description: TCP Memory Pressures
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: pressures
- - name: ip.tcpconnaborts
- description: TCP Connection Aborts
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: baddata
- - name: userclosed
- - name: nomemory
- - name: timeout
- - name: linger
- - name: failed
- - name: ip.tcpreorders
- description: TCP Reordered Packets by Detection Method
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: timestamp
- - name: sack
- - name: fack
- - name: reno
- - name: ip.tcpofo
- description: TCP Out-Of-Order Queue
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: inqueue
- - name: dropped
- - name: merged
- - name: pruned
- - name: ip.tcpsyncookies
- description: TCP SYN Cookies
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: failed
- - name: ip.tcp_syn_queue
- description: TCP SYN Queue Issues
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: drops
- - name: cookies
- - name: ip.tcp_accept_queue
- description: TCP Accept Queue Issues
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: overflows
- - name: drops
- - name: ip.tcpsock
- description: IPv4 TCP Connections
- unit: "active connections"
- chart_type: line
- dimensions:
- - name: connections
- - name: ip.tcppackets
- description: IPv4 TCP Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ip.tcperrors
- description: IPv4 TCP Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InErrs
- - name: InCsumErrors
- - name: RetransSegs
- - name: ip.tcpopens
- description: IPv4 TCP Opens
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: active
- - name: passive
- - name: ip.tcphandshake
- description: IPv4 TCP Handshake Issues
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: EstabResets
- - name: OutRsts
- - name: AttemptFails
- - name: SynRetrans
- - name: ipv4.packets
- description: IPv4 Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: forwarded
- - name: delivered
- - name: ipv4.errors
- description: IPv4 Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InDiscards
- - name: OutDiscards
- - name: InNoRoutes
- - name: OutNoRoutes
- - name: InHdrErrors
- - name: InAddrErrors
- - name: InTruncatedPkts
- - name: InCsumErrors
- - name: ipv4.bcast
- description: IPv4 Broadcast Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.bcastpkts
- description: IPv4 Broadcast Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.mcast
- description: IPv4 Multicast Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.mcastpkts
- description: IPv4 Multicast Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.icmp
- description: IPv4 ICMP Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.icmpmsg
- description: IPv4 ICMP Messages
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InEchoReps
- - name: OutEchoReps
- - name: InDestUnreachs
- - name: OutDestUnreachs
- - name: InRedirects
- - name: OutRedirects
- - name: InEchos
- - name: OutEchos
- - name: InRouterAdvert
- - name: OutRouterAdvert
- - name: InRouterSelect
- - name: OutRouterSelect
- - name: InTimeExcds
- - name: OutTimeExcds
- - name: InParmProbs
- - name: OutParmProbs
- - name: InTimestamps
- - name: OutTimestamps
- - name: InTimestampReps
- - name: OutTimestampReps
- - name: ipv4.icmp_errors
- description: IPv4 ICMP Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InErrors
- - name: OutErrors
- - name: InCsumErrors
- - name: ipv4.udppackets
- description: IPv4 UDP Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.udperrors
- description: IPv4 UDP Errors
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: RcvbufErrors
- - name: SndbufErrors
- - name: InErrors
- - name: NoPorts
- - name: InCsumErrors
- - name: IgnoredMulti
- - name: ipv4.udplite
- description: IPv4 UDPLite Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv4.udplite_errors
- description: IPv4 UDPLite Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: RcvbufErrors
- - name: SndbufErrors
- - name: InErrors
- - name: NoPorts
- - name: InCsumErrors
- - name: IgnoredMulti
- - name: ipv4.ecnpkts
- description: IPv4 ECN Statistics
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: CEP
- - name: NoECTP
- - name: ECTP0
- - name: ECTP1
- - name: ipv4.fragsin
- description: IPv4 Fragments Reassembly
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: all
- - name: ipv4.fragsout
- description: IPv4 Fragments Sent
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: created
- - name: system.ipv6
- description: IPv6 Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.packets
- description: IPv6 Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: forwarded
- - name: delivers
- - name: ipv6.errors
- description: IPv6 Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InDiscards
- - name: OutDiscards
- - name: InHdrErrors
- - name: InAddrErrors
- - name: InUnknownProtos
- - name: InTooBigErrors
- - name: InTruncatedPkts
- - name: InNoRoutes
- - name: OutNoRoutes
- - name: ipv6.bcast
- description: IPv6 Broadcast Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.mcast
- description: IPv6 Multicast Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.mcastpkts
- description: IPv6 Multicast Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.udppackets
- description: IPv6 UDP Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.udperrors
- description: IPv6 UDP Errors
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: RcvbufErrors
- - name: SndbufErrors
- - name: InErrors
- - name: NoPorts
- - name: InCsumErrors
- - name: IgnoredMulti
- - name: ipv6.udplitepackets
- description: IPv6 UDPLite Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.udpliteerrors
- description: IPv6 UDPLite Errors
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: RcvbufErrors
- - name: SndbufErrors
- - name: InErrors
- - name: NoPorts
- - name: InCsumErrors
- - name: ipv6.icmp
- description: IPv6 ICMP Messages
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.icmpredir
- description: IPv6 ICMP Redirects
- unit: "redirects/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.icmperrors
- description: IPv6 ICMP Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: InErrors
- - name: OutErrors
- - name: InCsumErrors
- - name: InDestUnreachs
- - name: InPktTooBigs
- - name: InTimeExcds
- - name: InParmProblems
- - name: OutDestUnreachs
- - name: OutPktTooBigs
- - name: OutTimeExcds
- - name: OutParmProblems
- - name: ipv6.icmpechos
- description: IPv6 ICMP Echo
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InEchos
- - name: OutEchos
- - name: InEchoReplies
- - name: OutEchoReplies
- - name: ipv6.groupmemb
- description: IPv6 ICMP Group Membership
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InQueries
- - name: OutQueries
- - name: InResponses
- - name: OutResponses
- - name: InReductions
- - name: OutReductions
- - name: ipv6.icmprouter
- description: IPv6 Router Messages
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InSolicits
- - name: OutSolicits
- - name: InAdvertisements
- - name: OutAdvertisements
- - name: ipv6.icmpneighbor
- description: IPv6 Neighbor Messages
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InSolicits
- - name: OutSolicits
- - name: InAdvertisements
- - name: OutAdvertisements
- - name: ipv6.icmpmldv2
- description: IPv6 ICMP MLDv2 Reports
- unit: "reports/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipv6.icmptypes
- description: IPv6 ICMP Types
- unit: "messages/s"
- chart_type: line
- dimensions:
- - name: InType1
- - name: InType128
- - name: InType129
- - name: InType136
- - name: OutType1
- - name: OutType128
- - name: OutType129
- - name: OutType133
- - name: OutType135
- - name: OutType143
- - name: ipv6.ect
- description: IPv6 ECT Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: InNoECTPkts
- - name: InECT1Pkts
- - name: InECT0Pkts
- - name: InCEPkts
- - name: ipv6.fragsin
- description: IPv6 Fragments Reassembly
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: timeout
- - name: all
- - name: ipv6.fragsout
- description: IPv6 Fragments Sent
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: ok
- - name: failed
- - name: all
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/net/sockstat
- monitored_instance:
- name: Socket statistics
- link: ""
- categories:
- - data-collection.linux-systems.network-metrics
- icon_filename: "network-wired.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - sockets
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration provides socket statistics."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: tcp_orphans
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_orphans.conf
- metric: ipv4.sockstat_tcp_sockets
- info: orphan IPv4 TCP sockets utilization
- os: "linux"
- - name: tcp_memory
- link: https://github.com/netdata/netdata/blob/master/health/health.d/tcp_mem.conf
- metric: ipv4.sockstat_tcp_mem
- info: TCP memory utilization
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: ip.sockstat_sockets
- description: Sockets used for all address families
- unit: "sockets"
- chart_type: line
- dimensions:
- - name: used
- - name: ipv4.sockstat_tcp_sockets
- description: IPv4 TCP Sockets
- unit: "sockets"
- chart_type: line
- dimensions:
- - name: alloc
- - name: orphan
- - name: inuse
- - name: timewait
- - name: ipv4.sockstat_tcp_mem
- description: IPv4 TCP Sockets Memory
- unit: "KiB"
- chart_type: area
- dimensions:
- - name: mem
- - name: ipv4.sockstat_udp_sockets
- description: IPv4 UDP Sockets
- unit: "sockets"
- chart_type: line
- dimensions:
- - name: inuse
- - name: ipv4.sockstat_udp_mem
- description: IPv4 UDP Sockets Memory
- unit: "sockets"
- chart_type: line
- dimensions:
- - name: mem
- - name: ipv4.sockstat_udplite_sockets
- description: IPv4 UDPLITE Sockets
- unit: "sockets"
- chart_type: line
- dimensions:
- - name: inuse
- - name: ipv4.sockstat_raw_sockets
- description: IPv4 RAW Sockets
- unit: "sockets"
- chart_type: line
- dimensions:
- - name: inuse
- - name: ipv4.sockstat_frag_sockets
- description: IPv4 FRAG Sockets
- unit: "fragments"
- chart_type: line
- dimensions:
- - name: inuse
- - name: ipv4.sockstat_frag_mem
- description: IPv4 FRAG Sockets Memory
- unit: "KiB"
- chart_type: area
- dimensions:
- - name: mem
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/net/sockstat6
- monitored_instance:
- name: IPv6 Socket Statistics
- link: ""
- categories:
- - data-collection.linux-systems.network-metrics
- icon_filename: "network-wired.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - ipv6 sockets
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration provides IPv6 socket statistics."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: ipv6.sockstat6_tcp_sockets
- description: IPv6 TCP Sockets
- unit: "sockets"
- chart_type: line
- dimensions:
- - name: inuse
- - name: ipv6.sockstat6_udp_sockets
- description: IPv6 UDP Sockets
- unit: "sockets"
- chart_type: line
- dimensions:
- - name: inuse
- - name: ipv6.sockstat6_udplite_sockets
- description: IPv6 UDPLITE Sockets
- unit: "sockets"
- chart_type: line
- dimensions:
- - name: inuse
- - name: ipv6.sockstat6_raw_sockets
- description: IPv6 RAW Sockets
- unit: "sockets"
- chart_type: line
- dimensions:
- - name: inuse
- - name: ipv6.sockstat6_frag_sockets
- description: IPv6 FRAG Sockets
- unit: "fragments"
- chart_type: line
- dimensions:
- - name: inuse
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/net/ip_vs_stats
- monitored_instance:
- name: IP Virtual Server
- link: ""
- categories:
- - data-collection.linux-systems.network-metrics
- icon_filename: "network-wired.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - ip virtual server
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration monitors IP Virtual Server statistics"
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: ipvs.sockets
- description: IPVS New Connections
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: connections
- - name: ipvs.packets
- description: IPVS Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: ipvs.net
- description: IPVS Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: received
- - name: sent
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/net/rpc/nfs
- monitored_instance:
- name: NFS Client
- link: ""
- categories:
- - data-collection.linux-systems.filesystem-metrics.nfs
- icon_filename: "nfs.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - nfs client
- - filesystem
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration provides statistics from the Linux kernel's NFS Client."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: nfs.net
- description: NFS Client Network
- unit: "operations/s"
- chart_type: stacked
- dimensions:
- - name: udp
- - name: tcp
- - name: nfs.rpc
- description: NFS Client Remote Procedure Calls Statistics
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: calls
- - name: retransmits
- - name: auth_refresh
- - name: nfs.proc2
- description: NFS v2 Client Remote Procedure Calls
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per proc2 call
- - name: nfs.proc3
- description: NFS v3 Client Remote Procedure Calls
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per proc3 call
- - name: nfs.proc4
- description: NFS v4 Client Remote Procedure Calls
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per proc4 call
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/net/rpc/nfsd
- monitored_instance:
- name: NFS Server
- link: ""
- categories:
- - data-collection.linux-systems.filesystem-metrics.nfs
- icon_filename: "nfs.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - nfs server
- - filesystem
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration provides statistics from the Linux kernel's NFS Server."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: nfsd.readcache
- description: NFS Server Read Cache
- unit: "reads/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: nocache
- - name: nfsd.filehandles
- description: NFS Server File Handles
- unit: "handles/s"
- chart_type: line
- dimensions:
- - name: stale
- - name: nfsd.io
- description: NFS Server I/O
- unit: "kilobytes/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: nfsd.threads
- description: NFS Server Threads
- unit: "threads"
- chart_type: line
- dimensions:
- - name: threads
- - name: nfsd.net
- description: NFS Server Network Statistics
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: udp
- - name: tcp
- - name: nfsd.rpc
- description: NFS Server Remote Procedure Calls Statistics
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: calls
- - name: bad_format
- - name: bad_auth
- - name: nfsd.proc2
- description: NFS v2 Server Remote Procedure Calls
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per proc2 call
- - name: nfsd.proc3
- description: NFS v3 Server Remote Procedure Calls
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per proc3 call
- - name: nfsd.proc4
- description: NFS v4 Server Remote Procedure Calls
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per proc4 call
- - name: nfsd.proc4ops
- description: NFS v4 Server Operations
- unit: "operations/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per proc4 operation
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/net/sctp/snmp
- monitored_instance:
- name: SCTP Statistics
- link: ""
- categories:
- - data-collection.linux-systems.network-metrics
- icon_filename: "network-wired.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - sctp
- - stream control transmission protocol
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration provides statistics about the Stream Control Transmission Protocol (SCTP)."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: sctp.established
- description: Current total number of established SCTP associations
- unit: "associations"
- chart_type: line
- dimensions:
- - name: established
- - name: sctp.transitions
- description: SCTP Association Transitions
- unit: "transitions/s"
- chart_type: line
- dimensions:
- - name: active
- - name: passive
- - name: aborted
- - name: shutdown
- - name: sctp.packets
- description: SCTP Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: sctp.packet_errors
- description: SCTP Packet Errors
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: invalid
- - name: checksum
- - name: sctp.fragmentation
- description: SCTP Fragmentation
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: reassembled
- - name: fragmented
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/net/stat/nf_conntrack
- monitored_instance:
- name: Conntrack
- link: ""
- categories:
- - data-collection.linux-systems.firewall-metrics
- icon_filename: "firewall.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - connection tracking mechanism
- - netfilter
- - conntrack
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration monitors the connection tracking mechanism of Netfilter in the Linux Kernel."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: netfilter_conntrack_full
- link: https://github.com/netdata/netdata/blob/master/health/health.d/netfilter.conf
- metric: netfilter.conntrack_sockets
- info: netfilter connection tracker table size utilization
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: netfilter.conntrack_sockets
- description: Connection Tracker Connections
- unit: "active connections"
- chart_type: line
- dimensions:
- - name: connections
- - name: netfilter.conntrack_new
- description: Connection Tracker New Connections
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: new
- - name: ignore
- - name: invalid
- - name: netfilter.conntrack_changes
- description: Connection Tracker Changes
- unit: "changes/s"
- chart_type: line
- dimensions:
- - name: inserted
- - name: deleted
- - name: delete_list
- - name: netfilter.conntrack_expect
- description: Connection Tracker Expectations
- unit: "expectations/s"
- chart_type: line
- dimensions:
- - name: created
- - name: deleted
- - name: new
- - name: netfilter.conntrack_search
- description: Connection Tracker Searches
- unit: "searches/s"
- chart_type: line
- dimensions:
- - name: searched
- - name: restarted
- - name: found
- - name: netfilter.conntrack_errors
- description: Connection Tracker Errors
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: icmp_error
- - name: error_failed
- - name: drop
- - name: early_drop
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/net/stat/synproxy
- monitored_instance:
- name: Synproxy
- link: ""
- categories:
- - data-collection.linux-systems.firewall-metrics
- icon_filename: "firewall.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - synproxy
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration provides statistics about the Synproxy netfilter module."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: netfilter.synproxy_syn_received
- description: SYNPROXY SYN Packets received
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: netfilter.synproxy_conn_reopened
- description: SYNPROXY Connections Reopened
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: reopened
- - name: netfilter.synproxy_cookies
- description: SYNPROXY TCP Cookies
- unit: "cookies/s"
- chart_type: line
- dimensions:
- - name: valid
- - name: invalid
- - name: retransmits
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/spl/kstat/zfs
- monitored_instance:
- name: ZFS Pools
- link: ""
- categories:
- - data-collection.linux-systems.filesystem-metrics.zfs
- icon_filename: "filesystem.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - zfs pools
- - pools
- - zfs
- - filesystem
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration provides metrics about the state of ZFS pools."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: zfs_pool_state_warn
- link: https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf
- metric: zfspool.state
- info: ZFS pool ${label:pool} state is degraded
- - name: zfs_pool_state_crit
- link: https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf
- metric: zfspool.state
- info: ZFS pool ${label:pool} state is faulted or unavail
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: zfs pool
- description: ""
- labels:
- - name: pool
- description: The name of the ZFS pool.
- metrics:
- - name: zfspool.state
- description: ZFS pool state
- unit: "boolean"
- chart_type: line
- dimensions:
- - name: online
- - name: degraded
- - name: faulted
- - name: offline
- - name: removed
- - name: unavail
- - name: suspended
- - meta:
- plugin_name: proc.plugin
- module_name: /proc/spl/kstat/zfs/arcstats
- monitored_instance:
- name: ZFS Adaptive Replacement Cache
- link: ""
- categories:
- - data-collection.linux-systems.filesystem-metrics.zfs
- icon_filename: "filesystem.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - zfs arc
- - arc
- - zfs
- - filesystem
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration monitors ZFS Adadptive Replacement Cache (ARC) statistics."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: zfs_memory_throttle
- link: https://github.com/netdata/netdata/blob/master/health/health.d/zfs.conf
- metric: zfs.memory_ops
- info: number of times ZFS had to limit the ARC growth in the last 10 minutes
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: zfs.arc_size
- description: ZFS ARC Size
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: arcsz
- - name: target
- - name: min
- - name: max
- - name: zfs.l2_size
- description: ZFS L2 ARC Size
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: actual
- - name: size
- - name: zfs.reads
- description: ZFS Reads
- unit: "reads/s"
- chart_type: area
- dimensions:
- - name: arc
- - name: demand
- - name: prefetch
- - name: metadata
- - name: l2
- - name: zfs.bytes
- description: ZFS ARC L2 Read/Write Rate
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: zfs.hits
- description: ZFS ARC Hits
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.hits_rate
- description: ZFS ARC Hits Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.dhits
- description: ZFS Demand Hits
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.dhits_rate
- description: ZFS Demand Hits Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.phits
- description: ZFS Prefetch Hits
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.phits_rate
- description: ZFS Prefetch Hits Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.mhits
- description: ZFS Metadata Hits
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.mhits_rate
- description: ZFS Metadata Hits Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.l2hits
- description: ZFS L2 Hits
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.l2hits_rate
- description: ZFS L2 Hits Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.list_hits
- description: ZFS List Hits
- unit: "hits/s"
- chart_type: area
- dimensions:
- - name: mfu
- - name: mfu_ghost
- - name: mru
- - name: mru_ghost
- - name: zfs.arc_size_breakdown
- description: ZFS ARC Size Breakdown
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: recent
- - name: frequent
- - name: zfs.memory_ops
- description: ZFS Memory Operations
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: direct
- - name: throttled
- - name: indirect
- - name: zfs.important_ops
- description: ZFS Important Operations
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: evict_skip
- - name: deleted
- - name: mutex_miss
- - name: hash_collisions
- - name: zfs.actual_hits
- description: ZFS Actual Cache Hits
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.actual_hits_rate
- description: ZFS Actual Cache Hits Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.demand_data_hits
- description: ZFS Data Demand Efficiency
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.demand_data_hits_rate
- description: ZFS Data Demand Efficiency Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.prefetch_data_hits
- description: ZFS Data Prefetch Efficiency
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.prefetch_data_hits_rate
- description: ZFS Data Prefetch Efficiency Rate
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: zfs.hash_elements
- description: ZFS ARC Hash Elements
- unit: "elements"
- chart_type: line
- dimensions:
- - name: current
- - name: max
- - name: zfs.hash_chains
- description: ZFS ARC Hash Chains
- unit: "chains"
- chart_type: line
- dimensions:
- - name: current
- - name: max
- - meta:
- plugin_name: proc.plugin
- module_name: /sys/fs/btrfs
- monitored_instance:
- name: BTRFS
- link: ""
- categories:
- - data-collection.linux-systems.filesystem-metrics.btrfs
- icon_filename: "filesystem.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - btrfs
- - filesystem
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration provides usage and error statistics from the BTRFS filesystem."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: btrfs_allocated
- link: https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf
- metric: btrfs.disk
- info: percentage of allocated BTRFS physical disk space
- os: "*"
- - name: btrfs_data
- link: https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf
- metric: btrfs.data
- info: utilization of BTRFS data space
- os: "*"
- - name: btrfs_metadata
- link: https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf
- metric: btrfs.metadata
- info: utilization of BTRFS metadata space
- os: "*"
- - name: btrfs_system
- link: https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf
- metric: btrfs.system
- info: utilization of BTRFS system space
- os: "*"
- - name: btrfs_device_read_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf
- metric: btrfs.device_errors
- info: number of encountered BTRFS read errors
- os: "*"
- - name: btrfs_device_write_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf
- metric: btrfs.device_errors
- info: number of encountered BTRFS write errors
- os: "*"
- - name: btrfs_device_flush_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf
- metric: btrfs.device_errors
- info: number of encountered BTRFS flush errors
- os: "*"
- - name: btrfs_device_corruption_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf
- metric: btrfs.device_errors
- info: number of encountered BTRFS corruption errors
- os: "*"
- - name: btrfs_device_generation_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/btrfs.conf
- metric: btrfs.device_errors
- info: number of encountered BTRFS generation errors
- os: "*"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: btrfs filesystem
- description: ""
- labels:
- - name: filesystem_uuid
- description: The BTRFS filesystem UUID.
- - name: filesystem_label
- description: The BTRFS filesystem label.
- metrics:
- - name: btrfs.disk
- description: BTRFS Physical Disk Allocation
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: unallocated
- - name: data_free
- - name: data_used
- - name: meta_free
- - name: meta_used
- - name: sys_free
- - name: sys_used
- - name: btrfs.data
- description: BTRFS Data Allocation
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: used
- - name: btrfs.metadata
- description: BTRFS Metadata Allocation
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: used
- - name: reserved
- - name: btrfs.system
- description: BTRFS System Allocation
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: used
- - name: btrfs.commits
- description: BTRFS Commits
- unit: "commits"
- chart_type: line
- dimensions:
- - name: commits
- - name: btrfs.commits_perc_time
- description: BTRFS Commits Time Share
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: commits
- - name: btrfs.commit_timings
- description: BTRFS Commit Timings
- unit: "ms"
- chart_type: line
- dimensions:
- - name: last
- - name: max
- - name: btrfs device
- description: ""
- labels:
- - name: device_id
- description: The ID of the BTRFS device.
- - name: filesystem_uuid
- description: The BTRFS filesystem UUID.
- - name: filesystem_label
- description: The BTRFS filesystem label.
- metrics:
- - name: btrfs.device_errors
- description: BTRFS Device Errors
- unit: "errors"
- chart_type: line
- dimensions:
- - name: write_errs
- - name: read_errs
- - name: flush_errs
- - name: corruption_errs
- - name: generation_errs
- - meta:
- plugin_name: proc.plugin
- module_name: /sys/class/power_supply
- monitored_instance:
- name: Power Supply
- link: ""
- categories:
- - data-collection.linux-systems.power-supply-metrics
- icon_filename: "powersupply.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - psu
- - power supply
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration monitors Power supply metrics, such as battery status, AC power status and more."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: linux_power_supply_capacity
- link: https://github.com/netdata/netdata/blob/master/health/health.d/linux_power_supply.conf
- metric: powersupply.capacity
- info: percentage of remaining power supply capacity
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: power device
- description: ""
- labels:
- - name: device
- description: The name of the power supply device.
- metrics:
- - name: powersupply.capacity
- description: Battery capacity
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: capacity
- - name: powersupply.charge
- description: Battery charge
- unit: "Ah"
- chart_type: line
- dimensions:
- - name: empty_design
- - name: empty
- - name: now
- - name: full
- - name: full_design
- - name: powersupply.energy
- description: Battery energy
- unit: "Wh"
- chart_type: line
- dimensions:
- - name: empty_design
- - name: empty
- - name: now
- - name: full
- - name: full_design
- - name: powersupply.voltage
- description: Power supply voltage
- unit: "V"
- chart_type: line
- dimensions:
- - name: min_design
- - name: min
- - name: now
- - name: max
- - name: max_design
- - meta:
- plugin_name: proc.plugin
- module_name: /sys/class/drm
- monitored_instance:
- name: AMD GPU
- link: "https://www.amd.com"
- categories:
- - data-collection.hardware-devices-and-sensors
- icon_filename: amd.svg
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - amd
- - gpu
- - hardware
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This integration monitors AMD GPU metrics, such as utilization, clock frequency and memory usage."
- method_description: "It reads `/sys/class/drm` to collect metrics for every AMD GPU card instance it encounters."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: gpu
- description: "These metrics refer to the GPU."
- labels:
- - name: product_name
- description: GPU product name (e.g. AMD RX 6600)
- metrics:
- - name: amdgpu.gpu_utilization
- description: GPU utilization
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: utilization
- - name: amdgpu.gpu_mem_utilization
- description: GPU memory utilization
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: utilization
- - name: amdgpu.gpu_clk_frequency
- description: GPU clock frequency
- unit: "MHz"
- chart_type: line
- dimensions:
- - name: frequency
- - name: amdgpu.gpu_mem_clk_frequency
- description: GPU memory clock frequency
- unit: "MHz"
- chart_type: line
- dimensions:
- - name: frequency
- - name: amdgpu.gpu_mem_vram_usage_perc
- description: VRAM memory usage percentage
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: usage
- - name: amdgpu.gpu_mem_vram_usage
- description: VRAM memory usage
- unit: "bytes"
- chart_type: area
- dimensions:
- - name: free
- - name: used
- - name: amdgpu.gpu_mem_vis_vram_usage_perc
- description: visible VRAM memory usage percentage
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: usage
- - name: amdgpu.gpu_mem_vis_vram_usage
- description: visible VRAM memory usage
- unit: "bytes"
- chart_type: area
- dimensions:
- - name: free
- - name: used
- - name: amdgpu.gpu_mem_gtt_usage_perc
- description: GTT memory usage percentage
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: usage
- - name: amdgpu.gpu_mem_gtt_usage
- description: GTT memory usage
- unit: "bytes"
- chart_type: area
- dimensions:
- - name: free
- - name: used
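The AMD GPU integration above collects by walking `/sys/class/drm` and reading per-card sysfs attributes. As a rough sketch of that approach (not the plugin's actual implementation), the following reads the `gpu_busy_percent` attribute that the amdgpu driver exposes; `card0` and `read_gpu_busy_percent` are placeholders for whichever instances a real scan would find:

#include <stdio.h>

/* Minimal sketch: read one amdgpu utilization value from sysfs.
 * Assumes the standard amdgpu attribute "gpu_busy_percent";
 * "card0" stands in for any discovered card instance. */
static int read_gpu_busy_percent(const char *card, long *out) {
    char path[256];
    snprintf(path, sizeof(path), "/sys/class/drm/%s/device/gpu_busy_percent", card);

    FILE *fp = fopen(path, "r");
    if (!fp)
        return -1; /* not an amdgpu card, or attribute not exposed */

    int rc = (fscanf(fp, "%ld", out) == 1) ? 0 : -1;
    fclose(fp);
    return rc;
}

int main(void) {
    long busy;
    if (read_gpu_busy_percent("card0", &busy) == 0)
        printf("GPU utilization: %ld%%\n", busy);
    return 0;
}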
diff --git a/collectors/proc.plugin/plugin_proc.c b/collectors/proc.plugin/plugin_proc.c
deleted file mode 100644
index 3f11aaf6c..000000000
--- a/collectors/proc.plugin/plugin_proc.c
+++ /dev/null
@@ -1,249 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-static struct proc_module {
- const char *name;
- const char *dim;
-
- int enabled;
-
- int (*func)(int update_every, usec_t dt);
-
- RRDDIM *rd;
-
-} proc_modules[] = {
-
- // system metrics
- {.name = "/proc/stat", .dim = "stat", .func = do_proc_stat},
- {.name = "/proc/uptime", .dim = "uptime", .func = do_proc_uptime},
- {.name = "/proc/loadavg", .dim = "loadavg", .func = do_proc_loadavg},
- {.name = "/proc/sys/fs/file-nr", .dim = "file-nr", .func = do_proc_sys_fs_file_nr},
- {.name = "/proc/sys/kernel/random/entropy_avail", .dim = "entropy", .func = do_proc_sys_kernel_random_entropy_avail},
-
- // pressure metrics
- {.name = "/proc/pressure", .dim = "pressure", .func = do_proc_pressure},
-
- // CPU metrics
- {.name = "/proc/interrupts", .dim = "interrupts", .func = do_proc_interrupts},
- {.name = "/proc/softirqs", .dim = "softirqs", .func = do_proc_softirqs},
-
- // memory metrics
- {.name = "/proc/vmstat", .dim = "vmstat", .func = do_proc_vmstat},
- {.name = "/proc/meminfo", .dim = "meminfo", .func = do_proc_meminfo},
- {.name = "/sys/kernel/mm/ksm", .dim = "ksm", .func = do_sys_kernel_mm_ksm},
- {.name = "/sys/block/zram", .dim = "zram", .func = do_sys_block_zram},
- {.name = "/sys/devices/system/edac/mc", .dim = "edac", .func = do_proc_sys_devices_system_edac_mc},
- {.name = "/sys/devices/pci/aer", .dim = "pci_aer", .func = do_proc_sys_devices_pci_aer},
- {.name = "/sys/devices/system/node", .dim = "numa", .func = do_proc_sys_devices_system_node},
- {.name = "/proc/pagetypeinfo", .dim = "pagetypeinfo", .func = do_proc_pagetypeinfo},
-
- // network metrics
- {.name = "/proc/net/wireless", .dim = "netwireless", .func = do_proc_net_wireless},
- {.name = "/proc/net/sockstat", .dim = "sockstat", .func = do_proc_net_sockstat},
- {.name = "/proc/net/sockstat6", .dim = "sockstat6", .func = do_proc_net_sockstat6},
- {.name = "/proc/net/netstat", .dim = "netstat", .func = do_proc_net_netstat},
- {.name = "/proc/net/sctp/snmp", .dim = "sctp", .func = do_proc_net_sctp_snmp},
- {.name = "/proc/net/softnet_stat", .dim = "softnet", .func = do_proc_net_softnet_stat},
- {.name = "/proc/net/ip_vs/stats", .dim = "ipvs", .func = do_proc_net_ip_vs_stats},
- {.name = "/sys/class/infiniband", .dim = "infiniband", .func = do_sys_class_infiniband},
-
- // firewall metrics
- {.name = "/proc/net/stat/conntrack", .dim = "conntrack", .func = do_proc_net_stat_conntrack},
- {.name = "/proc/net/stat/synproxy", .dim = "synproxy", .func = do_proc_net_stat_synproxy},
-
- // disk metrics
- {.name = "/proc/diskstats", .dim = "diskstats", .func = do_proc_diskstats},
- {.name = "/proc/mdstat", .dim = "mdstat", .func = do_proc_mdstat},
-
- // NFS metrics
- {.name = "/proc/net/rpc/nfsd", .dim = "nfsd", .func = do_proc_net_rpc_nfsd},
- {.name = "/proc/net/rpc/nfs", .dim = "nfs", .func = do_proc_net_rpc_nfs},
-
- // ZFS metrics
- {.name = "/proc/spl/kstat/zfs/arcstats", .dim = "zfs_arcstats", .func = do_proc_spl_kstat_zfs_arcstats},
- {.name = "/proc/spl/kstat/zfs/pool/state",.dim = "zfs_pool_state",.func = do_proc_spl_kstat_zfs_pool_state},
-
- // BTRFS metrics
- {.name = "/sys/fs/btrfs", .dim = "btrfs", .func = do_sys_fs_btrfs},
-
- // IPC metrics
- {.name = "ipc", .dim = "ipc", .func = do_ipc},
-
- // linux power supply metrics
- {.name = "/sys/class/power_supply", .dim = "power_supply", .func = do_sys_class_power_supply},
-
- // GPU metrics
- {.name = "/sys/class/drm", .dim = "drm", .func = do_sys_class_drm},
-
- // the terminator of this array
- {.name = NULL, .dim = NULL, .func = NULL}
-};
-
-#if WORKER_UTILIZATION_MAX_JOB_TYPES < 36
-#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 36
-#endif
-
-static netdata_thread_t *netdev_thread = NULL;
-
-static void proc_main_cleanup(void *ptr)
-{
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- collector_info("cleaning up...");
-
- if (netdev_thread) {
- netdata_thread_join(*netdev_thread, NULL);
- freez(netdev_thread);
- }
-
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-
- worker_unregister();
-}
-
-bool inside_lxc_container = false;
-
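-// Returns true when lxcfs is mounted over /proc (per /proc/self/mounts),
-// i.e. this process sees a container-virtualized /proc.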
-static bool is_lxcfs_proc_mounted(void) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "/proc/self/mounts");
-
- procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
- if (unlikely(!ff))
- return false;
-
- ff = procfile_readall(ff);
- if (unlikely(!ff))
- return false;
-
- unsigned long l, lines = procfile_lines(ff);
-
- for (l = 0; l < lines; l++) {
- size_t words = procfile_linewords(ff, l);
- if (words < 2) {
- continue;
- }
- if (!strcmp(procfile_lineword(ff, l, 0), "lxcfs") && !strncmp(procfile_lineword(ff, l, 1), "/proc", 5)) {
- procfile_close(ff);
- return true;
- }
- }
-
- procfile_close(ff);
-
- return false;
-}
-
-static bool log_proc_module(BUFFER *wb, void *data) {
- struct proc_module *pm = data;
- buffer_sprintf(wb, "proc.plugin[%s]", pm->name);
- return true;
-}
-
-void *proc_main(void *ptr)
-{
- worker_register("PROC");
-
- rrd_collector_started();
-
- if (config_get_boolean("plugin:proc", "/proc/net/dev", CONFIG_BOOLEAN_YES)) {
- netdev_thread = mallocz(sizeof(netdata_thread_t));
- netdata_log_debug(D_SYSTEM, "Starting thread %s.", THREAD_NETDEV_NAME);
- netdata_thread_create(
- netdev_thread, THREAD_NETDEV_NAME, NETDATA_THREAD_OPTION_JOINABLE, netdev_main, netdev_thread);
- }
-
- netdata_thread_cleanup_push(proc_main_cleanup, ptr);
-
- {
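- // register /proc/pagetypeinfo with a default of "no", so the option is
- // visible in the configuration while the module stays disabled unless
- // explicitly enabled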
- config_get_boolean("plugin:proc", "/proc/pagetypeinfo", CONFIG_BOOLEAN_NO);
-
- // check the enabled status for each module
- int i;
- for(i = 0; proc_modules[i].name; i++) {
- struct proc_module *pm = &proc_modules[i];
-
- pm->enabled = config_get_boolean("plugin:proc", pm->name, CONFIG_BOOLEAN_YES);
- pm->rd = NULL;
-
- worker_register_job_name(i, proc_modules[i].dim);
- }
-
- usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- inside_lxc_container = is_lxcfs_proc_mounted();
-
-#define LGS_MODULE_ID 0
-
- ND_LOG_STACK lgs[] = {
- [LGS_MODULE_ID] = ND_LOG_FIELD_TXT(NDF_MODULE, "proc.plugin"),
- ND_LOG_FIELD_END(),
- };
- ND_LOG_STACK_PUSH(lgs);
-
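- // one pass per update interval: sleep on the heartbeat, then give every
- // enabled module a chance to collect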
- while(service_running(SERVICE_COLLECTORS)) {
- worker_is_idle();
- usec_t hb_dt = heartbeat_next(&hb, step);
-
- if(unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
-
- for(i = 0; proc_modules[i].name; i++) {
- if(unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
-
- struct proc_module *pm = &proc_modules[i];
- if(unlikely(!pm->enabled))
- continue;
-
- worker_is_busy(i);
- lgs[LGS_MODULE_ID] = ND_LOG_FIELD_CB(NDF_MODULE, log_proc_module, pm);
- pm->enabled = !pm->func(localhost->rrd_update_every, hb_dt);
- lgs[LGS_MODULE_ID] = ND_LOG_FIELD_TXT(NDF_MODULE, "proc.plugin");
- }
- }
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-int get_numa_node_count(void)
-{
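- // count the node* directories under /sys/devices/system/node once and
- // cache the result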
- static int numa_node_count = -1;
-
- if (numa_node_count != -1)
- return numa_node_count;
-
- numa_node_count = 0;
-
- char name[FILENAME_MAX + 1];
- snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node");
- char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
-
- DIR *dir = opendir(dirname);
- if (dir) {
- struct dirent *de = NULL;
- while ((de = readdir(dir))) {
- if (de->d_type != DT_DIR)
- continue;
-
- if (strncmp(de->d_name, "node", 4) != 0)
- continue;
-
- if (!isdigit(de->d_name[4]))
- continue;
-
- numa_node_count++;
- }
- closedir(dir);
- }
-
- return numa_node_count;
-}
diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h
deleted file mode 100644
index e4fc105ba..000000000
--- a/collectors/proc.plugin/plugin_proc.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGIN_PROC_H
-#define NETDATA_PLUGIN_PROC_H 1
-
-#include "daemon/common.h"
-
-#define PLUGIN_PROC_CONFIG_NAME "proc"
-#define PLUGIN_PROC_NAME PLUGIN_PROC_CONFIG_NAME ".plugin"
-
-#define THREAD_NETDEV_NAME "P[proc netdev]"
-void *netdev_main(void *ptr);
-
-int do_proc_net_wireless(int update_every, usec_t dt);
-int do_proc_diskstats(int update_every, usec_t dt);
-int do_proc_mdstat(int update_every, usec_t dt);
-int do_proc_net_netstat(int update_every, usec_t dt);
-int do_proc_net_stat_conntrack(int update_every, usec_t dt);
-int do_proc_net_ip_vs_stats(int update_every, usec_t dt);
-int do_proc_stat(int update_every, usec_t dt);
-int do_proc_meminfo(int update_every, usec_t dt);
-int do_proc_vmstat(int update_every, usec_t dt);
-int do_proc_net_rpc_nfs(int update_every, usec_t dt);
-int do_proc_net_rpc_nfsd(int update_every, usec_t dt);
-int do_proc_sys_fs_file_nr(int update_every, usec_t dt);
-int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt);
-int do_proc_interrupts(int update_every, usec_t dt);
-int do_proc_softirqs(int update_every, usec_t dt);
-int do_proc_pressure(int update_every, usec_t dt);
-int do_sys_kernel_mm_ksm(int update_every, usec_t dt);
-int do_sys_block_zram(int update_every, usec_t dt);
-int do_proc_loadavg(int update_every, usec_t dt);
-int do_proc_net_stat_synproxy(int update_every, usec_t dt);
-int do_proc_net_softnet_stat(int update_every, usec_t dt);
-int do_proc_uptime(int update_every, usec_t dt);
-int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt);
-int do_proc_sys_devices_pci_aer(int update_every, usec_t dt);
-int do_proc_sys_devices_system_node(int update_every, usec_t dt);
-int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt);
-int do_proc_spl_kstat_zfs_pool_state(int update_every, usec_t dt);
-int do_sys_fs_btrfs(int update_every, usec_t dt);
-int do_proc_net_sockstat(int update_every, usec_t dt);
-int do_proc_net_sockstat6(int update_every, usec_t dt);
-int do_proc_net_sctp_snmp(int update_every, usec_t dt);
-int do_ipc(int update_every, usec_t dt);
-int do_sys_class_power_supply(int update_every, usec_t dt);
-int do_proc_pagetypeinfo(int update_every, usec_t dt);
-int do_sys_class_infiniband(int update_every, usec_t dt);
-int do_sys_class_drm(int update_every, usec_t dt);
-int get_numa_node_count(void);
-
-// metrics that need to be shared among data collectors
-extern unsigned long long zfs_arcstats_shrinkable_cache_size_bytes;
-extern bool inside_lxc_container;
-
-// netdev renames
-void netdev_rename_device_add(
- const char *host_device,
- const char *container_device,
- const char *container_name,
- RRDLABELS *labels,
- const char *ctx_prefix,
- const DICTIONARY_ITEM *cgroup_netdev_link);
-
-void netdev_rename_device_del(const char *host_device);
-
-#include "proc_self_mountinfo.h"
-#include "proc_pressure.h"
-#include "zfs_common.h"
-
-#endif /* NETDATA_PLUGIN_PROC_H */
diff --git a/collectors/proc.plugin/proc_diskstats.c b/collectors/proc.plugin/proc_diskstats.c
deleted file mode 100644
index 475d90835..000000000
--- a/collectors/proc.plugin/proc_diskstats.c
+++ /dev/null
@@ -1,2515 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define RRD_TYPE_DISK "disk"
-#define PLUGIN_PROC_MODULE_DISKSTATS_NAME "/proc/diskstats"
-#define CONFIG_SECTION_PLUGIN_PROC_DISKSTATS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_DISKSTATS_NAME
-
-#define RRDFUNCTIONS_DISKSTATS_HELP "View block device statistics"
-
-#define DISK_TYPE_UNKNOWN 0
-#define DISK_TYPE_PHYSICAL 1
-#define DISK_TYPE_PARTITION 2
-#define DISK_TYPE_VIRTUAL 3
-
-#define DEFAULT_PREFERRED_IDS "*"
-#define DEFAULT_EXCLUDED_DISKS "loop* ram*"
-
-static netdata_mutex_t diskstats_dev_mutex = NETDATA_MUTEX_INITIALIZER;
-
-static struct disk {
- char *disk; // the name of the disk (sda, sdb, etc., after being looked up)
- char *device; // the device of the disk (before being looked up)
- char *disk_by_id;
- char *model;
- char *serial;
-// bool rotational;
-// bool removable;
- uint32_t hash;
- unsigned long major;
- unsigned long minor;
- int sector_size;
- int type;
-
- bool excluded;
- bool function_ready;
-
- char *mount_point;
-
- char *chart_id;
-
- // disk options caching
- int do_io;
- int do_ops;
- int do_mops;
- int do_iotime;
- int do_qops;
- int do_util;
- int do_ext;
- int do_backlog;
- int do_bcache;
-
- int updated;
-
- int device_is_bcache;
-
- char *bcache_filename_dirty_data;
- char *bcache_filename_writeback_rate;
- char *bcache_filename_cache_congested;
- char *bcache_filename_cache_available_percent;
- char *bcache_filename_stats_five_minute_cache_hit_ratio;
- char *bcache_filename_stats_hour_cache_hit_ratio;
- char *bcache_filename_stats_day_cache_hit_ratio;
- char *bcache_filename_stats_total_cache_hit_ratio;
- char *bcache_filename_stats_total_cache_hits;
- char *bcache_filename_stats_total_cache_misses;
- char *bcache_filename_stats_total_cache_miss_collisions;
- char *bcache_filename_stats_total_cache_bypass_hits;
- char *bcache_filename_stats_total_cache_bypass_misses;
- char *bcache_filename_stats_total_cache_readaheads;
- char *bcache_filename_cache_read_races;
- char *bcache_filename_cache_io_errors;
- char *bcache_filename_priority_stats;
-
- usec_t bcache_priority_stats_update_every_usec;
- usec_t bcache_priority_stats_elapsed_usec;
-
- RRDSET *st_io;
- RRDDIM *rd_io_reads;
- RRDDIM *rd_io_writes;
-
- RRDSET *st_ext_io;
- RRDDIM *rd_io_discards;
-
- RRDSET *st_ops;
- RRDDIM *rd_ops_reads;
- RRDDIM *rd_ops_writes;
-
- RRDSET *st_ext_ops;
- RRDDIM *rd_ops_discards;
- RRDDIM *rd_ops_flushes;
-
- RRDSET *st_qops;
- RRDDIM *rd_qops_operations;
-
- RRDSET *st_backlog;
- RRDDIM *rd_backlog_backlog;
-
- RRDSET *st_busy;
- RRDDIM *rd_busy_busy;
-
- RRDSET *st_util;
- RRDDIM *rd_util_utilization;
-
- RRDSET *st_mops;
- RRDDIM *rd_mops_reads;
- RRDDIM *rd_mops_writes;
-
- RRDSET *st_ext_mops;
- RRDDIM *rd_mops_discards;
-
- RRDSET *st_iotime;
- RRDDIM *rd_iotime_reads;
- RRDDIM *rd_iotime_writes;
-
- RRDSET *st_ext_iotime;
- RRDDIM *rd_iotime_discards;
- RRDDIM *rd_iotime_flushes;
-
- RRDSET *st_await;
- RRDDIM *rd_await_reads;
- RRDDIM *rd_await_writes;
-
- RRDSET *st_ext_await;
- RRDDIM *rd_await_discards;
- RRDDIM *rd_await_flushes;
-
- RRDSET *st_avgsz;
- RRDDIM *rd_avgsz_reads;
- RRDDIM *rd_avgsz_writes;
-
- RRDSET *st_ext_avgsz;
- RRDDIM *rd_avgsz_discards;
-
- RRDSET *st_svctm;
- RRDDIM *rd_svctm_svctm;
-
- RRDSET *st_bcache_size;
- RRDDIM *rd_bcache_dirty_size;
-
- RRDSET *st_bcache_usage;
- RRDDIM *rd_bcache_available_percent;
-
- RRDSET *st_bcache_hit_ratio;
- RRDDIM *rd_bcache_hit_ratio_5min;
- RRDDIM *rd_bcache_hit_ratio_1hour;
- RRDDIM *rd_bcache_hit_ratio_1day;
- RRDDIM *rd_bcache_hit_ratio_total;
-
- RRDSET *st_bcache;
- RRDDIM *rd_bcache_hits;
- RRDDIM *rd_bcache_misses;
- RRDDIM *rd_bcache_miss_collisions;
-
- RRDSET *st_bcache_bypass;
- RRDDIM *rd_bcache_bypass_hits;
- RRDDIM *rd_bcache_bypass_misses;
-
- RRDSET *st_bcache_rates;
- RRDDIM *rd_bcache_rate_congested;
- RRDDIM *rd_bcache_readaheads;
- RRDDIM *rd_bcache_rate_writeback;
-
- RRDSET *st_bcache_cache_allocations;
- RRDDIM *rd_bcache_cache_allocations_unused;
- RRDDIM *rd_bcache_cache_allocations_clean;
- RRDDIM *rd_bcache_cache_allocations_dirty;
- RRDDIM *rd_bcache_cache_allocations_metadata;
- RRDDIM *rd_bcache_cache_allocations_unknown;
-
- RRDSET *st_bcache_cache_read_races;
- RRDDIM *rd_bcache_cache_read_races;
- RRDDIM *rd_bcache_cache_io_errors;
-
- struct disk *next;
-} *disk_root = NULL;
-
-#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete___safe_from_collector_thread(st); (st) = NULL; } } while(st)
-
-// static char *path_to_get_hw_sector_size = NULL;
-// static char *path_to_get_hw_sector_size_partitions = NULL;
-static char *path_to_sys_dev_block_major_minor_string = NULL;
-static char *path_to_sys_block_device = NULL;
-static char *path_to_sys_block_device_bcache = NULL;
-static char *path_to_sys_devices_virtual_block_device = NULL;
-static char *path_to_device_mapper = NULL;
-static char *path_to_dev_disk = NULL;
-static char *path_to_sys_block = NULL;
-static char *path_to_device_label = NULL;
-static char *path_to_device_id = NULL;
-static char *path_to_veritas_volume_groups = NULL;
-static int name_disks_by_id = CONFIG_BOOLEAN_NO;
-static int global_bcache_priority_stats_update_every = 0; // disabled by default
-
-static int global_enable_new_disks_detected_at_runtime = CONFIG_BOOLEAN_YES,
- global_enable_performance_for_physical_disks = CONFIG_BOOLEAN_AUTO,
- global_enable_performance_for_virtual_disks = CONFIG_BOOLEAN_AUTO,
- global_enable_performance_for_partitions = CONFIG_BOOLEAN_NO,
- global_do_io = CONFIG_BOOLEAN_AUTO,
- global_do_ops = CONFIG_BOOLEAN_AUTO,
- global_do_mops = CONFIG_BOOLEAN_AUTO,
- global_do_iotime = CONFIG_BOOLEAN_AUTO,
- global_do_qops = CONFIG_BOOLEAN_AUTO,
- global_do_util = CONFIG_BOOLEAN_AUTO,
- global_do_ext = CONFIG_BOOLEAN_AUTO,
- global_do_backlog = CONFIG_BOOLEAN_AUTO,
- global_do_bcache = CONFIG_BOOLEAN_AUTO,
- globals_initialized = 0,
- global_cleanup_removed_disks = 1;
-
-static SIMPLE_PATTERN *preferred_ids = NULL;
-static SIMPLE_PATTERN *excluded_disks = NULL;
-
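-// bcache sysfs files report sizes as human-readable values (e.g. "3.9M"),
-// so parse the number and scale it by the unit suffix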
-static unsigned long long int bcache_read_number_with_units(const char *filename) {
- char buffer[50 + 1];
- if(read_file(filename, buffer, 50) == 0) {
- static int unknown_units_error = 10;
-
- char *end = NULL;
- NETDATA_DOUBLE value = str2ndd(buffer, &end);
- if(end && *end) {
- if(*end == 'k')
- return (unsigned long long int)(value * 1024.0);
- else if(*end == 'M')
- return (unsigned long long int)(value * 1024.0 * 1024.0);
- else if(*end == 'G')
- return (unsigned long long int)(value * 1024.0 * 1024.0 * 1024.0);
- else if(*end == 'T')
- return (unsigned long long int)(value * 1024.0 * 1024.0 * 1024.0 * 1024.0);
- else if(unknown_units_error > 0) {
- collector_error("bcache file '%s' provides value '%s' with unknown units '%s'", filename, buffer, end);
- unknown_units_error--;
- }
- }
-
- return (unsigned long long int)value;
- }
-
- return 0;
-}
-
-void bcache_read_priority_stats(struct disk *d, const char *family, int update_every, usec_t dt) {
- static procfile *ff = NULL;
- static char *separators = " \t:%[]";
-
- static ARL_BASE *arl_base = NULL;
-
- static unsigned long long unused;
- static unsigned long long clean;
- static unsigned long long dirty;
- static unsigned long long metadata;
- static unsigned long long unknown;
-
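- // reading priority_stats is relatively expensive, so it is sampled
- // on its own, longer interval: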
- // check if it is time to update this metric
- d->bcache_priority_stats_elapsed_usec += dt;
- if(likely(d->bcache_priority_stats_elapsed_usec < d->bcache_priority_stats_update_every_usec)) return;
- d->bcache_priority_stats_elapsed_usec = 0;
-
- // initialize ARL
- if(unlikely(!arl_base)) {
- arl_base = arl_create("bcache/priority_stats", NULL, 60);
- arl_expect(arl_base, "Unused", &unused);
- arl_expect(arl_base, "Clean", &clean);
- arl_expect(arl_base, "Dirty", &dirty);
- arl_expect(arl_base, "Metadata", &metadata);
- }
-
- ff = procfile_reopen(ff, d->bcache_filename_priority_stats, separators, PROCFILE_FLAG_DEFAULT);
- if(likely(ff)) ff = procfile_readall(ff);
- if(unlikely(!ff)) {
- separators = " \t:%[]";
- return;
- }
-
- // do not reset the separators on every iteration
- separators = NULL;
-
- arl_begin(arl_base);
- unused = clean = dirty = metadata = unknown = 0;
-
- size_t lines = procfile_lines(ff), l;
-
- for(l = 0; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
- if(unlikely(words < 2)) {
- if(unlikely(words)) collector_error("Cannot read '%s' line %zu. Expected 2 params, read %zu.", d->bcache_filename_priority_stats, l, words);
- continue;
- }
-
- if(unlikely(arl_check(arl_base,
- procfile_lineword(ff, l, 0),
- procfile_lineword(ff, l, 1)))) break;
- }
-
- // the values are percentages; guard the unsigned subtraction against
- // rounding pushing the accounted total above 100
- unsigned long long accounted = unused + clean + dirty + metadata;
- unknown = (accounted < 100) ? 100 - accounted : 0;
-
- // create / update the cache allocations chart
- {
- if(unlikely(!d->st_bcache_cache_allocations)) {
- d->st_bcache_cache_allocations = rrdset_create_localhost(
- "disk_bcache_cache_alloc"
- , d->chart_id
- , d->disk
- , family
- , "disk.bcache_cache_alloc"
- , "BCache Cache Allocations"
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- d->rd_bcache_cache_allocations_unused = rrddim_add(d->st_bcache_cache_allocations, "unused", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_bcache_cache_allocations_dirty = rrddim_add(d->st_bcache_cache_allocations, "dirty", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_bcache_cache_allocations_clean = rrddim_add(d->st_bcache_cache_allocations, "clean", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_bcache_cache_allocations_metadata = rrddim_add(d->st_bcache_cache_allocations, "metadata", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_bcache_cache_allocations_unknown = rrddim_add(d->st_bcache_cache_allocations, "undefined", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- d->bcache_priority_stats_update_every_usec = update_every * USEC_PER_SEC;
- }
-
- rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_unused, unused);
- rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_dirty, dirty);
- rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_clean, clean);
- rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_metadata, metadata);
- rrddim_set_by_pointer(d->st_bcache_cache_allocations, d->rd_bcache_cache_allocations_unknown, unknown);
- rrdset_done(d->st_bcache_cache_allocations);
- }
-}
-
-static inline int is_major_enabled(int major) {
- static int8_t *major_configs = NULL;
- static size_t major_size = 0;
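- // per-major config results are cached here; -1 marks a major that
- // has not been looked up in the config yet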
-
- if(major < 0) return 1;
-
- size_t wanted_size = (size_t)major + 1;
-
- if(major_size < wanted_size) {
- major_configs = reallocz(major_configs, wanted_size * sizeof(int8_t));
-
- size_t i;
- for(i = major_size; i < wanted_size ; i++)
- major_configs[i] = -1;
-
- major_size = wanted_size;
- }
-
- if(major_configs[major] == -1) {
- char buffer[CONFIG_MAX_NAME + 1];
- snprintfz(buffer, CONFIG_MAX_NAME, "performance metrics for disks with major %d", major);
- major_configs[major] = (char)config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, buffer, 1);
- }
-
- return (int)major_configs[major];
-}
-
-static inline int get_disk_name_from_path(const char *path, char *result, size_t result_size, unsigned long major, unsigned long minor, char *disk, char *prefix, int depth) {
- //collector_info("DEVICE-MAPPER ('%s', %lu:%lu): examining directory '%s' (allowed depth %d).", disk, major, minor, path, depth);
-
- int found = 0, preferred = 0;
-
- char *first_result = mallocz(result_size + 1);
-
- DIR *dir = opendir(path);
- if (!dir) {
- if (errno == ENOENT)
- nd_log_collector(NDLP_DEBUG, "DEVICE-MAPPER ('%s', %lu:%lu): Cannot open directory '%s': no such file or directory.", disk, major, minor, path);
- else
- collector_error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot open directory '%s'.", disk, major, minor, path);
- goto failed;
- }
-
- struct dirent *de = NULL;
- while ((de = readdir(dir))) {
- if(de->d_type == DT_DIR) {
- if((de->d_name[0] == '.' && de->d_name[1] == '\0') || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0'))
- continue;
-
- if(depth <= 0) {
- collector_error("DEVICE-MAPPER ('%s', %lu:%lu): Depth limit reached for path '%s/%s'. Ignoring path.", disk, major, minor, path, de->d_name);
- break;
- }
- else {
- char *path_nested = NULL;
- char *prefix_nested = NULL;
-
- {
- char buffer[FILENAME_MAX + 1];
- snprintfz(buffer, FILENAME_MAX, "%s/%s", path, de->d_name);
- path_nested = strdupz(buffer);
-
- snprintfz(buffer, FILENAME_MAX, "%s%s%s", (prefix)?prefix:"", (prefix)?"_":"", de->d_name);
- prefix_nested = strdupz(buffer);
- }
-
- found = get_disk_name_from_path(path_nested, result, result_size, major, minor, disk, prefix_nested, depth - 1);
- freez(path_nested);
- freez(prefix_nested);
-
- if(found) break;
- }
- }
- else if(de->d_type == DT_LNK || de->d_type == DT_BLK) {
- char filename[FILENAME_MAX + 1];
-
- if(de->d_type == DT_LNK) {
- snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name);
- ssize_t len = readlink(filename, result, result_size - 1);
- if(len <= 0) {
- collector_error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot read link '%s'.", disk, major, minor, filename);
- continue;
- }
-
- result[len] = '\0';
- if(result[0] != '/')
- snprintfz(filename, FILENAME_MAX, "%s/%s", path, result);
- else
- strncpyz(filename, result, FILENAME_MAX);
- }
- else {
- snprintfz(filename, FILENAME_MAX, "%s/%s", path, de->d_name);
- }
-
- struct stat sb;
- if(stat(filename, &sb) == -1) {
- collector_error("DEVICE-MAPPER ('%s', %lu:%lu): Cannot stat() file '%s'.", disk, major, minor, filename);
- continue;
- }
-
- if((sb.st_mode & S_IFMT) != S_IFBLK) {
- //collector_info("DEVICE-MAPPER ('%s', %lu:%lu): file '%s' is not a block device.", disk, major, minor, filename);
- continue;
- }
-
- if(major(sb.st_rdev) != major || minor(sb.st_rdev) != minor || strcmp(basename(filename), disk)) {
- //collector_info("DEVICE-MAPPER ('%s', %lu:%lu): filename '%s' does not match %lu:%lu.", disk, major, minor, filename, (unsigned long)major(sb.st_rdev), (unsigned long)minor(sb.st_rdev));
- continue;
- }
-
- //collector_info("DEVICE-MAPPER ('%s', %lu:%lu): filename '%s' matches.", disk, major, minor, filename);
-
- snprintfz(result, result_size - 1, "%s%s%s", (prefix)?prefix:"", (prefix)?"_":"", de->d_name);
-
- if(!found) {
- strncpyz(first_result, result, result_size);
- found = 1;
- }
-
- if(simple_pattern_matches(preferred_ids, result)) {
- preferred = 1;
- break;
- }
- }
- }
- closedir(dir);
-
-
-failed:
-
- if(!found)
- result[0] = '\0';
- else if(!preferred)
- strncpyz(result, first_result, result_size);
-
- freez(first_result);
-
- return found;
-}
-
-static inline char *get_disk_name(unsigned long major, unsigned long minor, char *disk) {
- char result[FILENAME_MAX + 2] = "";
-
- if(!path_to_device_mapper || !*path_to_device_mapper || !get_disk_name_from_path(path_to_device_mapper, result, FILENAME_MAX + 1, major, minor, disk, NULL, 0))
- if(!path_to_device_label || !*path_to_device_label || !get_disk_name_from_path(path_to_device_label, result, FILENAME_MAX + 1, major, minor, disk, NULL, 0))
- if(!path_to_veritas_volume_groups || !*path_to_veritas_volume_groups || !get_disk_name_from_path(path_to_veritas_volume_groups, result, FILENAME_MAX + 1, major, minor, disk, "vx", 2))
- if(name_disks_by_id != CONFIG_BOOLEAN_YES || !path_to_device_id || !*path_to_device_id || !get_disk_name_from_path(path_to_device_id, result, FILENAME_MAX + 1, major, minor, disk, NULL, 0))
- strncpy(result, disk, FILENAME_MAX);
-
- if(!result[0])
- strncpy(result, disk, FILENAME_MAX);
-
- netdata_fix_chart_name(result);
- return strdupz(result);
-}
-
-static inline bool ends_with(const char *str, const char *suffix) {
- if (!str || !suffix)
- return false;
-
- size_t len_str = strlen(str);
- size_t len_suffix = strlen(suffix);
- if (len_suffix > len_str)
- return false;
-
- return strncmp(str + len_str - len_suffix, suffix, len_suffix) == 0;
-}
-
-static inline char *get_disk_by_id(char *device) {
- char pathname[256 + 1];
- snprintfz(pathname, sizeof(pathname) - 1, "%s/by-id", path_to_dev_disk);
-
- struct dirent *entry;
- DIR *dp = opendir(pathname);
- if (dp == NULL) {
- internal_error(true, "Cannot open '%s'", pathname);
- return NULL;
- }
-
- while ((entry = readdir(dp))) {
- // We ignore the '.' and '..' entries
- if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0)
- continue;
-
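- // skip machine-generated ids (md/dm uuids, nvme eui, wwn, lvm pv
- // uuids), preferring the more descriptive model-serial names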
- if(strncmp(entry->d_name, "md-uuid-", 8) == 0 ||
- strncmp(entry->d_name, "dm-uuid-", 8) == 0 ||
- strncmp(entry->d_name, "nvme-eui.", 9) == 0 ||
- strncmp(entry->d_name, "wwn-", 4) == 0 ||
- strncmp(entry->d_name, "lvm-pv-uuid-", 12) == 0)
- continue;
-
- char link_target[256 + 1];
- char full_path[256 + 1];
- snprintfz(full_path, 256, "%s/%s", pathname, entry->d_name);
-
- ssize_t len = readlink(full_path, link_target, 256);
- if (len == -1)
- continue;
-
- link_target[len] = '\0';
-
- if (ends_with(link_target, device)) {
- char *s = strdupz(entry->d_name);
- closedir(dp);
- return s;
- }
- }
-
- closedir(dp);
- return NULL;
-}
-
-static inline char *get_disk_model(char *device) {
- char path[256 + 1];
- char buffer[256 + 1];
-
- snprintfz(path, sizeof(path) - 1, "%s/%s/device/model", path_to_sys_block, device);
- if(read_file(path, buffer, 256) != 0) {
- snprintfz(path, sizeof(path) - 1, "%s/%s/device/name", path_to_sys_block, device);
- if(read_file(path, buffer, 256) != 0)
- return NULL;
- }
-
- char *clean = trim(buffer);
- if (!clean)
- return NULL;
-
- return strdupz(clean);
-}
-
-static inline char *get_disk_serial(char *device) {
- char path[256 + 1];
- char buffer[256 + 1];
-
- snprintfz(path, sizeof(path) - 1, "%s/%s/device/serial", path_to_sys_block, device);
- if(read_file(path, buffer, 256) != 0)
- return NULL;
-
- return strdupz(buffer);
-}
-
-//static inline bool get_disk_rotational(char *device) {
-// char path[256 + 1];
-// char buffer[256 + 1];
-//
-// snprintfz(path, 256, "%s/%s/queue/rotational", path_to_sys_block, device);
-// if(read_file(path, buffer, 256) != 0)
-// return false;
-//
-// return buffer[0] == '1';
-//}
-//
-//static inline bool get_disk_removable(char *device) {
-// char path[256 + 1];
-// char buffer[256 + 1];
-//
-// snprintfz(path, 256, "%s/%s/removable", path_to_sys_block, device);
-// if(read_file(path, buffer, 256) != 0)
-// return false;
-//
-// return buffer[0] == '1';
-//}
-
-static void get_disk_config(struct disk *d) {
- int def_enable = global_enable_new_disks_detected_at_runtime;
-
- if(def_enable != CONFIG_BOOLEAN_NO && (simple_pattern_matches(excluded_disks, d->device) || simple_pattern_matches(excluded_disks, d->disk))) {
- d->excluded = true;
- def_enable = CONFIG_BOOLEAN_NO;
- }
-
- char var_name[4096 + 1];
- snprintfz(var_name, 4096, CONFIG_SECTION_PLUGIN_PROC_DISKSTATS ":%s", d->disk);
-
- if (config_exists(var_name, "enable"))
- def_enable = config_get_boolean_ondemand(var_name, "enable", def_enable);
-
- if(unlikely(def_enable == CONFIG_BOOLEAN_NO)) {
- // the user does not want any metrics for this disk
- d->do_io = CONFIG_BOOLEAN_NO;
- d->do_ops = CONFIG_BOOLEAN_NO;
- d->do_mops = CONFIG_BOOLEAN_NO;
- d->do_iotime = CONFIG_BOOLEAN_NO;
- d->do_qops = CONFIG_BOOLEAN_NO;
- d->do_util = CONFIG_BOOLEAN_NO;
- d->do_ext = CONFIG_BOOLEAN_NO;
- d->do_backlog = CONFIG_BOOLEAN_NO;
- d->do_bcache = CONFIG_BOOLEAN_NO;
- }
- else {
- // this disk is enabled
- // check its direct settings
-
- int def_performance = CONFIG_BOOLEAN_AUTO;
-
- // since this is 'on demand', we can pick the default performance
- // setting based on the type of the disk
-
- if(!d->device_is_bcache) {
- switch(d->type) {
- default:
- case DISK_TYPE_UNKNOWN:
- break;
-
- case DISK_TYPE_PHYSICAL:
- def_performance = global_enable_performance_for_physical_disks;
- break;
-
- case DISK_TYPE_PARTITION:
- def_performance = global_enable_performance_for_partitions;
- break;
-
- case DISK_TYPE_VIRTUAL:
- def_performance = global_enable_performance_for_virtual_disks;
- break;
- }
- }
-
- // check if we have to disable performance for this disk
- if(def_performance)
- def_performance = is_major_enabled((int)d->major);
-
- // ------------------------------------------------------------
- // now we have the final def_performance
- // to work with
-
- // def_performance
- // check the user configuration (this will also show our 'on demand' decision)
- if (config_exists(var_name, "enable performance metrics"))
- def_performance = config_get_boolean_ondemand(var_name, "enable performance metrics", def_performance);
-
- int ddo_io = CONFIG_BOOLEAN_NO,
- ddo_ops = CONFIG_BOOLEAN_NO,
- ddo_mops = CONFIG_BOOLEAN_NO,
- ddo_iotime = CONFIG_BOOLEAN_NO,
- ddo_qops = CONFIG_BOOLEAN_NO,
- ddo_util = CONFIG_BOOLEAN_NO,
- ddo_ext = CONFIG_BOOLEAN_NO,
- ddo_backlog = CONFIG_BOOLEAN_NO,
- ddo_bcache = CONFIG_BOOLEAN_NO;
-
- // we enable individual performance charts only when def_performance is not disabled
- if(unlikely(def_performance != CONFIG_BOOLEAN_NO)) {
- ddo_io = global_do_io,
- ddo_ops = global_do_ops,
- ddo_mops = global_do_mops,
- ddo_iotime = global_do_iotime,
- ddo_qops = global_do_qops,
- ddo_util = global_do_util,
- ddo_ext = global_do_ext,
- ddo_backlog = global_do_backlog,
- ddo_bcache = global_do_bcache;
- } else {
- d->excluded = true;
- }
-
- d->do_io = ddo_io;
- d->do_ops = ddo_ops;
- d->do_mops = ddo_mops;
- d->do_iotime = ddo_iotime;
- d->do_qops = ddo_qops;
- d->do_util = ddo_util;
- d->do_ext = ddo_ext;
- d->do_backlog = ddo_backlog;
-
- if (config_exists(var_name, "bandwidth"))
- d->do_io = config_get_boolean_ondemand(var_name, "bandwidth", ddo_io);
- if (config_exists(var_name, "operations"))
- d->do_ops = config_get_boolean_ondemand(var_name, "operations", ddo_ops);
- if (config_exists(var_name, "merged operations"))
- d->do_mops = config_get_boolean_ondemand(var_name, "merged operations", ddo_mops);
- if (config_exists(var_name, "i/o time"))
- d->do_iotime = config_get_boolean_ondemand(var_name, "i/o time", ddo_iotime);
- if (config_exists(var_name, "queued operations"))
- d->do_qops = config_get_boolean_ondemand(var_name, "queued operations", ddo_qops);
- if (config_exists(var_name, "utilization percentage"))
- d->do_util = config_get_boolean_ondemand(var_name, "utilization percentage", ddo_util);
- if (config_exists(var_name, "extended operations"))
- d->do_ext = config_get_boolean_ondemand(var_name, "extended operations", ddo_ext);
- if (config_exists(var_name, "backlog"))
- d->do_backlog = config_get_boolean_ondemand(var_name, "backlog", ddo_backlog);
-
- d->do_bcache = ddo_bcache;
-
- if (d->device_is_bcache) {
- if (config_exists(var_name, "bcache"))
- d->do_bcache = config_get_boolean_ondemand(var_name, "bcache", ddo_bcache);
- } else {
- d->do_bcache = 0;
- }
- }
-}
-
-static struct disk *get_disk(unsigned long major, unsigned long minor, char *disk) {
- static struct mountinfo *disk_mountinfo_root = NULL;
-
- struct disk *d;
-
- uint32_t hash = simple_hash(disk);
-
- // search for it in our RAM list.
- // this is a sequential scan, but the number of disks / partitions
- // in a system is small enough for this to be acceptable
- for(d = disk_root; d ; d = d->next){
- if (unlikely(
- d->major == major && d->minor == minor && d->hash == hash && !strcmp(d->device, disk)))
- return d;
- }
-
- // not found
- // create a new disk structure
- d = (struct disk *)callocz(1, sizeof(struct disk));
-
- d->excluded = false;
- d->function_ready = false;
- d->disk = get_disk_name(major, minor, disk);
- d->device = strdupz(disk);
- d->disk_by_id = get_disk_by_id(disk);
- d->model = get_disk_model(disk);
- d->serial = get_disk_serial(disk);
-// d->rotational = get_disk_rotational(disk);
-// d->removable = get_disk_removable(disk);
- d->hash = simple_hash(d->device);
- d->major = major;
- d->minor = minor;
- d->type = DISK_TYPE_UNKNOWN; // Default type. Changed later if not correct.
- d->sector_size = 512; // the default, will be changed below
- d->next = NULL;
-
- // append it to the list
- if(unlikely(!disk_root))
- disk_root = d;
- else {
- struct disk *last;
- for(last = disk_root; last->next ;last = last->next);
- last->next = d;
- }
-
- d->chart_id = strdupz(d->device);
-
- // read device uuid if it is an LVM volume
- if (!strncmp(d->device, "dm-", 3)) {
- char uuid_filename[FILENAME_MAX + 1];
- int size = snprintfz(uuid_filename, FILENAME_MAX, path_to_sys_devices_virtual_block_device, disk);
- strncat(uuid_filename, "/dm/uuid", FILENAME_MAX - size);
-
- char device_uuid[RRD_ID_LENGTH_MAX + 1];
- if (!read_file(uuid_filename, device_uuid, RRD_ID_LENGTH_MAX) && !strncmp(device_uuid, "LVM-", 4)) {
- trim(device_uuid);
-
- char chart_id[RRD_ID_LENGTH_MAX + 1];
- snprintf(chart_id, RRD_ID_LENGTH_MAX, "%s-%s", d->device, device_uuid + 4);
-
- freez(d->chart_id);
- d->chart_id = strdupz(chart_id);
- }
- }
-
- char buffer[FILENAME_MAX + 1];
-
- // find if it is a physical disk
- // by checking if /sys/block/DISK is readable.
- snprintfz(buffer, FILENAME_MAX, path_to_sys_block_device, disk);
- if(likely(access(buffer, R_OK) == 0)) {
- // assign it here, but it will be overwritten if it is not a physical disk
- d->type = DISK_TYPE_PHYSICAL;
- }
-
- // find if it is a partition
- // by checking if /sys/dev/block/MAJOR:MINOR/partition is readable.
- snprintfz(buffer, FILENAME_MAX, path_to_sys_dev_block_major_minor_string, major, minor, "partition");
- if(likely(access(buffer, R_OK) == 0)) {
- d->type = DISK_TYPE_PARTITION;
- }
- else {
- // find if it is a virtual disk
- // by checking if /sys/devices/virtual/block/DISK is readable.
- snprintfz(buffer, FILENAME_MAX, path_to_sys_devices_virtual_block_device, disk);
- if(likely(access(buffer, R_OK) == 0)) {
- d->type = DISK_TYPE_VIRTUAL;
- }
- else {
- // find if it is a virtual device
- // by checking if /sys/dev/block/MAJOR:MINOR/slaves has entries
- snprintfz(buffer, FILENAME_MAX, path_to_sys_dev_block_major_minor_string, major, minor, "slaves/");
- DIR *dirp = opendir(buffer);
- if (likely(dirp != NULL)) {
- struct dirent *dp;
- while ((dp = readdir(dirp))) {
- // '.' and '..' are present even in empty directories.
- if (unlikely(strcmp(dp->d_name, ".") == 0 || strcmp(dp->d_name, "..") == 0)) {
- continue;
- }
-
- d->type = DISK_TYPE_VIRTUAL;
-
- // stop after the first real entry - one slave is enough to mark the device virtual.
- break;
- }
- if (unlikely(closedir(dirp) == -1))
- collector_error("Unable to close dir %s", buffer);
- }
- }
- }
-
- // ------------------------------------------------------------------------
- // check if we can find its mount point
-
- // mountinfo_find() can be called with NULL disk_mountinfo_root
- struct mountinfo *mi = mountinfo_find(disk_mountinfo_root, d->major, d->minor, d->device);
- if(unlikely(!mi)) {
- // mountinfo_free_all can be called with NULL
- mountinfo_free_all(disk_mountinfo_root);
- disk_mountinfo_root = mountinfo_read(0);
- mi = mountinfo_find(disk_mountinfo_root, d->major, d->minor, d->device);
- }
-
- if(unlikely(mi))
- d->mount_point = strdupz(mi->mount_point);
- else
- d->mount_point = NULL;
-
- // ------------------------------------------------------------------------
- // find the disk sector size
-
- /*
- * sector size is always 512 bytes inside the kernel #3481
- *
- {
- char tf[FILENAME_MAX + 1], *t;
- strncpyz(tf, d->device, FILENAME_MAX);
-
- // replace all / with !
- for(t = tf; *t ;t++)
- if(unlikely(*t == '/')) *t = '!';
-
- if(likely(d->type == DISK_TYPE_PARTITION))
- snprintfz(buffer, FILENAME_MAX, path_to_get_hw_sector_size_partitions, d->major, d->minor, tf);
- else
- snprintfz(buffer, FILENAME_MAX, path_to_get_hw_sector_size, tf);
-
- FILE *fpss = fopen(buffer, "r");
- if(likely(fpss)) {
- char buffer2[1024 + 1];
- char *tmp = fgets(buffer2, 1024, fpss);
-
- if(likely(tmp)) {
- d->sector_size = str2i(tmp);
- if(unlikely(d->sector_size <= 0)) {
- collector_error("Invalid sector size %d for device %s in %s. Assuming 512.", d->sector_size, d->device, buffer);
- d->sector_size = 512;
- }
- }
- else collector_error("Cannot read data for sector size for device %s from %s. Assuming 512.", d->device, buffer);
-
- fclose(fpss);
- }
- else collector_error("Cannot read sector size for device %s from %s. Assuming 512.", d->device, buffer);
- }
- */
-
- // ------------------------------------------------------------------------
- // check if the device is a bcache
-
- struct stat bcache;
- snprintfz(buffer, FILENAME_MAX, path_to_sys_block_device_bcache, disk);
- if(unlikely(stat(buffer, &bcache) == 0 && (bcache.st_mode & S_IFMT) == S_IFDIR)) {
- // we have the 'bcache' directory
- d->device_is_bcache = 1;
-
- char buffer2[FILENAME_MAX + 1];
-
- snprintfz(buffer2, FILENAME_MAX, "%s/cache/congested", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_cache_congested = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/readahead", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_stats_total_cache_readaheads = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache0/priority_stats", buffer); // only one cache is supported by bcache
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_priority_stats = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/cache/internal/cache_read_races", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_cache_read_races = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache0/io_errors", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_cache_io_errors = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/dirty_data", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_dirty_data = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/writeback_rate", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_writeback_rate = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/cache/cache_available_percent", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_cache_available_percent = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_hits", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_stats_total_cache_hits = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/stats_five_minute/cache_hit_ratio", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_stats_five_minute_cache_hit_ratio = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/stats_hour/cache_hit_ratio", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_stats_hour_cache_hit_ratio = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/stats_day/cache_hit_ratio", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_stats_day_cache_hit_ratio = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_hit_ratio", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_stats_total_cache_hit_ratio = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_misses", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_stats_total_cache_misses = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_bypass_hits", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_stats_total_cache_bypass_hits = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_bypass_misses", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_stats_total_cache_bypass_misses = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
-
- snprintfz(buffer2, FILENAME_MAX, "%s/stats_total/cache_miss_collisions", buffer);
- if(access(buffer2, R_OK) == 0)
- d->bcache_filename_stats_total_cache_miss_collisions = strdupz(buffer2);
- else
- collector_error("bcache file '%s' cannot be read.", buffer2);
- }
-
- get_disk_config(d);
-
- return d;
-}
-
-static const char *get_disk_type_string(int disk_type) {
- switch (disk_type) {
- case DISK_TYPE_PHYSICAL:
- return "physical";
- case DISK_TYPE_PARTITION:
- return "partition";
- case DISK_TYPE_VIRTUAL:
- return "virtual";
- default:
- return "unknown";
- }
-}
-
-static void add_labels_to_disk(struct disk *d, RRDSET *st) {
- rrdlabels_add(st->rrdlabels, "device", d->disk, RRDLABEL_SRC_AUTO);
- rrdlabels_add(st->rrdlabels, "mount_point", d->mount_point, RRDLABEL_SRC_AUTO);
- rrdlabels_add(st->rrdlabels, "id", d->disk_by_id, RRDLABEL_SRC_AUTO);
- rrdlabels_add(st->rrdlabels, "model", d->model, RRDLABEL_SRC_AUTO);
- rrdlabels_add(st->rrdlabels, "serial", d->serial, RRDLABEL_SRC_AUTO);
- rrdlabels_add(st->rrdlabels, "device_type", get_disk_type_string(d->type), RRDLABEL_SRC_AUTO);
-}
-
-static int diskstats_function_block_devices(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused,
- void *collector_data __maybe_unused,
- rrd_function_result_callback_t result_cb, void *result_cb_data,
- rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
- rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
- void *register_canceller_cb_data __maybe_unused) {
-
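- // implements the "block-devices" function (registered in
- // do_proc_diskstats): renders a json table with one row per disk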
- buffer_flush(wb);
- wb->content_type = CT_APPLICATION_JSON;
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
-
- buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost));
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_time_t(wb, "update_every", 1);
- buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_DISKSTATS_HELP);
- buffer_json_member_add_array(wb, "data");
-
- double max_io_reads = 0.0;
- double max_io_writes = 0.0;
- double max_io = 0.0;
- double max_backlog_time = 0.0;
- double max_busy_time = 0.0;
- double max_busy_perc = 0.0;
- double max_iops_reads = 0.0;
- double max_iops_writes = 0.0;
- double max_iops_time_reads = 0.0;
- double max_iops_time_writes = 0.0;
- double max_iops_avg_time_read = 0.0;
- double max_iops_avg_time_write = 0.0;
- double max_iops_avg_size_read = 0.0;
- double max_iops_avg_size_write = 0.0;
-
- netdata_mutex_lock(&diskstats_dev_mutex);
-
- for (struct disk *d = disk_root; d; d = d->next) {
- if (unlikely(!d->function_ready))
- continue;
-
- buffer_json_add_array_item_array(wb);
-
- buffer_json_add_array_item_string(wb, d->device);
- buffer_json_add_array_item_string(wb, get_disk_type_string(d->type));
- buffer_json_add_array_item_string(wb, d->disk_by_id);
- buffer_json_add_array_item_string(wb, d->model);
- buffer_json_add_array_item_string(wb, d->serial);
-
- // IO
- double io_reads = rrddim_get_last_stored_value(d->rd_io_reads, &max_io_reads, 1024.0);
- double io_writes = rrddim_get_last_stored_value(d->rd_io_writes, &max_io_writes, 1024.0);
- double io_total = NAN;
- if (!isnan(io_reads) && !isnan(io_writes)) {
- io_total = io_reads + io_writes;
- max_io = MAX(max_io, io_total);
- }
- // Backlog and Busy Time
- double busy_perc = rrddim_get_last_stored_value(d->rd_util_utilization, &max_busy_perc, 1);
- double busy_time = rrddim_get_last_stored_value(d->rd_busy_busy, &max_busy_time, 1);
- double backlog_time = rrddim_get_last_stored_value(d->rd_backlog_backlog, &max_backlog_time, 1);
- // IOPS
- double iops_reads = rrddim_get_last_stored_value(d->rd_ops_reads, &max_iops_reads, 1);
- double iops_writes = rrddim_get_last_stored_value(d->rd_ops_writes, &max_iops_writes, 1);
- // IO Time
- double iops_time_reads = rrddim_get_last_stored_value(d->rd_iotime_reads, &max_iops_time_reads, 1);
- double iops_time_writes = rrddim_get_last_stored_value(d->rd_iotime_writes, &max_iops_time_writes, 1);
- // Avg IO Time
- double iops_avg_time_read = rrddim_get_last_stored_value(d->rd_await_reads, &max_iops_avg_time_read, 1);
- double iops_avg_time_write = rrddim_get_last_stored_value(d->rd_await_writes, &max_iops_avg_time_write, 1);
- // Avg IO Size
- double iops_avg_size_read = rrddim_get_last_stored_value(d->rd_avgsz_reads, &max_iops_avg_size_read, 1);
- double iops_avg_size_write = rrddim_get_last_stored_value(d->rd_avgsz_writes, &max_iops_avg_size_write, 1);
-
-
- buffer_json_add_array_item_double(wb, io_reads);
- buffer_json_add_array_item_double(wb, io_writes);
- buffer_json_add_array_item_double(wb, io_total);
- buffer_json_add_array_item_double(wb, busy_perc);
- buffer_json_add_array_item_double(wb, busy_time);
- buffer_json_add_array_item_double(wb, backlog_time);
- buffer_json_add_array_item_double(wb, iops_reads);
- buffer_json_add_array_item_double(wb, iops_writes);
- buffer_json_add_array_item_double(wb, iops_time_reads);
- buffer_json_add_array_item_double(wb, iops_time_writes);
- buffer_json_add_array_item_double(wb, iops_avg_time_read);
- buffer_json_add_array_item_double(wb, iops_avg_time_write);
- buffer_json_add_array_item_double(wb, iops_avg_size_read);
- buffer_json_add_array_item_double(wb, iops_avg_size_write);
-
- // End
- buffer_json_array_close(wb);
- }
-
- netdata_mutex_unlock(&diskstats_dev_mutex);
-
- buffer_json_array_close(wb); // data
- buffer_json_member_add_object(wb, "columns");
- {
- size_t field_id = 0;
-
- buffer_rrdf_table_add_field(wb, field_id++, "Device", "Device Name",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Type", "Device Type",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "ID", "Device ID",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Model", "Device Model",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Serial", "Device Serial Number",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "Read", "Data Read from Device",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "MiB", max_io_reads, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Written", "Data Writen to Device",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "MiB", max_io_writes, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Total", "Data Transferred to and from Device",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "MiB", max_io, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_NONE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "Busy%", "Disk Busy Percentage",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "%", max_busy_perc, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Busy", "Disk Busy Time",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "milliseconds", max_busy_time, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Backlog", "Disk Backlog",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "milliseconds", max_backlog_time, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "Reads", "Completed Read Operations",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "ops", max_iops_reads, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "Writes", "Completed Write Operations",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "ops", max_iops_writes, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "ReadsTime", "Read Operations Time",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "milliseconds", max_iops_time_reads, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "WritesTime", "Write Operations Time",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "milliseconds", max_iops_time_writes, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "ReadAvgTime", "Average Read Operation Service Time",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "milliseconds", max_iops_avg_time_read, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "WriteAvgTime", "Average Write Operation Service Time",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "milliseconds", max_iops_avg_time_write, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "ReadAvgSz", "Average Read Operation Size",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "KiB", max_iops_avg_size_read, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "WriteAvgSz", "Average Write Operation Size",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "KiB", max_iops_avg_size_write, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
- }
-
- buffer_json_object_close(wb); // columns
- buffer_json_member_add_string(wb, "default_sort_column", "Total");
-
- buffer_json_member_add_object(wb, "charts");
- {
- buffer_json_member_add_object(wb, "IO");
- {
- buffer_json_member_add_string(wb, "name", "IO");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Read");
- buffer_json_add_array_item_string(wb, "Written");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- buffer_json_member_add_object(wb, "Busy");
- {
- buffer_json_member_add_string(wb, "name", "Busy");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Busy");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // charts
-
- buffer_json_member_add_array(wb, "default_charts");
- {
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "IO");
- buffer_json_add_array_item_string(wb, "Device");
- buffer_json_array_close(wb);
-
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "Busy");
- buffer_json_add_array_item_string(wb, "Device");
- buffer_json_array_close(wb);
- }
- buffer_json_array_close(wb);
-
- buffer_json_member_add_object(wb, "group_by");
- {
- buffer_json_member_add_object(wb, "Type");
- {
- buffer_json_member_add_string(wb, "name", "Type");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Type");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // group_by
-
- buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
- buffer_json_finalize(wb);
-
- int response = HTTP_RESP_OK;
- if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
- buffer_flush(wb);
- response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
- }
-
- if(result_cb)
- result_cb(wb, response, result_cb_data);
-
- return response;
-}
-
-static void diskstats_cleanup_disks() {
- struct disk *d = disk_root, *last = NULL;
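- // walk the list and drop disks that were not updated in the last
- // iteration: mark their charts obsolete and free all cached state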
- while (d) {
- if (unlikely(global_cleanup_removed_disks && !d->updated)) {
- struct disk *t = d;
-
- rrdset_obsolete_and_pointer_null(d->st_avgsz);
- rrdset_obsolete_and_pointer_null(d->st_ext_avgsz);
- rrdset_obsolete_and_pointer_null(d->st_await);
- rrdset_obsolete_and_pointer_null(d->st_ext_await);
- rrdset_obsolete_and_pointer_null(d->st_backlog);
- rrdset_obsolete_and_pointer_null(d->st_busy);
- rrdset_obsolete_and_pointer_null(d->st_io);
- rrdset_obsolete_and_pointer_null(d->st_ext_io);
- rrdset_obsolete_and_pointer_null(d->st_iotime);
- rrdset_obsolete_and_pointer_null(d->st_ext_iotime);
- rrdset_obsolete_and_pointer_null(d->st_mops);
- rrdset_obsolete_and_pointer_null(d->st_ext_mops);
- rrdset_obsolete_and_pointer_null(d->st_ops);
- rrdset_obsolete_and_pointer_null(d->st_ext_ops);
- rrdset_obsolete_and_pointer_null(d->st_qops);
- rrdset_obsolete_and_pointer_null(d->st_svctm);
- rrdset_obsolete_and_pointer_null(d->st_util);
- rrdset_obsolete_and_pointer_null(d->st_bcache);
- rrdset_obsolete_and_pointer_null(d->st_bcache_bypass);
- rrdset_obsolete_and_pointer_null(d->st_bcache_rates);
- rrdset_obsolete_and_pointer_null(d->st_bcache_size);
- rrdset_obsolete_and_pointer_null(d->st_bcache_usage);
- rrdset_obsolete_and_pointer_null(d->st_bcache_hit_ratio);
- rrdset_obsolete_and_pointer_null(d->st_bcache_cache_allocations);
- rrdset_obsolete_and_pointer_null(d->st_bcache_cache_read_races);
-
- if (d == disk_root) {
- disk_root = d = d->next;
- last = NULL;
- } else if (last) {
- last->next = d = d->next;
- }
-
- freez(t->bcache_filename_dirty_data);
- freez(t->bcache_filename_writeback_rate);
- freez(t->bcache_filename_cache_congested);
- freez(t->bcache_filename_cache_available_percent);
- freez(t->bcache_filename_stats_five_minute_cache_hit_ratio);
- freez(t->bcache_filename_stats_hour_cache_hit_ratio);
- freez(t->bcache_filename_stats_day_cache_hit_ratio);
- freez(t->bcache_filename_stats_total_cache_hit_ratio);
- freez(t->bcache_filename_stats_total_cache_hits);
- freez(t->bcache_filename_stats_total_cache_misses);
- freez(t->bcache_filename_stats_total_cache_miss_collisions);
- freez(t->bcache_filename_stats_total_cache_bypass_hits);
- freez(t->bcache_filename_stats_total_cache_bypass_misses);
- freez(t->bcache_filename_stats_total_cache_readaheads);
- freez(t->bcache_filename_cache_read_races);
- freez(t->bcache_filename_cache_io_errors);
- freez(t->bcache_filename_priority_stats);
-
- freez(t->disk);
- freez(t->device);
- freez(t->disk_by_id);
- freez(t->model);
- freez(t->serial);
- freez(t->mount_point);
- freez(t->chart_id);
- freez(t);
- } else {
- d->updated = 0;
- last = d;
- d = d->next;
- }
- }
-}
-
-int do_proc_diskstats(int update_every, usec_t dt) {
- static procfile *ff = NULL;
-
- if(unlikely(!globals_initialized)) {
- globals_initialized = 1;
-
- global_enable_new_disks_detected_at_runtime = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "enable new disks detected at runtime", global_enable_new_disks_detected_at_runtime);
- global_enable_performance_for_physical_disks = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "performance metrics for physical disks", global_enable_performance_for_physical_disks);
- global_enable_performance_for_virtual_disks = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "performance metrics for virtual disks", global_enable_performance_for_virtual_disks);
- global_enable_performance_for_partitions = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "performance metrics for partitions", global_enable_performance_for_partitions);
-
- global_do_io = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bandwidth for all disks", global_do_io);
- global_do_ops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "operations for all disks", global_do_ops);
- global_do_mops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "merged operations for all disks", global_do_mops);
- global_do_iotime = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "i/o time for all disks", global_do_iotime);
- global_do_qops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "queued operations for all disks", global_do_qops);
- global_do_util = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "utilization percentage for all disks", global_do_util);
- global_do_ext = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "extended operations for all disks", global_do_ext);
- global_do_backlog = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "backlog for all disks", global_do_backlog);
- global_do_bcache = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache for all disks", global_do_bcache);
- global_bcache_priority_stats_update_every = (int)config_get_number(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every);
-
- global_cleanup_removed_disks = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "remove charts of removed disks" , global_cleanup_removed_disks);
-
- char buffer[FILENAME_MAX + 1];
-
- snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s");
- path_to_sys_block_device = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get block device", buffer);
-
- snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/bcache");
- path_to_sys_block_device_bcache = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get block device bcache", buffer);
-
- snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/virtual/block/%s");
- path_to_sys_devices_virtual_block_device = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get virtual block device", buffer);
-
- snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/%s");
- path_to_sys_dev_block_major_minor_string = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get block device infos", buffer);
-
- //snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/queue/hw_sector_size");
- //path_to_get_hw_sector_size = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get h/w sector size", buffer);
-
- //snprintfz(buffer, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/dev/block/%lu:%lu/subsystem/%s/../queue/hw_sector_size");
- //path_to_get_hw_sector_size_partitions = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to get h/w sector size for partitions", buffer);
-
- snprintfz(buffer, FILENAME_MAX, "%s/dev/mapper", netdata_configured_host_prefix);
- path_to_device_mapper = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to device mapper", buffer);
-
- snprintfz(buffer, FILENAME_MAX, "%s/dev/disk", netdata_configured_host_prefix);
- path_to_dev_disk = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk", buffer);
-
- snprintfz(buffer, FILENAME_MAX, "%s/sys/block", netdata_configured_host_prefix);
- path_to_sys_block = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /sys/block", buffer);
-
- snprintfz(buffer, FILENAME_MAX, "%s/dev/disk/by-label", netdata_configured_host_prefix);
- path_to_device_label = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk/by-label", buffer);
-
- snprintfz(buffer, FILENAME_MAX, "%s/dev/disk/by-id", netdata_configured_host_prefix);
- path_to_device_id = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/disk/by-id", buffer);
-
- snprintfz(buffer, FILENAME_MAX, "%s/dev/vx/dsk", netdata_configured_host_prefix);
- path_to_veritas_volume_groups = config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "path to /dev/vx/dsk", buffer);
-
- name_disks_by_id = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "name disks by id", name_disks_by_id);
-
- preferred_ids = simple_pattern_create(
- config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "preferred disk ids", DEFAULT_PREFERRED_IDS), NULL,
- SIMPLE_PATTERN_EXACT, true);
-
- excluded_disks = simple_pattern_create(
- config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "exclude disks", DEFAULT_EXCLUDED_DISKS), NULL,
- SIMPLE_PATTERN_EXACT, true);
- }
-
- // --------------------------------------------------------------------------
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/diskstats");
- ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
- }
- if(unlikely(!ff)) return 0;
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 0; // we return 0, so that we will retry opening it next time
-
- static bool add_func = true;
- if (add_func) {
- rrd_function_add(localhost, NULL, "block-devices", 10, RRDFUNCTIONS_DISKSTATS_HELP, true, diskstats_function_block_devices, NULL);
- add_func = false;
- }
-
- size_t lines = procfile_lines(ff), l;
-
- collected_number system_read_kb = 0, system_write_kb = 0;
-
- int do_dc_stats = 0, do_fl_stats = 0;
-
- netdata_mutex_lock(&diskstats_dev_mutex);
-
- for(l = 0; l < lines ;l++) {
- // --------------------------------------------------------------------------
- // Read parameters
-
- char *disk;
- unsigned long major = 0, minor = 0;
-
- collected_number reads = 0, mreads = 0, readsectors = 0, readms = 0,
- writes = 0, mwrites = 0, writesectors = 0, writems = 0,
- queued_ios = 0, busy_ms = 0, backlog_ms = 0,
- discards = 0, mdiscards = 0, discardsectors = 0, discardms = 0,
- flushes = 0, flushms = 0;
-
- collected_number last_reads = 0, last_readsectors = 0, last_readms = 0,
- last_writes = 0, last_writesectors = 0, last_writems = 0,
- last_busy_ms = 0,
- last_discards = 0, last_discardsectors = 0, last_discardms = 0,
- last_flushes = 0, last_flushms = 0;
-
- size_t words = procfile_linewords(ff, l);
- if(unlikely(words < 14)) continue;
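- // /proc/diskstats lines have 14 fields on older kernels, 18 when the
- // kernel exposes discard stats (Linux 4.18+) and 20 with flush stats
- // (Linux 5.5+); the extra fields are parsed below when present.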
-
- major = str2ul(procfile_lineword(ff, l, 0));
- minor = str2ul(procfile_lineword(ff, l, 1));
- disk = procfile_lineword(ff, l, 2);
-
- // # of reads completed # of writes completed
- // This is the total number of reads or writes completed successfully.
- reads = str2ull(procfile_lineword(ff, l, 3), NULL); // rd_ios
- writes = str2ull(procfile_lineword(ff, l, 7), NULL); // wr_ios
-
- // # of reads merged # of writes merged
- // Reads and writes which are adjacent to each other may be merged for
- // efficiency. Thus two 4K reads may become one 8K read before it is
- // ultimately handed to the disk, and so it will be counted (and queued)
- // as only one I/O.
- mreads = str2ull(procfile_lineword(ff, l, 4), NULL); // rd_merges_or_rd_sec
- mwrites = str2ull(procfile_lineword(ff, l, 8), NULL); // wr_merges
-
- // # of sectors read # of sectors written
- // This is the total number of sectors read or written successfully.
- readsectors = str2ull(procfile_lineword(ff, l, 5), NULL); // rd_sec_or_wr_ios
- writesectors = str2ull(procfile_lineword(ff, l, 9), NULL); // wr_sec
-
- // # of milliseconds spent reading # of milliseconds spent writing
- // This is the total number of milliseconds spent by all reads or writes (as
- // measured from __make_request() to end_that_request_last()).
- readms = str2ull(procfile_lineword(ff, l, 6), NULL); // rd_ticks_or_wr_sec
- writems = str2ull(procfile_lineword(ff, l, 10), NULL); // wr_ticks
-
- // # of I/Os currently in progress
- // The only field that should go to zero. Incremented as requests are
- // given to appropriate struct request_queue and decremented as they finish.
- queued_ios = str2ull(procfile_lineword(ff, l, 11), NULL); // ios_pgr
-
- // # of milliseconds spent doing I/Os
- // This field increases so long as field queued_ios is nonzero.
- busy_ms = str2ull(procfile_lineword(ff, l, 12), NULL); // tot_ticks
-
- // weighted # of milliseconds spent doing I/Os
- // This field is incremented at each I/O start, I/O completion, I/O
- // merge, or read of these stats by the number of I/Os in progress
- // (field queued_ios) times the number of milliseconds spent doing I/O since the
- // last update of this field. This can provide an easy measure of both
- // I/O completion time and the backlog that may be accumulating.
- backlog_ms = str2ull(procfile_lineword(ff, l, 13), NULL); // rq_ticks
-
- if (unlikely(words > 13)) {
- do_dc_stats = 1;
-
- // # of discards completed
- // This is the total number of discards completed successfully.
- discards = str2ull(procfile_lineword(ff, l, 14), NULL); // dc_ios
-
- // # of discards merged
- // See the description of mreads/mwrites
- mdiscards = str2ull(procfile_lineword(ff, l, 15), NULL); // dc_merges
-
- // # of sectors discarded
- // This is the total number of sectors discarded successfully.
- discardsectors = str2ull(procfile_lineword(ff, l, 16), NULL); // dc_sec
-
- // # of milliseconds spent discarding
- // This is the total number of milliseconds spent by all discards (as
- // measured from __make_request() to end_that_request_last()).
- discardms = str2ull(procfile_lineword(ff, l, 17), NULL); // dc_ticks
- }
-
- if (unlikely(words > 17)) {
- do_fl_stats = 1;
-
- // number of flush I/Os processed
- // These values increment when a flush I/O request completes.
- // The block layer combines flush requests and executes at most one at a time.
- // This counts flush requests executed by the disk. Not tracked for partitions.
- flushes = str2ull(procfile_lineword(ff, l, 18), NULL); // fl_ios
-
- // total wait time for flush requests
- flushms = str2ull(procfile_lineword(ff, l, 19), NULL); // fl_ticks
- }
-
- // --------------------------------------------------------------------------
- // get a disk structure for the disk
-
- struct disk *d = get_disk(major, minor, disk);
- d->updated = 1;
-
- // --------------------------------------------------------------------------
- // count the global system disk I/O of physical disks
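- // readsectors/writesectors are cumulative counters; e.g. with 512-byte
- // sectors, a total of 2048 sectors read contributes 1024 KiB to the
- // system-wide totals (the system chart dimensions are incremental,
- // so rates are derived from successive totals).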
-
- if(unlikely(d->type == DISK_TYPE_PHYSICAL)) {
- system_read_kb += readsectors * d->sector_size / 1024;
- system_write_kb += writesectors * d->sector_size / 1024;
- }
-
- // --------------------------------------------------------------------------
- // Set its family based on mount point
-
- char *family = d->mount_point;
- if(!family) family = d->disk;
-
- // --------------------------------------------------------------------------
- // Do performance metrics
- if(d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO &&
- (readsectors || writesectors || discardsectors ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- d->do_io = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_io)) {
- d->st_io = rrdset_create_localhost(
- RRD_TYPE_DISK
- , d->chart_id
- , d->disk
- , family
- , "disk.io"
- , "Disk I/O Bandwidth"
- , "KiB/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_IO
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- d->rd_io_reads = rrddim_add(d->st_io, "reads", NULL, d->sector_size, 1024, RRD_ALGORITHM_INCREMENTAL);
- d->rd_io_writes = rrddim_add(d->st_io, "writes", NULL, d->sector_size * -1, 1024, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_io);
- }
-
- last_readsectors = rrddim_set_by_pointer(d->st_io, d->rd_io_reads, readsectors);
- last_writesectors = rrddim_set_by_pointer(d->st_io, d->rd_io_writes, writesectors);
- rrdset_done(d->st_io);
- }
-
- if (do_dc_stats && d->do_io == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) {
- if (unlikely(!d->st_ext_io)) {
- d->st_ext_io = rrdset_create_localhost(
- "disk_ext"
- , d->chart_id
- , d->disk
- , family
- , "disk_ext.io"
- , "Amount of Discarded Data"
- , "KiB/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_IO + 1
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- d->rd_io_discards = rrddim_add(d->st_ext_io, "discards", NULL, d->sector_size, 1024, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_ext_io);
- }
-
- last_discardsectors = rrddim_set_by_pointer(d->st_ext_io, d->rd_io_discards, discardsectors);
- rrdset_done(d->st_ext_io);
- }
-
- if(d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO &&
- (reads || writes || discards || flushes ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- d->do_ops = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_ops)) {
- d->st_ops = rrdset_create_localhost(
- "disk_ops"
- , d->chart_id
- , d->disk
- , family
- , "disk.ops"
- , "Disk Completed I/O Operations"
- , "operations/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_OPS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_ops, RRDSET_FLAG_DETAIL);
-
- d->rd_ops_reads = rrddim_add(d->st_ops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_ops_writes = rrddim_add(d->st_ops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_ops);
- }
-
- last_reads = rrddim_set_by_pointer(d->st_ops, d->rd_ops_reads, reads);
- last_writes = rrddim_set_by_pointer(d->st_ops, d->rd_ops_writes, writes);
- rrdset_done(d->st_ops);
- }
-
- if (do_dc_stats && d->do_ops == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) {
- if (unlikely(!d->st_ext_ops)) {
- d->st_ext_ops = rrdset_create_localhost(
- "disk_ext_ops"
- , d->chart_id
- , d->disk
- , family
- , "disk_ext.ops"
- , "Disk Completed Extended I/O Operations"
- , "operations/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_OPS + 1
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_ext_ops, RRDSET_FLAG_DETAIL);
-
- d->rd_ops_discards = rrddim_add(d->st_ext_ops, "discards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- if (do_fl_stats)
- d->rd_ops_flushes = rrddim_add(d->st_ext_ops, "flushes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_ext_ops);
- }
-
- last_discards = rrddim_set_by_pointer(d->st_ext_ops, d->rd_ops_discards, discards);
- if (do_fl_stats)
- last_flushes = rrddim_set_by_pointer(d->st_ext_ops, d->rd_ops_flushes, flushes);
- rrdset_done(d->st_ext_ops);
- }
-
- if(d->do_qops == CONFIG_BOOLEAN_YES || (d->do_qops == CONFIG_BOOLEAN_AUTO &&
- (queued_ios || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- d->do_qops = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_qops)) {
- d->st_qops = rrdset_create_localhost(
- "disk_qops"
- , d->chart_id
- , d->disk
- , family
- , "disk.qops"
- , "Disk Current I/O Operations"
- , "operations"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_QOPS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_qops, RRDSET_FLAG_DETAIL);
-
- d->rd_qops_operations = rrddim_add(d->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_qops);
- }
-
- rrddim_set_by_pointer(d->st_qops, d->rd_qops_operations, queued_ios);
- rrdset_done(d->st_qops);
- }
-
- if(d->do_backlog == CONFIG_BOOLEAN_YES || (d->do_backlog == CONFIG_BOOLEAN_AUTO &&
- (backlog_ms || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- d->do_backlog = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_backlog)) {
- d->st_backlog = rrdset_create_localhost(
- "disk_backlog"
- , d->chart_id
- , d->disk
- , family
- , "disk.backlog"
- , "Disk Backlog"
- , "milliseconds"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_BACKLOG
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(d->st_backlog, RRDSET_FLAG_DETAIL);
-
- d->rd_backlog_backlog = rrddim_add(d->st_backlog, "backlog", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_backlog);
- }
-
- rrddim_set_by_pointer(d->st_backlog, d->rd_backlog_backlog, backlog_ms);
- rrdset_done(d->st_backlog);
- }
-
- if(d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO &&
- (busy_ms || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- d->do_util = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_busy)) {
- d->st_busy = rrdset_create_localhost(
- "disk_busy"
- , d->chart_id
- , d->disk
- , family
- , "disk.busy"
- , "Disk Busy Time"
- , "milliseconds"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_BUSY
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(d->st_busy, RRDSET_FLAG_DETAIL);
-
- d->rd_busy_busy = rrddim_add(d->st_busy, "busy", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_busy);
- }
-
- last_busy_ms = rrddim_set_by_pointer(d->st_busy, d->rd_busy_busy, busy_ms);
- rrdset_done(d->st_busy);
-
- if(unlikely(!d->st_util)) {
- d->st_util = rrdset_create_localhost(
- "disk_util"
- , d->chart_id
- , d->disk
- , family
- , "disk.util"
- , "Disk Utilization Time"
- , "% of time working"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_UTIL
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(d->st_util, RRDSET_FLAG_DETAIL);
-
- d->rd_util_utilization = rrddim_add(d->st_util, "utilization", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_util);
- }
-
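- // the busy_ms delta is in milliseconds while the interval is
- // update_every seconds (i.e. update_every * 1000 ms), so dividing by
- // (10 * update_every) yields a percentage; e.g. an illustrative 650ms
- // busy over a 1s interval gives 650 / 10 = 65%.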
- collected_number disk_utilization = (busy_ms - last_busy_ms) / (10 * update_every);
- if (disk_utilization > 100)
- disk_utilization = 100;
-
- rrddim_set_by_pointer(d->st_util, d->rd_util_utilization, disk_utilization);
- rrdset_done(d->st_util);
- }
-
- if(d->do_mops == CONFIG_BOOLEAN_YES || (d->do_mops == CONFIG_BOOLEAN_AUTO &&
- (mreads || mwrites || mdiscards ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- d->do_mops = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_mops)) {
- d->st_mops = rrdset_create_localhost(
- "disk_mops"
- , d->chart_id
- , d->disk
- , family
- , "disk.mops"
- , "Disk Merged Operations"
- , "merged operations/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_MOPS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_mops, RRDSET_FLAG_DETAIL);
-
- d->rd_mops_reads = rrddim_add(d->st_mops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_mops_writes = rrddim_add(d->st_mops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_mops);
- }
-
- rrddim_set_by_pointer(d->st_mops, d->rd_mops_reads, mreads);
- rrddim_set_by_pointer(d->st_mops, d->rd_mops_writes, mwrites);
- rrdset_done(d->st_mops);
- }
-
- if(do_dc_stats && d->do_mops == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) {
- d->do_mops = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_ext_mops)) {
- d->st_ext_mops = rrdset_create_localhost(
- "disk_ext_mops"
- , d->chart_id
- , d->disk
- , family
- , "disk_ext.mops"
- , "Disk Merged Discard Operations"
- , "merged operations/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_MOPS + 1
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_ext_mops, RRDSET_FLAG_DETAIL);
-
- d->rd_mops_discards = rrddim_add(d->st_ext_mops, "discards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_ext_mops);
- }
-
- rrddim_set_by_pointer(d->st_ext_mops, d->rd_mops_discards, mdiscards);
- rrdset_done(d->st_ext_mops);
- }
-
- if(d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO &&
- (readms || writems || discardms || flushms || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- d->do_iotime = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_iotime)) {
- d->st_iotime = rrdset_create_localhost(
- "disk_iotime"
- , d->chart_id
- , d->disk
- , family
- , "disk.iotime"
- , "Disk Total I/O Time"
- , "milliseconds/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_IOTIME
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_iotime, RRDSET_FLAG_DETAIL);
-
- d->rd_iotime_reads = rrddim_add(d->st_iotime, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_iotime_writes = rrddim_add(d->st_iotime, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_iotime);
- }
-
- last_readms = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_reads, readms);
- last_writems = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_writes, writems);
- rrdset_done(d->st_iotime);
- }
-
- if(do_dc_stats && d->do_iotime == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) {
- if(unlikely(!d->st_ext_iotime)) {
- d->st_ext_iotime = rrdset_create_localhost(
- "disk_ext_iotime"
- , d->chart_id
- , d->disk
- , family
- , "disk_ext.iotime"
- , "Disk Total I/O Time for Extended Operations"
- , "milliseconds/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_IOTIME + 1
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_ext_iotime, RRDSET_FLAG_DETAIL);
-
- d->rd_iotime_discards = rrddim_add(d->st_ext_iotime, "discards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- if (do_fl_stats)
- d->rd_iotime_flushes = rrddim_add(d->st_ext_iotime, "flushes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_ext_iotime);
- }
-
- last_discardms = rrddim_set_by_pointer(d->st_ext_iotime, d->rd_iotime_discards, discardms);
- if (do_fl_stats)
- last_flushms = rrddim_set_by_pointer(d->st_ext_iotime, d->rd_iotime_flushes, flushms);
- rrdset_done(d->st_ext_iotime);
- }
-
- // calculate differential charts
- // only if this is not the first time we run
-
- if(likely(dt)) {
- if( (d->do_iotime == CONFIG_BOOLEAN_YES || (d->do_iotime == CONFIG_BOOLEAN_AUTO &&
- (readms || writems ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) &&
- (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO &&
- (reads || writes ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) {
-
- if(unlikely(!d->st_await)) {
- d->st_await = rrdset_create_localhost(
- "disk_await"
- , d->chart_id
- , d->disk
- , family
- , "disk.await"
- , "Average Completed I/O Operation Time"
- , "milliseconds/operation"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_AWAIT
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_await, RRDSET_FLAG_DETAIL);
-
- d->rd_await_reads = rrddim_add(d->st_await, "reads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_await_writes = rrddim_add(d->st_await, "writes", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_await);
- }
-
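- // per-interval average: ms spent reading (or writing) divided by the
- // operations completed in the same interval; e.g. a readms delta of 120
- // over a reads delta of 40 gives 3 ms/operation (illustrative numbers).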
- rrddim_set_by_pointer(d->st_await, d->rd_await_reads, (reads - last_reads) ? (readms - last_readms) / (reads - last_reads) : 0);
- rrddim_set_by_pointer(d->st_await, d->rd_await_writes, (writes - last_writes) ? (writems - last_writems) / (writes - last_writes) : 0);
- rrdset_done(d->st_await);
- }
-
- if (do_dc_stats && d->do_iotime == CONFIG_BOOLEAN_YES && d->do_ops == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) {
- if(unlikely(!d->st_ext_await)) {
- d->st_ext_await = rrdset_create_localhost(
- "disk_ext_await"
- , d->chart_id
- , d->disk
- , family
- , "disk_ext.await"
- , "Average Completed Extended I/O Operation Time"
- , "milliseconds/operation"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_AWAIT + 1
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_ext_await, RRDSET_FLAG_DETAIL);
-
- d->rd_await_discards = rrddim_add(d->st_ext_await, "discards", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- if (do_fl_stats)
- d->rd_await_flushes = rrddim_add(d->st_ext_await, "flushes", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_ext_await);
- }
-
- rrddim_set_by_pointer(
- d->st_ext_await, d->rd_await_discards,
- (discards - last_discards) ? (discardms - last_discardms) / (discards - last_discards) : 0);
-
- if (do_fl_stats)
- rrddim_set_by_pointer(
- d->st_ext_await, d->rd_await_flushes,
- (flushes - last_flushes) ? (flushms - last_flushms) / (flushes - last_flushes) : 0);
-
- rrdset_done(d->st_ext_await);
- }
-
- if( (d->do_io == CONFIG_BOOLEAN_YES || (d->do_io == CONFIG_BOOLEAN_AUTO &&
- (readsectors || writesectors || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) &&
- (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO &&
- (reads || writes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) {
-
- if(unlikely(!d->st_avgsz)) {
- d->st_avgsz = rrdset_create_localhost(
- "disk_avgsz"
- , d->chart_id
- , d->disk
- , family
- , "disk.avgsz"
- , "Average Completed I/O Operation Bandwidth"
- , "KiB/operation"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_AVGSZ
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(d->st_avgsz, RRDSET_FLAG_DETAIL);
-
- d->rd_avgsz_reads = rrddim_add(d->st_avgsz, "reads", NULL, d->sector_size, 1024, RRD_ALGORITHM_ABSOLUTE);
- d->rd_avgsz_writes = rrddim_add(d->st_avgsz, "writes", NULL, d->sector_size * -1, 1024, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_avgsz);
- }
-
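- // average request size: the sectors delta per operations delta; the
- // dimension multiplier/divisor (sector_size / 1024) renders it as KiB
- // per operation, e.g. 4096 sectors over 512 reads with 512-byte
- // sectors -> 4 KiB/operation.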
- rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_reads, (reads - last_reads) ? (readsectors - last_readsectors) / (reads - last_reads) : 0);
- rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_writes, (writes - last_writes) ? (writesectors - last_writesectors) / (writes - last_writes) : 0);
- rrdset_done(d->st_avgsz);
- }
-
- if(do_dc_stats && d->do_io == CONFIG_BOOLEAN_YES && d->do_ops == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) {
- if(unlikely(!d->st_ext_avgsz)) {
- d->st_ext_avgsz = rrdset_create_localhost(
- "disk_ext_avgsz"
- , d->chart_id
- , d->disk
- , family
- , "disk_ext.avgsz"
- , "Average Amount of Discarded Data"
- , "KiB/operation"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_AVGSZ
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(d->st_ext_avgsz, RRDSET_FLAG_DETAIL);
-
- d->rd_avgsz_discards = rrddim_add(d->st_ext_avgsz, "discards", NULL, d->sector_size, 1024, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_ext_avgsz);
- }
-
- rrddim_set_by_pointer(
- d->st_ext_avgsz, d->rd_avgsz_discards,
- (discards - last_discards) ? (discardsectors - last_discardsectors) / (discards - last_discards) :
- 0);
- rrdset_done(d->st_ext_avgsz);
- }
-
- if( (d->do_util == CONFIG_BOOLEAN_YES || (d->do_util == CONFIG_BOOLEAN_AUTO &&
- (busy_ms ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) &&
- (d->do_ops == CONFIG_BOOLEAN_YES || (d->do_ops == CONFIG_BOOLEAN_AUTO &&
- (reads || writes ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) {
-
- if(unlikely(!d->st_svctm)) {
- d->st_svctm = rrdset_create_localhost(
- "disk_svctm"
- , d->chart_id
- , d->disk
- , family
- , "disk.svctm"
- , "Average Service Time"
- , "milliseconds/operation"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_SVCTM
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_svctm, RRDSET_FLAG_DETAIL);
-
- d->rd_svctm_svctm = rrddim_add(d->st_svctm, "svctm", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_svctm);
- }
-
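- // svctm approximates busy time per completed operation: the busy_ms
- // delta divided by all reads+writes completed in the interval, e.g.
- // 200ms busy over 100 operations -> 2 ms/operation.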
- rrddim_set_by_pointer(d->st_svctm, d->rd_svctm_svctm, ((reads - last_reads) + (writes - last_writes)) ? (busy_ms - last_busy_ms) / ((reads - last_reads) + (writes - last_writes)) : 0);
- rrdset_done(d->st_svctm);
- }
- }
-
- // read bcache metrics and generate the bcache charts
-
- if(d->device_is_bcache && d->do_bcache != CONFIG_BOOLEAN_NO) {
- unsigned long long int
- stats_total_cache_bypass_hits = 0,
- stats_total_cache_bypass_misses = 0,
- stats_total_cache_hits = 0,
- stats_total_cache_miss_collisions = 0,
- stats_total_cache_misses = 0,
- stats_five_minute_cache_hit_ratio = 0,
- stats_hour_cache_hit_ratio = 0,
- stats_day_cache_hit_ratio = 0,
- stats_total_cache_hit_ratio = 0,
- cache_available_percent = 0,
- cache_readaheads = 0,
- cache_read_races = 0,
- cache_io_errors = 0,
- cache_congested = 0,
- dirty_data = 0,
- writeback_rate = 0;
-
- // read the bcache values
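- // sysfs bcache values may carry unit suffixes (e.g. "1.2G"); the
- // *_with_units readers convert them to plain numbers (assumed to be
- // bytes here, which matches the KiB/MiB divisors of the charts below).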
-
- if(d->bcache_filename_dirty_data)
- dirty_data = bcache_read_number_with_units(d->bcache_filename_dirty_data);
-
- if(d->bcache_filename_writeback_rate)
- writeback_rate = bcache_read_number_with_units(d->bcache_filename_writeback_rate);
-
- if(d->bcache_filename_cache_congested)
- cache_congested = bcache_read_number_with_units(d->bcache_filename_cache_congested);
-
- if(d->bcache_filename_cache_available_percent)
- read_single_number_file(d->bcache_filename_cache_available_percent, &cache_available_percent);
-
- if(d->bcache_filename_stats_five_minute_cache_hit_ratio)
- read_single_number_file(d->bcache_filename_stats_five_minute_cache_hit_ratio, &stats_five_minute_cache_hit_ratio);
-
- if(d->bcache_filename_stats_hour_cache_hit_ratio)
- read_single_number_file(d->bcache_filename_stats_hour_cache_hit_ratio, &stats_hour_cache_hit_ratio);
-
- if(d->bcache_filename_stats_day_cache_hit_ratio)
- read_single_number_file(d->bcache_filename_stats_day_cache_hit_ratio, &stats_day_cache_hit_ratio);
-
- if(d->bcache_filename_stats_total_cache_hit_ratio)
- read_single_number_file(d->bcache_filename_stats_total_cache_hit_ratio, &stats_total_cache_hit_ratio);
-
- if(d->bcache_filename_stats_total_cache_hits)
- read_single_number_file(d->bcache_filename_stats_total_cache_hits, &stats_total_cache_hits);
-
- if(d->bcache_filename_stats_total_cache_misses)
- read_single_number_file(d->bcache_filename_stats_total_cache_misses, &stats_total_cache_misses);
-
- if(d->bcache_filename_stats_total_cache_miss_collisions)
- read_single_number_file(d->bcache_filename_stats_total_cache_miss_collisions, &stats_total_cache_miss_collisions);
-
- if(d->bcache_filename_stats_total_cache_bypass_hits)
- read_single_number_file(d->bcache_filename_stats_total_cache_bypass_hits, &stats_total_cache_bypass_hits);
-
- if(d->bcache_filename_stats_total_cache_bypass_misses)
- read_single_number_file(d->bcache_filename_stats_total_cache_bypass_misses, &stats_total_cache_bypass_misses);
-
- if(d->bcache_filename_stats_total_cache_readaheads)
- cache_readaheads = bcache_read_number_with_units(d->bcache_filename_stats_total_cache_readaheads);
-
- if(d->bcache_filename_cache_read_races)
- read_single_number_file(d->bcache_filename_cache_read_races, &cache_read_races);
-
- if(d->bcache_filename_cache_io_errors)
- read_single_number_file(d->bcache_filename_cache_io_errors, &cache_io_errors);
-
- if(d->bcache_filename_priority_stats && global_bcache_priority_stats_update_every >= 1)
- bcache_read_priority_stats(d, family, global_bcache_priority_stats_update_every, dt);
-
- // update the charts
-
- {
- if(unlikely(!d->st_bcache_hit_ratio)) {
- d->st_bcache_hit_ratio = rrdset_create_localhost(
- "disk_bcache_hit_ratio"
- , d->chart_id
- , d->disk
- , family
- , "disk.bcache_hit_ratio"
- , "BCache Cache Hit Ratio"
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_BCACHE_HIT_RATIO
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- d->rd_bcache_hit_ratio_5min = rrddim_add(d->st_bcache_hit_ratio, "5min", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_bcache_hit_ratio_1hour = rrddim_add(d->st_bcache_hit_ratio, "1hour", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_bcache_hit_ratio_1day = rrddim_add(d->st_bcache_hit_ratio, "1day", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_bcache_hit_ratio_total = rrddim_add(d->st_bcache_hit_ratio, "ever", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_bcache_hit_ratio);
- }
-
- rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_5min, stats_five_minute_cache_hit_ratio);
- rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_1hour, stats_hour_cache_hit_ratio);
- rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_1day, stats_day_cache_hit_ratio);
- rrddim_set_by_pointer(d->st_bcache_hit_ratio, d->rd_bcache_hit_ratio_total, stats_total_cache_hit_ratio);
- rrdset_done(d->st_bcache_hit_ratio);
- }
-
- {
- if(unlikely(!d->st_bcache_rates)) {
- d->st_bcache_rates = rrdset_create_localhost(
- "disk_bcache_rates"
- , d->chart_id
- , d->disk
- , family
- , "disk.bcache_rates"
- , "BCache Rates"
- , "KiB/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_BCACHE_RATES
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- d->rd_bcache_rate_congested = rrddim_add(d->st_bcache_rates, "congested", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- d->rd_bcache_rate_writeback = rrddim_add(d->st_bcache_rates, "writeback", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_bcache_rates);
- }
-
- rrddim_set_by_pointer(d->st_bcache_rates, d->rd_bcache_rate_writeback, writeback_rate);
- rrddim_set_by_pointer(d->st_bcache_rates, d->rd_bcache_rate_congested, cache_congested);
- rrdset_done(d->st_bcache_rates);
- }
-
- {
- if(unlikely(!d->st_bcache_size)) {
- d->st_bcache_size = rrdset_create_localhost(
- "disk_bcache_size"
- , d->chart_id
- , d->disk
- , family
- , "disk.bcache_size"
- , "BCache Cache Sizes"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_BCACHE_SIZE
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- d->rd_bcache_dirty_size = rrddim_add(d->st_bcache_size, "dirty", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_bcache_size);
- }
-
- rrddim_set_by_pointer(d->st_bcache_size, d->rd_bcache_dirty_size, dirty_data);
- rrdset_done(d->st_bcache_size);
- }
-
- {
- if(unlikely(!d->st_bcache_usage)) {
- d->st_bcache_usage = rrdset_create_localhost(
- "disk_bcache_usage"
- , d->chart_id
- , d->disk
- , family
- , "disk.bcache_usage"
- , "BCache Cache Usage"
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_BCACHE_USAGE
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- d->rd_bcache_available_percent = rrddim_add(d->st_bcache_usage, "avail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_bcache_usage);
- }
-
- rrddim_set_by_pointer(d->st_bcache_usage, d->rd_bcache_available_percent, cache_available_percent);
- rrdset_done(d->st_bcache_usage);
- }
-
- {
- if(unlikely(!d->st_bcache_cache_read_races)) {
- d->st_bcache_cache_read_races = rrdset_create_localhost(
- "disk_bcache_cache_read_races"
- , d->chart_id
- , d->disk
- , family
- , "disk.bcache_cache_read_races"
- , "BCache Cache Read Races"
- , "operations/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_BCACHE_CACHE_READ_RACES
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- d->rd_bcache_cache_read_races = rrddim_add(d->st_bcache_cache_read_races, "races", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_bcache_cache_io_errors = rrddim_add(d->st_bcache_cache_read_races, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_bcache_cache_read_races);
- }
-
- rrddim_set_by_pointer(d->st_bcache_cache_read_races, d->rd_bcache_cache_read_races, cache_read_races);
- rrddim_set_by_pointer(d->st_bcache_cache_read_races, d->rd_bcache_cache_io_errors, cache_io_errors);
- rrdset_done(d->st_bcache_cache_read_races);
- }
-
- if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO &&
- (stats_total_cache_hits ||
- stats_total_cache_misses ||
- stats_total_cache_miss_collisions ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
-
- if(unlikely(!d->st_bcache)) {
- d->st_bcache = rrdset_create_localhost(
- "disk_bcache"
- , d->chart_id
- , d->disk
- , family
- , "disk.bcache"
- , "BCache Cache I/O Operations"
- , "operations/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_BCACHE_OPS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_bcache, RRDSET_FLAG_DETAIL);
-
- d->rd_bcache_hits = rrddim_add(d->st_bcache, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_bcache_misses = rrddim_add(d->st_bcache, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_bcache_miss_collisions = rrddim_add(d->st_bcache, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_bcache_readaheads = rrddim_add(d->st_bcache, "readaheads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_bcache);
- }
-
- rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_hits, stats_total_cache_hits);
- rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_misses, stats_total_cache_misses);
- rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_miss_collisions, stats_total_cache_miss_collisions);
- rrddim_set_by_pointer(d->st_bcache, d->rd_bcache_readaheads, cache_readaheads);
- rrdset_done(d->st_bcache);
- }
-
- if(d->do_bcache == CONFIG_BOOLEAN_YES || (d->do_bcache == CONFIG_BOOLEAN_AUTO &&
- (stats_total_cache_bypass_hits ||
- stats_total_cache_bypass_misses ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
-
- if(unlikely(!d->st_bcache_bypass)) {
- d->st_bcache_bypass = rrdset_create_localhost(
- "disk_bcache_bypass"
- , d->chart_id
- , d->disk
- , family
- , "disk.bcache_bypass"
- , "BCache Cache Bypass I/O Operations"
- , "operations/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_BCACHE_BYPASS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_bcache_bypass, RRDSET_FLAG_DETAIL);
-
- d->rd_bcache_bypass_hits = rrddim_add(d->st_bcache_bypass, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_bcache_bypass_misses = rrddim_add(d->st_bcache_bypass, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_bcache_bypass);
- }
-
- rrddim_set_by_pointer(d->st_bcache_bypass, d->rd_bcache_bypass_hits, stats_total_cache_bypass_hits);
- rrddim_set_by_pointer(d->st_bcache_bypass, d->rd_bcache_bypass_misses, stats_total_cache_bypass_misses);
- rrdset_done(d->st_bcache_bypass);
- }
- }
-
- d->function_ready = !d->excluded;
- }
-
- diskstats_cleanup_disks();
-
- netdata_mutex_unlock(&diskstats_dev_mutex);
-
- // update the system total I/O
-
- if(global_do_io == CONFIG_BOOLEAN_YES || (global_do_io == CONFIG_BOOLEAN_AUTO &&
- (system_read_kb || system_write_kb ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- static RRDSET *st_io = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if(unlikely(!st_io)) {
- st_io = rrdset_create_localhost(
- "system"
- , "io"
- , NULL
- , "disk"
- , NULL
- , "Disk I/O"
- , "KiB/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_SYSTEM_IO
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_in = rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_io, rd_in, system_read_kb);
- rrddim_set_by_pointer(st_io, rd_out, system_write_kb);
- rrdset_done(st_io);
- }
-
- return 0;
-}
diff --git a/collectors/proc.plugin/proc_interrupts.c b/collectors/proc.plugin/proc_interrupts.c
deleted file mode 100644
index 37071b22f..000000000
--- a/collectors/proc.plugin/proc_interrupts.c
+++ /dev/null
@@ -1,245 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_INTERRUPTS_NAME "/proc/interrupts"
-#define CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_INTERRUPTS_NAME
-
-#define MAX_INTERRUPT_NAME 50
-
-struct cpu_interrupt {
- unsigned long long value;
- RRDDIM *rd;
-};
-
-struct interrupt {
- int used;
- char *id;
- char name[MAX_INTERRUPT_NAME + 1];
- RRDDIM *rd;
- unsigned long long total;
- struct cpu_interrupt cpu[];
-};
-
-// since each interrupt is variable in size
-// we use this to calculate its record size
-#define recordsize(cpus) (sizeof(struct interrupt) + ((cpus) * sizeof(struct cpu_interrupt)))
-
-// given a base, get a pointer to each record
-#define irrindex(base, line, cpus) ((struct interrupt *)&((char *)(base))[(line) * recordsize(cpus)])
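-// e.g. with cpus = 4, recordsize(4) = sizeof(struct interrupt) +
-// 4 * sizeof(struct cpu_interrupt), and irrindex(base, 2, 4) points at
-// the third such variable-sized record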
-
-static inline struct interrupt *get_interrupts_array(size_t lines, int cpus) {
- static struct interrupt *irrs = NULL;
- static size_t allocated = 0;
-
- if(unlikely(lines != allocated)) {
- size_t l;
- int c;
-
- irrs = (struct interrupt *)reallocz(irrs, lines * recordsize(cpus));
-
- // reset all interrupt RRDDIM pointers as any line could have shifted
- for(l = 0; l < lines ;l++) {
- struct interrupt *irr = irrindex(irrs, l, cpus);
- irr->rd = NULL;
- irr->name[0] = '\0';
- for(c = 0; c < cpus ;c++)
- irr->cpu[c].rd = NULL;
- }
-
- allocated = lines;
- }
-
- return irrs;
-}
-
-int do_proc_interrupts(int update_every, usec_t dt) {
- (void)dt;
- static procfile *ff = NULL;
- static int cpus = -1, do_per_core = CONFIG_BOOLEAN_INVALID;
- struct interrupt *irrs = NULL;
-
- if(unlikely(do_per_core == CONFIG_BOOLEAN_INVALID))
- do_per_core = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS, "interrupts per core", CONFIG_BOOLEAN_AUTO);
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/interrupts");
- ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
- }
- if(unlikely(!ff))
- return 1;
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
- return 0; // we return 0, so that we will retry opening it next time
-
- size_t lines = procfile_lines(ff), l;
- size_t words = procfile_linewords(ff, 0);
-
- if(unlikely(!lines)) {
- collector_error("Cannot read /proc/interrupts, zero lines reported.");
- return 1;
- }
-
- // find how many CPUs are there
- if(unlikely(cpus == -1)) {
- uint32_t w;
- cpus = 0;
- for(w = 0; w < words ; w++) {
- if(likely(strncmp(procfile_lineword(ff, 0, w), "CPU", 3) == 0))
- cpus++;
- }
- }
-
- if(unlikely(!cpus)) {
- collector_error("PLUGIN: PROC_INTERRUPTS: Cannot find the number of CPUs in /proc/interrupts");
- return 1;
- }
-
- // allocate the array we need
- irrs = get_interrupts_array(lines, cpus);
- irrs[0].used = 0;
-
- // loop through all lines
- for(l = 1; l < lines ;l++) {
- struct interrupt *irr = irrindex(irrs, l, cpus);
- irr->used = 0;
- irr->total = 0;
-
- words = procfile_linewords(ff, l);
- if(unlikely(!words)) continue;
-
- irr->id = procfile_lineword(ff, l, 0);
- if(unlikely(!irr->id || !irr->id[0])) continue;
-
- size_t idlen = strlen(irr->id);
- if(irr->id[idlen - 1] == ':')
- irr->id[--idlen] = '\0';
-
- int c;
- for(c = 0; c < cpus ;c++) {
- if(likely((c + 1) < (int)words))
- irr->cpu[c].value = str2ull(procfile_lineword(ff, l, (uint32_t) (c + 1)), NULL);
- else
- irr->cpu[c].value = 0;
-
- irr->total += irr->cpu[c].value;
- }
-
- if(unlikely(isdigit(irr->id[0]) && (uint32_t)(cpus + 2) < words)) {
- strncpyz(irr->name, procfile_lineword(ff, l, words - 1), MAX_INTERRUPT_NAME);
- size_t nlen = strlen(irr->name);
- if(likely(nlen + 1 + idlen <= MAX_INTERRUPT_NAME)) {
- irr->name[nlen] = '_';
- strncpyz(&irr->name[nlen + 1], irr->id, MAX_INTERRUPT_NAME - nlen - 1);
- }
- else {
- irr->name[MAX_INTERRUPT_NAME - idlen - 1] = '_';
- strncpyz(&irr->name[MAX_INTERRUPT_NAME - idlen], irr->id, idlen);
- }
- }
- else {
- strncpyz(irr->name, irr->id, MAX_INTERRUPT_NAME);
- }
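- // e.g. a numeric irq id "0" whose last column is "timer" becomes
- // "timer_0", while named rows such as "NMI" keep the id as the name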
-
- irr->used = 1;
- }
-
- static RRDSET *st_system_interrupts = NULL;
- if(unlikely(!st_system_interrupts))
- st_system_interrupts = rrdset_create_localhost(
- "system"
- , "interrupts"
- , NULL
- , "interrupts"
- , NULL
- , "System interrupts"
- , "interrupts/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_INTERRUPTS_NAME
- , NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- for(l = 0; l < lines ;l++) {
- struct interrupt *irr = irrindex(irrs, l, cpus);
- if(irr->used && irr->total) {
- // the interrupts may have changed without changing the total number of
- // lines, if the same number of interrupts were added and removed between
- // two calls of this function.
- if(unlikely(!irr->rd || strncmp(rrddim_name(irr->rd), irr->name, MAX_INTERRUPT_NAME) != 0)) {
- irr->rd = rrddim_add(st_system_interrupts, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_reset_name(st_system_interrupts, irr->rd, irr->name);
-
- // also reset per cpu RRDDIMs to avoid repeating strncmp() in the per core loop
- if(likely(do_per_core != CONFIG_BOOLEAN_NO)) {
- int c;
- for(c = 0; c < cpus; c++) irr->cpu[c].rd = NULL;
- }
- }
-
- rrddim_set_by_pointer(st_system_interrupts, irr->rd, irr->total);
- }
- }
-
- rrdset_done(st_system_interrupts);
-
- if(likely(do_per_core != CONFIG_BOOLEAN_NO)) {
- static RRDSET **core_st = NULL;
- static int old_cpus = 0;
-
- if(old_cpus < cpus) {
- core_st = reallocz(core_st, sizeof(RRDSET *) * cpus);
- memset(&core_st[old_cpus], 0, sizeof(RRDSET *) * (cpus - old_cpus));
- old_cpus = cpus;
- }
-
- int c;
-
- for(c = 0; c < cpus ;c++) {
- if(unlikely(!core_st[c])) {
- char id[50+1];
- snprintfz(id, sizeof(id) - 1, "cpu%d_interrupts", c);
-
- char title[100+1];
- snprintfz(title, sizeof(title) - 1, "CPU Interrupts");
- core_st[c] = rrdset_create_localhost(
- "cpu"
- , id
- , NULL
- , "interrupts"
- , "cpu.interrupts"
- , title
- , "interrupts/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_INTERRUPTS_NAME
- , NETDATA_CHART_PRIO_INTERRUPTS_PER_CORE + c
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- char core[50+1];
- snprintfz(core, sizeof(core) - 1, "cpu%d", c);
- rrdlabels_add(core_st[c]->rrdlabels, "cpu", core, RRDLABEL_SRC_AUTO);
- }
-
- for(l = 0; l < lines ;l++) {
- struct interrupt *irr = irrindex(irrs, l, cpus);
- if(irr->used && (do_per_core == CONFIG_BOOLEAN_YES || irr->cpu[c].value)) {
- if(unlikely(!irr->cpu[c].rd)) {
- irr->cpu[c].rd = rrddim_add(core_st[c], irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_reset_name(core_st[c], irr->cpu[c].rd, irr->name);
- }
-
- rrddim_set_by_pointer(core_st[c], irr->cpu[c].rd, irr->cpu[c].value);
- }
- }
-
- rrdset_done(core_st[c]);
- }
- }
-
- return 0;
-}
diff --git a/collectors/proc.plugin/proc_loadavg.c b/collectors/proc.plugin/proc_loadavg.c
deleted file mode 100644
index 106cf9087..000000000
--- a/collectors/proc.plugin/proc_loadavg.c
+++ /dev/null
@@ -1,126 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_LOADAVG_NAME "/proc/loadavg"
-#define CONFIG_SECTION_PLUGIN_PROC_LOADAVG "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_LOADAVG_NAME
-
-// Linux recalculates the load averages once every 5 seconds
-#define MIN_LOADAVG_UPDATE_EVERY 5
-
-int do_proc_loadavg(int update_every, usec_t dt) {
- static procfile *ff = NULL;
- static int do_loadavg = -1, do_all_processes = -1;
- static usec_t next_loadavg_dt = 0;
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/loadavg");
-
- ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_LOADAVG, "filename to monitor", filename), " \t,:|/", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff))
- return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
- return 0; // we return 0, so that we will retry opening it next time
-
- if(unlikely(do_loadavg == -1)) {
- do_loadavg = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_LOADAVG, "enable load average", 1);
- do_all_processes = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_LOADAVG, "enable total processes", 1);
- }
-
- if(unlikely(procfile_lines(ff) < 1)) {
- collector_error("/proc/loadavg has no lines.");
- return 1;
- }
- if(unlikely(procfile_linewords(ff, 0) < 6)) {
- collector_error("/proc/loadavg has less than 6 words in it.");
- return 1;
- }
-
- double load1 = strtod(procfile_lineword(ff, 0, 0), NULL);
- double load5 = strtod(procfile_lineword(ff, 0, 1), NULL);
- double load15 = strtod(procfile_lineword(ff, 0, 2), NULL);
-
- //unsigned long long running_processes = str2ull(procfile_lineword(ff, 0, 3));
- unsigned long long active_processes = str2ull(procfile_lineword(ff, 0, 4), NULL);
-
- // get system pid_max
- unsigned long long max_processes = get_system_pid_max();
-
- //unsigned long long next_pid = str2ull(procfile_lineword(ff, 0, 5));
-
- if(next_loadavg_dt <= dt) {
- if(likely(do_loadavg)) {
- static RRDSET *load_chart = NULL;
- static RRDDIM *rd_load1 = NULL, *rd_load5 = NULL, *rd_load15 = NULL;
-
- if(unlikely(!load_chart)) {
- load_chart = rrdset_create_localhost(
- "system"
- , "load"
- , NULL
- , "load"
- , NULL
- , "System Load Average"
- , "load"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_LOADAVG_NAME
- , NETDATA_CHART_PRIO_SYSTEM_LOAD
- , (update_every < MIN_LOADAVG_UPDATE_EVERY) ? MIN_LOADAVG_UPDATE_EVERY : update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_load1 = rrddim_add(load_chart, "load1", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- rd_load5 = rrddim_add(load_chart, "load5", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- rd_load15 = rrddim_add(load_chart, "load15", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- }
-
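- // load averages are collected in fixed point: multiplied by 1000 here
- // and divided by 1000 by the dimension divisor, so a load of 1.52 is
- // stored as 1520 and rendered as 1.52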
- rrddim_set_by_pointer(load_chart, rd_load1, (collected_number) (load1 * 1000));
- rrddim_set_by_pointer(load_chart, rd_load5, (collected_number) (load5 * 1000));
- rrddim_set_by_pointer(load_chart, rd_load15, (collected_number) (load15 * 1000));
- rrdset_done(load_chart);
-
- next_loadavg_dt = load_chart->update_every * USEC_PER_SEC;
- }
- else
- next_loadavg_dt = MIN_LOADAVG_UPDATE_EVERY * USEC_PER_SEC;
- }
- else
- next_loadavg_dt -= dt;
-
-
- if(likely(do_all_processes)) {
- static RRDSET *processes_chart = NULL;
- static RRDDIM *rd_active = NULL;
- static const RRDSETVAR_ACQUIRED *rd_pidmax;
-
- if(unlikely(!processes_chart)) {
- processes_chart = rrdset_create_localhost(
- "system"
- , "active_processes"
- , NULL
- , "processes"
- , NULL
- , "System Active Processes"
- , "processes"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_LOADAVG_NAME
- , NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_active = rrddim_add(processes_chart, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_pidmax = rrdsetvar_custom_chart_variable_add_and_acquire(processes_chart, "pidmax");
- }
-
- rrddim_set_by_pointer(processes_chart, rd_active, active_processes);
- rrdsetvar_custom_chart_variable_set(processes_chart, rd_pidmax, max_processes);
- rrdset_done(processes_chart);
- }
-
- return 0;
-}
diff --git a/collectors/proc.plugin/proc_mdstat.c b/collectors/proc.plugin/proc_mdstat.c
deleted file mode 100644
index 3857d9ec4..000000000
--- a/collectors/proc.plugin/proc_mdstat.c
+++ /dev/null
@@ -1,640 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_MDSTAT_NAME "/proc/mdstat"
-
-struct raid {
- int redundant;
- char *name;
- uint32_t hash;
- char *level;
-
- RRDDIM *rd_health;
- unsigned long long failed_disks;
-
- RRDSET *st_disks;
- RRDDIM *rd_down;
- RRDDIM *rd_inuse;
- unsigned long long total_disks;
- unsigned long long inuse_disks;
-
- RRDSET *st_operation;
- RRDDIM *rd_check;
- RRDDIM *rd_resync;
- RRDDIM *rd_recovery;
- RRDDIM *rd_reshape;
- unsigned long long check;
- unsigned long long resync;
- unsigned long long recovery;
- unsigned long long reshape;
-
- RRDSET *st_finish;
- RRDDIM *rd_finish_in;
- unsigned long long finish_in;
-
- RRDSET *st_speed;
- RRDDIM *rd_speed;
- unsigned long long speed;
-
- char *mismatch_cnt_filename;
- RRDSET *st_mismatch_cnt;
- RRDDIM *rd_mismatch_cnt;
- unsigned long long mismatch_cnt;
-
- RRDSET *st_nonredundant;
- RRDDIM *rd_nonredundant;
-};
-
-struct old_raid {
- int redundant;
- char *name;
- uint32_t hash;
- int found;
-};
-
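-// note: this replaces every occurrence of c with '\0', effectively
-// truncating the string at the first match; it returns a pointer to the
-// original end of the string, which callers compare against the start of
-// the value to validate what was parsed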
-static inline char *remove_trailing_chars(char *s, char c)
-{
- while (*s) {
- if (unlikely(*s == c)) {
- *s = '\0';
- }
- s++;
- }
- return s;
-}
-
-static inline void make_chart_obsolete(char *name, const char *id_modifier)
-{
- char id[50 + 1];
- RRDSET *st = NULL;
-
- if (likely(name && id_modifier)) {
- snprintfz(id, sizeof(id) - 1, "mdstat.%s_%s", name, id_modifier);
- st = rrdset_find_active_byname_localhost(id);
- if (likely(st))
- rrdset_is_obsolete___safe_from_collector_thread(st);
- }
-}
-
-static void add_labels_to_mdstat(struct raid *raid, RRDSET *st) {
- rrdlabels_add(st->rrdlabels, "device", raid->name, RRDLABEL_SRC_AUTO);
- rrdlabels_add(st->rrdlabels, "raid_level", raid->level, RRDLABEL_SRC_AUTO);
-}
-
-int do_proc_mdstat(int update_every, usec_t dt)
-{
- (void)dt;
- static procfile *ff = NULL;
- static int do_health = -1, do_nonredundant = -1, do_disks = -1, do_operations = -1, do_mismatch = -1,
- do_mismatch_config = -1;
- static int make_charts_obsolete = -1;
- static char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL;
- static struct raid *raids = NULL;
- static size_t raids_allocated = 0;
- size_t raids_num = 0, raid_idx = 0, redundant_num = 0;
- static struct old_raid *old_raids = NULL;
- static size_t old_raids_allocated = 0;
- size_t old_raid_idx = 0;
-
- if (unlikely(do_health == -1)) {
- do_health =
- config_get_boolean("plugin:proc:/proc/mdstat", "faulty devices", CONFIG_BOOLEAN_YES);
- do_nonredundant =
- config_get_boolean("plugin:proc:/proc/mdstat", "nonredundant arrays availability", CONFIG_BOOLEAN_YES);
- do_mismatch_config =
- config_get_boolean_ondemand("plugin:proc:/proc/mdstat", "mismatch count", CONFIG_BOOLEAN_AUTO);
- do_disks =
- config_get_boolean("plugin:proc:/proc/mdstat", "disk stats", CONFIG_BOOLEAN_YES);
- do_operations =
- config_get_boolean("plugin:proc:/proc/mdstat", "operation status", CONFIG_BOOLEAN_YES);
-
- make_charts_obsolete =
- config_get_boolean("plugin:proc:/proc/mdstat", "make charts obsolete", CONFIG_BOOLEAN_YES);
-
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/mdstat");
- mdstat_filename = config_get("plugin:proc:/proc/mdstat", "filename to monitor", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/block/%s/md/mismatch_cnt");
- mismatch_cnt_filename = config_get("plugin:proc:/proc/mdstat", "mismatch_cnt filename to monitor", filename);
- }
-
- if (unlikely(!ff)) {
- ff = procfile_open(mdstat_filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if (unlikely(!ff))
- return 1;
- }
-
- ff = procfile_readall(ff);
- if (unlikely(!ff))
- return 0; // we return 0, so that we will retry opening it next time
-
- size_t lines = procfile_lines(ff);
- size_t words = 0;
-
- if (unlikely(lines < 2)) {
- collector_error("Cannot read /proc/mdstat. Expected 2 or more lines, read %zu.", lines);
- return 1;
- }
-
- // find how many raids are there
- size_t l;
- raids_num = 0;
- for (l = 1; l < lines - 2; l++) {
- if (unlikely(procfile_lineword(ff, l, 1)[0] == 'a')) // check if the raid is active
- raids_num++;
- }
-
- if (unlikely(!raids_num && !old_raids_allocated))
- return 0; // we return 0, so that we will retry searching for raids next time
-
- // allocate the memory we need;
- if (unlikely(raids_num != raids_allocated)) {
- for (raid_idx = 0; raid_idx < raids_allocated; raid_idx++) {
- struct raid *raid = &raids[raid_idx];
- freez(raid->name);
- freez(raid->level);
- freez(raid->mismatch_cnt_filename);
- }
- if (raids_num) {
- raids = (struct raid *)reallocz(raids, raids_num * sizeof(struct raid));
- memset(raids, 0, raids_num * sizeof(struct raid));
- } else {
- freez(raids);
- raids = NULL;
- }
- raids_allocated = raids_num;
- }
-
- // loop through all lines except the first and the last ones
- for (l = 1, raid_idx = 0; l < (lines - 2) && raid_idx < raids_num; l++) {
- struct raid *raid = &raids[raid_idx];
- raid->redundant = 0;
-
- words = procfile_linewords(ff, l);
-
- if (unlikely(words < 3))
- continue;
-
- if (unlikely(procfile_lineword(ff, l, 1)[0] != 'a'))
- continue;
-
- if (unlikely(!raid->name)) {
- raid->name = strdupz(procfile_lineword(ff, l, 0));
- raid->hash = simple_hash(raid->name);
- raid->level = strdupz(procfile_lineword(ff, l, 2));
- } else if (unlikely(strcmp(raid->name, procfile_lineword(ff, l, 0)))) {
- freez(raid->name);
- freez(raid->mismatch_cnt_filename);
- freez(raid->level);
- memset(raid, 0, sizeof(struct raid));
- raid->name = strdupz(procfile_lineword(ff, l, 0));
- raid->hash = simple_hash(raid->name);
- raid->level = strdupz(procfile_lineword(ff, l, 2));
- }
-
- if (unlikely(!raid->name || !raid->name[0]))
- continue;
-
- raid_idx++;
-
- // check if raid has disk status
- l++;
- words = procfile_linewords(ff, l);
- if (words < 2 || procfile_lineword(ff, l, words - 1)[0] != '[')
- continue;
-
- // split inuse and total number of disks
- if (likely(do_health || do_disks)) {
- char *s = NULL, *str_total = NULL, *str_inuse = NULL;
-
- s = procfile_lineword(ff, l, words - 2);
- if (unlikely(s[0] != '[')) {
- collector_error("Cannot read /proc/mdstat raid health status. Unexpected format: missing opening bracket.");
- continue;
- }
- str_total = ++s;
- while (*s) {
- if (unlikely(*s == '/')) {
- *s = '\0';
- str_inuse = s + 1;
- } else if (unlikely(*s == ']')) {
- *s = '\0';
- break;
- }
- s++;
- }
- if (unlikely(str_total[0] == '\0' || !str_inuse || str_inuse[0] == '\0')) {
- collector_error("Cannot read /proc/mdstat raid health status. Unexpected format.");
- continue;
- }
-
- raid->inuse_disks = str2ull(str_inuse, NULL);
- raid->total_disks = str2ull(str_total, NULL);
- raid->failed_disks = raid->total_disks - raid->inuse_disks;
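- // e.g. a disk status of "[2/1]" yields total_disks=2, inuse_disks=1
- // and failed_disks=1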
- }
-
- raid->redundant = 1;
- redundant_num++;
- l++;
-
- // check if any operation is performed on the raid
- if (likely(do_operations)) {
- char *s = NULL;
-
- raid->check = 0;
- raid->resync = 0;
- raid->recovery = 0;
- raid->reshape = 0;
- raid->finish_in = 0;
- raid->speed = 0;
-
- words = procfile_linewords(ff, l);
-
- if (likely(words < 2))
- continue;
-
- if (unlikely(procfile_lineword(ff, l, 0)[0] != '['))
- continue;
-
- if (unlikely(words < 7)) {
- collector_error("Cannot read /proc/mdstat line. Expected 7 params, read %zu.", words);
- continue;
- }
-
- char *word;
- word = procfile_lineword(ff, l, 3);
- remove_trailing_chars(word, '%');
-
- unsigned long long percentage = (unsigned long long)(str2ndd(word, NULL) * 100);
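- // progress is kept in hundredths of a percent, e.g. "12.6" -> 1260
- // (the trailing '%' was removed above)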
- // possible operations: check, resync, recovery, reshape
- // the 4th character is unique to each operation, so it is the one checked
- switch (procfile_lineword(ff, l, 1)[3]) {
- case 'c': // check
- raid->check = percentage;
- break;
- case 'y': // resync
- raid->resync = percentage;
- break;
- case 'o': // recovery
- raid->recovery = percentage;
- break;
- case 'h': // reshape
- raid->reshape = percentage;
- break;
- }
-
- word = procfile_lineword(ff, l, 5);
- s = remove_trailing_chars(word, 'm'); // remove trailing "min"
-
- word += 7; // skip leading "finish="
-
- if (likely(s > word))
- raid->finish_in = (unsigned long long)(str2ndd(word, NULL) * 60);
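- // e.g. "finish=83.9min" parses as 83.9 and is stored as 5034 seconds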
-
- word = procfile_lineword(ff, l, 6);
- s = remove_trailing_chars(word, 'K'); // remove trailing "K/sec"
-
- word += 6; // skip leading "speed="
-
- if (likely(s > word))
- raid->speed = str2ull(word, NULL);
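- // e.g. "speed=104512K/sec" parses as 104512 (KiB/s)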
- }
- }
-
- // read mismatch_cnt files
- if (do_mismatch == -1) {
- if (do_mismatch_config == CONFIG_BOOLEAN_AUTO) {
- if (raids_num > 50)
- do_mismatch = CONFIG_BOOLEAN_NO;
- else
- do_mismatch = CONFIG_BOOLEAN_YES;
- } else
- do_mismatch = do_mismatch_config;
- }
-
- if (likely(do_mismatch)) {
- for (raid_idx = 0; raid_idx < raids_num; raid_idx++) {
- char filename[FILENAME_MAX + 1];
- struct raid *raid = &raids[raid_idx];
-
- if (likely(raid->redundant)) {
- if (unlikely(!raid->mismatch_cnt_filename)) {
- snprintfz(filename, FILENAME_MAX, mismatch_cnt_filename, raid->name);
- raid->mismatch_cnt_filename = strdupz(filename);
- }
- if (unlikely(read_single_number_file(raid->mismatch_cnt_filename, &raid->mismatch_cnt))) {
- collector_error("Cannot read file '%s'", raid->mismatch_cnt_filename);
- do_mismatch = CONFIG_BOOLEAN_NO;
- collector_error("Monitoring for mismatch count has been disabled");
- break;
- }
- }
- }
- }
-
- // check for disappeared raids
- for (old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
- struct old_raid *old_raid = &old_raids[old_raid_idx];
- int found = 0;
-
- for (raid_idx = 0; raid_idx < raids_num; raid_idx++) {
- struct raid *raid = &raids[raid_idx];
-
- if (unlikely(
- raid->hash == old_raid->hash && !strcmp(raid->name, old_raid->name) &&
- raid->redundant == old_raid->redundant))
- found = 1;
- }
-
- old_raid->found = found;
- }
-
- int raid_disappeared = 0;
- for (old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
- struct old_raid *old_raid = &old_raids[old_raid_idx];
-
- if (unlikely(!old_raid->found)) {
- if (likely(make_charts_obsolete)) {
- make_chart_obsolete(old_raid->name, "disks");
- make_chart_obsolete(old_raid->name, "mismatch");
- make_chart_obsolete(old_raid->name, "operation");
- make_chart_obsolete(old_raid->name, "finish");
- make_chart_obsolete(old_raid->name, "speed");
- make_chart_obsolete(old_raid->name, "availability");
- }
- raid_disappeared = 1;
- }
- }
-
- // rebuild the list of previously seen raids, used to detect disappeared arrays
- if (unlikely(raid_disappeared || old_raids_allocated != raids_num)) {
- for (old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
- freez(old_raids[old_raid_idx].name);
- }
- if (likely(raids_num)) {
- old_raids = reallocz(old_raids, sizeof(struct old_raid) * raids_num);
- memset(old_raids, 0, sizeof(struct old_raid) * raids_num);
- } else {
- freez(old_raids);
- old_raids = NULL;
- }
- old_raids_allocated = raids_num;
- for (old_raid_idx = 0; old_raid_idx < old_raids_allocated; old_raid_idx++) {
- struct old_raid *old_raid = &old_raids[old_raid_idx];
- struct raid *raid = &raids[old_raid_idx];
-
- old_raid->name = strdupz(raid->name);
- old_raid->hash = raid->hash;
- old_raid->redundant = raid->redundant;
- }
- }
-
- if (likely(do_health && redundant_num)) {
- static RRDSET *st_mdstat_health = NULL;
- if (unlikely(!st_mdstat_health)) {
- st_mdstat_health = rrdset_create_localhost(
- "mdstat",
- "mdstat_health",
- NULL,
- "health",
- "md.health",
- "Faulty Devices In MD",
- "failed disks",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_MDSTAT_NAME,
- NETDATA_CHART_PRIO_MDSTAT_HEALTH,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_isnot_obsolete___safe_from_collector_thread(st_mdstat_health);
- }
-
- // the outer guard already ensures redundant_num != 0 here
- for (raid_idx = 0; raid_idx < raids_num; raid_idx++) {
- struct raid *raid = &raids[raid_idx];
-
- if (likely(raid->redundant)) {
- if (unlikely(!raid->rd_health && !(raid->rd_health = rrddim_find_active(st_mdstat_health, raid->name))))
- raid->rd_health = rrddim_add(st_mdstat_health, raid->name, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(st_mdstat_health, raid->rd_health, raid->failed_disks);
- }
- }
-
- rrdset_done(st_mdstat_health);
- }
-
- for (raid_idx = 0; raid_idx < raids_num; raid_idx++) {
- struct raid *raid = &raids[raid_idx];
- char id[50 + 1];
- char family[50 + 1];
-
- if (likely(raid->redundant)) {
- if (likely(do_disks)) {
- snprintfz(id, sizeof(id) - 1, "%s_disks", raid->name);
-
- if (unlikely(!raid->st_disks && !(raid->st_disks = rrdset_find_active_byname_localhost(id)))) {
- snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level);
-
- raid->st_disks = rrdset_create_localhost(
- "mdstat",
- id,
- NULL,
- family,
- "md.disks",
- "Disks Stats",
- "disks",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_MDSTAT_NAME,
- NETDATA_CHART_PRIO_MDSTAT_DISKS + raid_idx * 10,
- update_every,
- RRDSET_TYPE_STACKED);
-
- rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_disks);
-
- add_labels_to_mdstat(raid, raid->st_disks);
- }
-
- if (unlikely(!raid->rd_inuse && !(raid->rd_inuse = rrddim_find_active(raid->st_disks, "inuse"))))
- raid->rd_inuse = rrddim_add(raid->st_disks, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- if (unlikely(!raid->rd_down && !(raid->rd_down = rrddim_find_active(raid->st_disks, "down"))))
- raid->rd_down = rrddim_add(raid->st_disks, "down", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(raid->st_disks, raid->rd_inuse, raid->inuse_disks);
- rrddim_set_by_pointer(raid->st_disks, raid->rd_down, raid->failed_disks);
- rrdset_done(raid->st_disks);
- }
-
- if (likely(do_mismatch)) {
- snprintfz(id, sizeof(id) - 1, "%s_mismatch", raid->name);
-
- if (unlikely(!raid->st_mismatch_cnt && !(raid->st_mismatch_cnt = rrdset_find_active_byname_localhost(id)))) {
- snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level);
-
- raid->st_mismatch_cnt = rrdset_create_localhost(
- "mdstat",
- id,
- NULL,
- family,
- "md.mismatch_cnt",
- "Mismatch Count",
- "unsynchronized blocks",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_MDSTAT_NAME,
- NETDATA_CHART_PRIO_MDSTAT_MISMATCH + raid_idx * 10,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_mismatch_cnt);
-
- add_labels_to_mdstat(raid, raid->st_mismatch_cnt);
- }
-
- if (unlikely(!raid->rd_mismatch_cnt && !(raid->rd_mismatch_cnt = rrddim_find_active(raid->st_mismatch_cnt, "count"))))
- raid->rd_mismatch_cnt = rrddim_add(raid->st_mismatch_cnt, "count", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(raid->st_mismatch_cnt, raid->rd_mismatch_cnt, raid->mismatch_cnt);
- rrdset_done(raid->st_mismatch_cnt);
- }
-
- if (likely(do_operations)) {
- snprintfz(id, sizeof(id) - 1, "%s_operation", raid->name);
-
- if (unlikely(!raid->st_operation && !(raid->st_operation = rrdset_find_active_byname_localhost(id)))) {
- snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level);
-
- raid->st_operation = rrdset_create_localhost(
- "mdstat",
- id,
- NULL,
- family,
- "md.status",
- "Current Status",
- "percent",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_MDSTAT_NAME,
- NETDATA_CHART_PRIO_MDSTAT_OPERATION + raid_idx * 10,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_operation);
-
- add_labels_to_mdstat(raid, raid->st_operation);
- }
-
- if(unlikely(!raid->rd_check && !(raid->rd_check = rrddim_find_active(raid->st_operation, "check"))))
- raid->rd_check = rrddim_add(raid->st_operation, "check", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- if(unlikely(!raid->rd_resync && !(raid->rd_resync = rrddim_find_active(raid->st_operation, "resync"))))
- raid->rd_resync = rrddim_add(raid->st_operation, "resync", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- if(unlikely(!raid->rd_recovery && !(raid->rd_recovery = rrddim_find_active(raid->st_operation, "recovery"))))
- raid->rd_recovery = rrddim_add(raid->st_operation, "recovery", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- if(unlikely(!raid->rd_reshape && !(raid->rd_reshape = rrddim_find_active(raid->st_operation, "reshape"))))
- raid->rd_reshape = rrddim_add(raid->st_operation, "reshape", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(raid->st_operation, raid->rd_check, raid->check);
- rrddim_set_by_pointer(raid->st_operation, raid->rd_resync, raid->resync);
- rrddim_set_by_pointer(raid->st_operation, raid->rd_recovery, raid->recovery);
- rrddim_set_by_pointer(raid->st_operation, raid->rd_reshape, raid->reshape);
- rrdset_done(raid->st_operation);
-
- snprintfz(id, sizeof(id) - 1, "%s_finish", raid->name);
- if (unlikely(!raid->st_finish && !(raid->st_finish = rrdset_find_active_byname_localhost(id)))) {
- snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level);
-
- raid->st_finish = rrdset_create_localhost(
- "mdstat",
- id,
- NULL,
- family,
- "md.expected_time_until_operation_finish",
- "Approximate Time Until Finish",
- "seconds",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_MDSTAT_NAME,
- NETDATA_CHART_PRIO_MDSTAT_FINISH + raid_idx * 10,
- update_every, RRDSET_TYPE_LINE);
-
- rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_finish);
-
- add_labels_to_mdstat(raid, raid->st_finish);
- }
-
- if(unlikely(!raid->rd_finish_in && !(raid->rd_finish_in = rrddim_find_active(raid->st_finish, "finish_in"))))
- raid->rd_finish_in = rrddim_add(raid->st_finish, "finish_in", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(raid->st_finish, raid->rd_finish_in, raid->finish_in);
- rrdset_done(raid->st_finish);
-
- snprintfz(id, sizeof(id) - 1, "%s_speed", raid->name);
- if (unlikely(!raid->st_speed && !(raid->st_speed = rrdset_find_active_byname_localhost(id)))) {
- snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level);
-
- raid->st_speed = rrdset_create_localhost(
- "mdstat",
- id,
- NULL,
- family,
- "md.operation_speed",
- "Operation Speed",
- "KiB/s",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_MDSTAT_NAME,
- NETDATA_CHART_PRIO_MDSTAT_SPEED + raid_idx * 10,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_speed);
-
- add_labels_to_mdstat(raid, raid->st_speed);
- }
-
- if (unlikely(!raid->rd_speed && !(raid->rd_speed = rrddim_find_active(raid->st_speed, "speed"))))
- raid->rd_speed = rrddim_add(raid->st_speed, "speed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(raid->st_speed, raid->rd_speed, raid->speed);
- rrdset_done(raid->st_speed);
- }
- } else {
- if (likely(do_nonredundant)) {
- snprintfz(id, sizeof(id) - 1, "%s_availability", raid->name);
-
- if (unlikely(!raid->st_nonredundant && !(raid->st_nonredundant = rrdset_find_active_byname_localhost(id)))) {
- snprintfz(family, sizeof(family) - 1, "%s (%s)", raid->name, raid->level);
-
- raid->st_nonredundant = rrdset_create_localhost(
- "mdstat",
- id,
- NULL,
- family,
- "md.nonredundant",
- "Nonredundant Array Availability",
- "boolean",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_MDSTAT_NAME,
- NETDATA_CHART_PRIO_MDSTAT_NONREDUNDANT + raid_idx * 10,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_isnot_obsolete___safe_from_collector_thread(raid->st_nonredundant);
-
- add_labels_to_mdstat(raid, raid->st_nonredundant);
- }
-
- if (unlikely(!raid->rd_nonredundant && !(raid->rd_nonredundant = rrddim_find_active(raid->st_nonredundant, "available"))))
- raid->rd_nonredundant = rrddim_add(raid->st_nonredundant, "available", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrddim_set_by_pointer(raid->st_nonredundant, raid->rd_nonredundant, 1);
- rrdset_done(raid->st_nonredundant);
- }
- }
- }
-
- return 0;
-}
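
The health parsing above consumes /proc/mdstat status lines such as
"4675200 blocks super 1.2 [2/2] [UU]", where the second-to-last word is the
"[total/inuse]" disks field. A minimal standalone sketch of that bracket
parsing, using only the C standard library (the parse_md_disks() helper name
is an illustrative assumption, not part of this code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Parse a "[total/inuse]" token as found on /proc/mdstat status lines.
// Returns 0 on success, -1 on unexpected format.
static int parse_md_disks(const char *token, unsigned long long *total, unsigned long long *inuse) {
    if (token[0] != '[')
        return -1;

    char buf[64];
    snprintf(buf, sizeof(buf), "%s", token + 1);   // copy, skipping the '['

    char *slash = strchr(buf, '/');
    char *close = strchr(buf, ']');
    if (!slash || !close || close < slash)
        return -1;

    *slash = '\0';                                 // split "total/inuse"
    *close = '\0';                                 // drop the trailing ']'
    *total = strtoull(buf, NULL, 10);
    *inuse = strtoull(slash + 1, NULL, 10);
    return 0;
}

int main(void) {
    unsigned long long total, inuse;
    if (parse_md_disks("[2/1]", &total, &inuse) == 0)
        printf("total=%llu inuse=%llu failed=%llu\n", total, inuse, total - inuse);
    return 0;
}
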
diff --git a/collectors/proc.plugin/proc_meminfo.c b/collectors/proc.plugin/proc_meminfo.c
deleted file mode 100644
index a357cc782..000000000
--- a/collectors/proc.plugin/proc_meminfo.c
+++ /dev/null
@@ -1,849 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_MEMINFO_NAME "/proc/meminfo"
-#define CONFIG_SECTION_PLUGIN_PROC_MEMINFO "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_MEMINFO_NAME
-
-int do_proc_meminfo(int update_every, usec_t dt) {
- (void)dt;
-
- static procfile *ff = NULL;
- static int do_ram = -1
- , do_swap = -1
- , do_hwcorrupt = -1
- , do_committed = -1
- , do_writeback = -1
- , do_kernel = -1
- , do_slab = -1
- , do_hugepages = -1
- , do_transparent_hugepages = -1
- , do_reclaiming = -1
- , do_high_low = -1
- , do_cma = -1
- , do_directmap = -1;
-
- static ARL_BASE *arl_base = NULL;
- static ARL_ENTRY *arl_hwcorrupted = NULL, *arl_memavailable = NULL, *arl_hugepages_total = NULL,
- *arl_zswapped = NULL, *arl_high_low = NULL, *arl_cma_total = NULL,
- *arl_directmap4k = NULL, *arl_directmap2m = NULL, *arl_directmap4m = NULL, *arl_directmap1g = NULL;
-
- static unsigned long long
- MemTotal = 0
- , MemFree = 0
- , MemAvailable = 0
- , Buffers = 0
- , Cached = 0
- , SwapCached = 0
- , Active = 0
- , Inactive = 0
- , ActiveAnon = 0
- , InactiveAnon = 0
- , ActiveFile = 0
- , InactiveFile = 0
- , Unevictable = 0
- , Mlocked = 0
- , HighTotal = 0
- , HighFree = 0
- , LowTotal = 0
- , LowFree = 0
- , MmapCopy = 0
- , SwapTotal = 0
- , SwapFree = 0
- , Zswap = 0
- , Zswapped = 0
- , Dirty = 0
- , Writeback = 0
- , AnonPages = 0
- , Mapped = 0
- , Shmem = 0
- , KReclaimable = 0
- , Slab = 0
- , SReclaimable = 0
- , SUnreclaim = 0
- , KernelStack = 0
- , ShadowCallStack = 0
- , PageTables = 0
- , SecPageTables = 0
- , NFS_Unstable = 0
- , Bounce = 0
- , WritebackTmp = 0
- , CommitLimit = 0
- , Committed_AS = 0
- , VmallocTotal = 0
- , VmallocUsed = 0
- , VmallocChunk = 0
- , Percpu = 0
- //, EarlyMemtestBad = 0
- , HardwareCorrupted = 0
- , AnonHugePages = 0
- , ShmemHugePages = 0
- , ShmemPmdMapped = 0
- , FileHugePages = 0
- , FilePmdMapped = 0
- , CmaTotal = 0
- , CmaFree = 0
- //, Unaccepted = 0
- , HugePages_Total = 0
- , HugePages_Free = 0
- , HugePages_Rsvd = 0
- , HugePages_Surp = 0
- , Hugepagesize = 0
- //, Hugetlb = 0
- , DirectMap4k = 0
- , DirectMap2M = 0
- , DirectMap4M = 0
- , DirectMap1G = 0
- ;
-
- if(unlikely(!arl_base)) {
- do_ram = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "system ram", 1);
- do_swap = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "system swap", CONFIG_BOOLEAN_AUTO);
- do_hwcorrupt = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "hardware corrupted ECC", CONFIG_BOOLEAN_AUTO);
- do_committed = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "committed memory", 1);
- do_writeback = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "writeback memory", 1);
- do_kernel = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "kernel memory", 1);
- do_slab = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "slab memory", 1);
- do_hugepages = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "hugepages", CONFIG_BOOLEAN_AUTO);
- do_transparent_hugepages = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "transparent hugepages", CONFIG_BOOLEAN_AUTO);
- do_reclaiming = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "memory reclaiming", CONFIG_BOOLEAN_AUTO);
- do_high_low = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "high low memory", CONFIG_BOOLEAN_AUTO);
- do_cma = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "cma memory", CONFIG_BOOLEAN_AUTO);
- do_directmap = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "direct maps", CONFIG_BOOLEAN_AUTO);
-
- // https://github.com/torvalds/linux/blob/master/fs/proc/meminfo.c
-
- arl_base = arl_create("meminfo", NULL, 60);
- arl_expect(arl_base, "MemTotal", &MemTotal);
- arl_expect(arl_base, "MemFree", &MemFree);
- arl_memavailable = arl_expect(arl_base, "MemAvailable", &MemAvailable);
- arl_expect(arl_base, "Buffers", &Buffers);
- arl_expect(arl_base, "Cached", &Cached);
- arl_expect(arl_base, "SwapCached", &SwapCached);
- arl_expect(arl_base, "Active", &Active);
- arl_expect(arl_base, "Inactive", &Inactive);
- arl_expect(arl_base, "Active(anon)", &ActiveAnon);
- arl_expect(arl_base, "Inactive(anon)", &InactiveAnon);
- arl_expect(arl_base, "Active(file)", &ActiveFile);
- arl_expect(arl_base, "Inactive(file)", &InactiveFile);
- arl_expect(arl_base, "Unevictable", &Unevictable);
- arl_expect(arl_base, "Mlocked", &Mlocked);
-
- // CONFIG_HIGHMEM
- arl_high_low = arl_expect(arl_base, "HighTotal", &HighTotal);
- arl_expect(arl_base, "HighFree", &HighFree);
- arl_expect(arl_base, "LowTotal", &LowTotal);
- arl_expect(arl_base, "LowFree", &LowFree);
-
- // CONFIG_MMU
- arl_expect(arl_base, "MmapCopy", &MmapCopy);
-
- arl_expect(arl_base, "SwapTotal", &SwapTotal);
- arl_expect(arl_base, "SwapFree", &SwapFree);
-
- // CONFIG_ZSWAP
- arl_zswapped = arl_expect(arl_base, "Zswap", &Zswap);
- arl_expect(arl_base, "Zswapped", &Zswapped);
-
- arl_expect(arl_base, "Dirty", &Dirty);
- arl_expect(arl_base, "Writeback", &Writeback);
- arl_expect(arl_base, "AnonPages", &AnonPages);
- arl_expect(arl_base, "Mapped", &Mapped);
- arl_expect(arl_base, "Shmem", &Shmem);
- arl_expect(arl_base, "KReclaimable", &KReclaimable);
- arl_expect(arl_base, "Slab", &Slab);
- arl_expect(arl_base, "SReclaimable", &SReclaimable);
- arl_expect(arl_base, "SUnreclaim", &SUnreclaim);
- arl_expect(arl_base, "KernelStack", &KernelStack);
-
- // CONFIG_SHADOW_CALL_STACK
- arl_expect(arl_base, "ShadowCallStack", &ShadowCallStack);
-
- arl_expect(arl_base, "PageTables", &PageTables);
- arl_expect(arl_base, "SecPageTables", &SecPageTables);
- arl_expect(arl_base, "NFS_Unstable", &NFS_Unstable);
- arl_expect(arl_base, "Bounce", &Bounce);
- arl_expect(arl_base, "WritebackTmp", &WritebackTmp);
- arl_expect(arl_base, "CommitLimit", &CommitLimit);
- arl_expect(arl_base, "Committed_AS", &Committed_AS);
- arl_expect(arl_base, "VmallocTotal", &VmallocTotal);
- arl_expect(arl_base, "VmallocUsed", &VmallocUsed);
- arl_expect(arl_base, "VmallocChunk", &VmallocChunk);
- arl_expect(arl_base, "Percpu", &Percpu);
-
- // CONFIG_MEMTEST
- //arl_expect(arl_base, "EarlyMemtestBad", &EarlyMemtestBad);
-
- // CONFIG_MEMORY_FAILURE
- arl_hwcorrupted = arl_expect(arl_base, "HardwareCorrupted", &HardwareCorrupted);
-
- // CONFIG_TRANSPARENT_HUGEPAGE
- arl_expect(arl_base, "AnonHugePages", &AnonHugePages);
- arl_expect(arl_base, "ShmemHugePages", &ShmemHugePages);
- arl_expect(arl_base, "ShmemPmdMapped", &ShmemPmdMapped);
- arl_expect(arl_base, "FileHugePages", &FileHugePages);
- arl_expect(arl_base, "FilePmdMapped", &FilePmdMapped);
-
- // CONFIG_CMA
- arl_cma_total = arl_expect(arl_base, "CmaTotal", &CmaTotal);
- arl_expect(arl_base, "CmaFree", &CmaFree);
-
- // CONFIG_UNACCEPTED_MEMORY
- //arl_expect(arl_base, "Unaccepted", &Unaccepted);
-
- // these appear only when hugepages are supported
- arl_hugepages_total = arl_expect(arl_base, "HugePages_Total", &HugePages_Total);
- arl_expect(arl_base, "HugePages_Free", &HugePages_Free);
- arl_expect(arl_base, "HugePages_Rsvd", &HugePages_Rsvd);
- arl_expect(arl_base, "HugePages_Surp", &HugePages_Surp);
- arl_expect(arl_base, "Hugepagesize", &Hugepagesize);
- //arl_expect(arl_base, "Hugetlb", &Hugetlb);
-
- arl_directmap4k = arl_expect(arl_base, "DirectMap4k", &DirectMap4k);
- arl_directmap2m = arl_expect(arl_base, "DirectMap2M", &DirectMap2M);
- arl_directmap4m = arl_expect(arl_base, "DirectMap4M", &DirectMap4M);
- arl_directmap1g = arl_expect(arl_base, "DirectMap1G", &DirectMap1G);
- }
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/meminfo");
- ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_MEMINFO, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff))
- return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
- return 0; // so that we will retry reading it next time
-
- size_t lines = procfile_lines(ff), l;
-
- arl_begin(arl_base);
- for(l = 0; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
- if(unlikely(words < 2)) continue;
-
- if(unlikely(arl_check(arl_base,
- procfile_lineword(ff, l, 0),
- procfile_lineword(ff, l, 1)))) break;
- }
-
- // http://calimeroteknik.free.fr/blag/?article20/really-used-memory-on-gnu-linux
- // KReclaimable (added in kernel v4.20) includes SReclaimable
- unsigned long long reclaimable = KReclaimable > 0 ? KReclaimable : SReclaimable;
- unsigned long long MemCached = Cached + reclaimable - Shmem;
- unsigned long long MemUsed = MemTotal - MemFree - MemCached - Buffers;
- // The Linux kernel doesn't report ZFS ARC usage as cache memory (the ARC is included in the total used system memory)
- if (!inside_lxc_container) {
- MemCached += (zfs_arcstats_shrinkable_cache_size_bytes / 1024);
- MemUsed -= (zfs_arcstats_shrinkable_cache_size_bytes / 1024);
- MemAvailable += (zfs_arcstats_shrinkable_cache_size_bytes / 1024);
- }
-
- if(do_ram) {
- {
- static RRDSET *st_system_ram = NULL;
- static RRDDIM *rd_free = NULL, *rd_used = NULL, *rd_cached = NULL, *rd_buffers = NULL;
-
- if(unlikely(!st_system_ram)) {
- st_system_ram = rrdset_create_localhost(
- "system"
- , "ram"
- , NULL
- , "ram"
- , NULL
- , "System RAM"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_SYSTEM_RAM
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_free = rrddim_add(st_system_ram, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_used = rrddim_add(st_system_ram, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_cached = rrddim_add(st_system_ram, "cached", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_buffers = rrddim_add(st_system_ram, "buffers", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_system_ram, rd_free, MemFree);
- rrddim_set_by_pointer(st_system_ram, rd_used, MemUsed);
- rrddim_set_by_pointer(st_system_ram, rd_cached, MemCached);
- rrddim_set_by_pointer(st_system_ram, rd_buffers, Buffers);
- rrdset_done(st_system_ram);
- }
-
- if(arl_memavailable->flags & ARL_ENTRY_FLAG_FOUND) {
- static RRDSET *st_mem_available = NULL;
- static RRDDIM *rd_avail = NULL;
-
- if(unlikely(!st_mem_available)) {
- st_mem_available = rrdset_create_localhost(
- "mem"
- , "available"
- , NULL
- , "overview"
- , NULL
- , "Available RAM for applications"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_SYSTEM_AVAILABLE
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_avail = rrddim_add(st_mem_available, "MemAvailable", "avail", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_available, rd_avail, MemAvailable);
- rrdset_done(st_mem_available);
- }
- }
-
- unsigned long long SwapUsed = SwapTotal - SwapFree;
-
- if(do_swap == CONFIG_BOOLEAN_YES || (do_swap == CONFIG_BOOLEAN_AUTO &&
- (SwapTotal || SwapUsed || SwapFree ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_swap = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_system_swap = NULL;
- static RRDDIM *rd_free = NULL, *rd_used = NULL;
-
- if(unlikely(!st_system_swap)) {
- st_system_swap = rrdset_create_localhost(
- "mem"
- , "swap"
- , NULL
- , "swap"
- , NULL
- , "System Swap"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_SWAP
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_flag_set(st_system_swap, RRDSET_FLAG_DETAIL);
-
- rd_free = rrddim_add(st_system_swap, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_used = rrddim_add(st_system_swap, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_system_swap, rd_used, SwapUsed);
- rrddim_set_by_pointer(st_system_swap, rd_free, SwapFree);
- rrdset_done(st_system_swap);
-
- {
- static RRDSET *st_mem_swap_cached = NULL;
- static RRDDIM *rd_cached = NULL;
-
- if (unlikely(!st_mem_swap_cached)) {
- st_mem_swap_cached = rrdset_create_localhost(
- "mem"
- , "swap_cached"
- , NULL
- , "swap"
- , NULL
- , "Swap Memory Cached in RAM"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_SWAP + 1
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_cached = rrddim_add(st_mem_swap_cached, "cached", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_swap_cached, rd_cached, SwapCached);
- rrdset_done(st_mem_swap_cached);
- }
-
- if(arl_zswapped->flags & ARL_ENTRY_FLAG_FOUND) {
- static RRDSET *st_mem_zswap = NULL;
- static RRDDIM *rd_zswap = NULL, *rd_zswapped = NULL;
-
- if (unlikely(!st_mem_zswap)) {
- st_mem_zswap = rrdset_create_localhost(
- "mem"
- , "zswap"
- , NULL
- , "zswap"
- , NULL
- , "Zswap Usage"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_ZSWAP
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_zswap = rrddim_add(st_mem_zswap, "zswap", "in-ram", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_zswapped = rrddim_add(st_mem_zswap, "zswapped", "on-disk", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_zswap, rd_zswap, Zswap);
- rrddim_set_by_pointer(st_mem_zswap, rd_zswapped, Zswapped);
- rrdset_done(st_mem_zswap);
- }
- }
-
- if(arl_hwcorrupted->flags & ARL_ENTRY_FLAG_FOUND &&
- (do_hwcorrupt == CONFIG_BOOLEAN_YES || (do_hwcorrupt == CONFIG_BOOLEAN_AUTO &&
- (HardwareCorrupted > 0 ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) {
- do_hwcorrupt = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_mem_hwcorrupt = NULL;
- static RRDDIM *rd_corrupted = NULL;
-
- if(unlikely(!st_mem_hwcorrupt)) {
- st_mem_hwcorrupt = rrdset_create_localhost(
- "mem"
- , "hwcorrupt"
- , NULL
- , "ecc"
- , NULL
- , "Corrupted Memory, detected by ECC"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_HW
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st_mem_hwcorrupt, RRDSET_FLAG_DETAIL);
-
- rd_corrupted = rrddim_add(st_mem_hwcorrupt, "HardwareCorrupted", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_hwcorrupt, rd_corrupted, HardwareCorrupted);
- rrdset_done(st_mem_hwcorrupt);
- }
-
- if(do_committed) {
- static RRDSET *st_mem_committed = NULL;
- static RRDDIM *rd_committed = NULL;
-
- if(unlikely(!st_mem_committed)) {
- st_mem_committed = rrdset_create_localhost(
- "mem"
- , "committed"
- , NULL
- , "overview"
- , NULL
- , "Committed (Allocated) Memory"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(st_mem_committed, RRDSET_FLAG_DETAIL);
-
- rd_committed = rrddim_add(st_mem_committed, "Committed_AS", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_committed, rd_committed, Committed_AS);
- rrdset_done(st_mem_committed);
- }
-
- if(do_writeback) {
- static RRDSET *st_mem_writeback = NULL;
- static RRDDIM *rd_dirty = NULL, *rd_writeback = NULL, *rd_fusewriteback = NULL, *rd_nfs_writeback = NULL, *rd_bounce = NULL;
-
- if(unlikely(!st_mem_writeback)) {
- st_mem_writeback = rrdset_create_localhost(
- "mem"
- , "writeback"
- , NULL
- , "writeback"
- , NULL
- , "Writeback Memory"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_KERNEL
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st_mem_writeback, RRDSET_FLAG_DETAIL);
-
- rd_dirty = rrddim_add(st_mem_writeback, "Dirty", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_writeback = rrddim_add(st_mem_writeback, "Writeback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_fusewriteback = rrddim_add(st_mem_writeback, "FuseWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_nfs_writeback = rrddim_add(st_mem_writeback, "NfsWriteback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_bounce = rrddim_add(st_mem_writeback, "Bounce", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_writeback, rd_dirty, Dirty);
- rrddim_set_by_pointer(st_mem_writeback, rd_writeback, Writeback);
- rrddim_set_by_pointer(st_mem_writeback, rd_fusewriteback, WritebackTmp);
- rrddim_set_by_pointer(st_mem_writeback, rd_nfs_writeback, NFS_Unstable);
- rrddim_set_by_pointer(st_mem_writeback, rd_bounce, Bounce);
- rrdset_done(st_mem_writeback);
- }
-
- // --------------------------------------------------------------------
-
- if(do_kernel) {
- static RRDSET *st_mem_kernel = NULL;
- static RRDDIM *rd_slab = NULL, *rd_kernelstack = NULL, *rd_pagetables = NULL, *rd_vmallocused = NULL,
- *rd_percpu = NULL, *rd_kreclaimable = NULL;
-
- if(unlikely(!st_mem_kernel)) {
- st_mem_kernel = rrdset_create_localhost(
- "mem"
- , "kernel"
- , NULL
- , "kernel"
- , NULL
- , "Memory Used by Kernel"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_KERNEL + 1
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_flag_set(st_mem_kernel, RRDSET_FLAG_DETAIL);
-
- rd_slab = rrddim_add(st_mem_kernel, "Slab", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_kernelstack = rrddim_add(st_mem_kernel, "KernelStack", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_pagetables = rrddim_add(st_mem_kernel, "PageTables", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_vmallocused = rrddim_add(st_mem_kernel, "VmallocUsed", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_percpu = rrddim_add(st_mem_kernel, "Percpu", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_kreclaimable = rrddim_add(st_mem_kernel, "KReclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_kernel, rd_slab, Slab);
- rrddim_set_by_pointer(st_mem_kernel, rd_kernelstack, KernelStack);
- rrddim_set_by_pointer(st_mem_kernel, rd_pagetables, PageTables);
- rrddim_set_by_pointer(st_mem_kernel, rd_vmallocused, VmallocUsed);
- rrddim_set_by_pointer(st_mem_kernel, rd_percpu, Percpu);
- rrddim_set_by_pointer(st_mem_kernel, rd_kreclaimable, KReclaimable);
-
- rrdset_done(st_mem_kernel);
- }
-
- if(do_slab) {
- static RRDSET *st_mem_slab = NULL;
- static RRDDIM *rd_reclaimable = NULL, *rd_unreclaimable = NULL;
-
- if(unlikely(!st_mem_slab)) {
- st_mem_slab = rrdset_create_localhost(
- "mem"
- , "slab"
- , NULL
- , "slab"
- , NULL
- , "Reclaimable Kernel Memory"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_SLAB
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_flag_set(st_mem_slab, RRDSET_FLAG_DETAIL);
-
- rd_reclaimable = rrddim_add(st_mem_slab, "reclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_unreclaimable = rrddim_add(st_mem_slab, "unreclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_slab, rd_reclaimable, SReclaimable);
- rrddim_set_by_pointer(st_mem_slab, rd_unreclaimable, SUnreclaim);
- rrdset_done(st_mem_slab);
- }
-
- if(arl_hugepages_total->flags & ARL_ENTRY_FLAG_FOUND &&
- (do_hugepages == CONFIG_BOOLEAN_YES || (do_hugepages == CONFIG_BOOLEAN_AUTO &&
- ((Hugepagesize && HugePages_Total) ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))) {
- do_hugepages = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_mem_hugepages = NULL;
- static RRDDIM *rd_used = NULL, *rd_free = NULL, *rd_rsvd = NULL, *rd_surp = NULL;
-
- if(unlikely(!st_mem_hugepages)) {
- st_mem_hugepages = rrdset_create_localhost(
- "mem"
- , "hugepages"
- , NULL
- , "hugepages"
- , NULL
- , "Dedicated HugePages Memory"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_HUGEPAGES
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_flag_set(st_mem_hugepages, RRDSET_FLAG_DETAIL);
-
- rd_free = rrddim_add(st_mem_hugepages, "free", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_used = rrddim_add(st_mem_hugepages, "used", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_surp = rrddim_add(st_mem_hugepages, "surplus", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_rsvd = rrddim_add(st_mem_hugepages, "reserved", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_hugepages, rd_used, HugePages_Total - HugePages_Free - HugePages_Rsvd);
- rrddim_set_by_pointer(st_mem_hugepages, rd_free, HugePages_Free);
- rrddim_set_by_pointer(st_mem_hugepages, rd_rsvd, HugePages_Rsvd);
- rrddim_set_by_pointer(st_mem_hugepages, rd_surp, HugePages_Surp);
- rrdset_done(st_mem_hugepages);
- }
-
- if(do_transparent_hugepages == CONFIG_BOOLEAN_YES || (do_transparent_hugepages == CONFIG_BOOLEAN_AUTO &&
- (AnonHugePages ||
- ShmemHugePages ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_transparent_hugepages = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_mem_transparent_hugepages = NULL;
- static RRDDIM *rd_anonymous = NULL, *rd_shared = NULL;
-
- if(unlikely(!st_mem_transparent_hugepages)) {
- st_mem_transparent_hugepages = rrdset_create_localhost(
- "mem"
- , "thp"
- , NULL
- , "hugepages"
- , NULL
- , "Transparent HugePages Memory"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_HUGEPAGES + 1
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_flag_set(st_mem_transparent_hugepages, RRDSET_FLAG_DETAIL);
-
- rd_anonymous = rrddim_add(st_mem_transparent_hugepages, "anonymous", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_shared = rrddim_add(st_mem_transparent_hugepages, "shmem", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_transparent_hugepages, rd_anonymous, AnonHugePages);
- rrddim_set_by_pointer(st_mem_transparent_hugepages, rd_shared, ShmemHugePages);
- rrdset_done(st_mem_transparent_hugepages);
-
- {
- static RRDSET *st_mem_thp_details = NULL;
- static RRDDIM *rd_shmem_pmd_mapped = NULL, *rd_file_huge_pages = NULL, *rd_file_pmd_mapped = NULL;
-
- if(unlikely(!st_mem_thp_details)) {
- st_mem_thp_details = rrdset_create_localhost(
- "mem"
- , "thp_details"
- , NULL
- , "hugepages"
- , NULL
- , "Details of Transparent HugePages Usage"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_HUGEPAGES_DETAILS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st_mem_thp_details, RRDSET_FLAG_DETAIL);
-
- rd_shmem_pmd_mapped = rrddim_add(st_mem_thp_details, "shmem_pmd", "ShmemPmdMapped", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_file_huge_pages = rrddim_add(st_mem_thp_details, "file", "FileHugePages", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_file_pmd_mapped = rrddim_add(st_mem_thp_details, "file_pmd", "FilePmdMapped", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_thp_details, rd_shmem_pmd_mapped, ShmemPmdMapped);
- rrddim_set_by_pointer(st_mem_thp_details, rd_file_huge_pages, FileHugePages);
- rrddim_set_by_pointer(st_mem_thp_details, rd_file_pmd_mapped, FilePmdMapped);
- rrdset_done(st_mem_thp_details);
- }
- }
-
- if(do_reclaiming != CONFIG_BOOLEAN_NO) {
- static RRDSET *st_mem_reclaiming = NULL;
- static RRDDIM *rd_active = NULL, *rd_inactive = NULL,
- *rd_active_anon = NULL, *rd_inactive_anon = NULL,
- *rd_active_file = NULL, *rd_inactive_file = NULL,
- *rd_unevictable = NULL, *rd_mlocked = NULL;
-
- if(unlikely(!st_mem_reclaiming)) {
- st_mem_reclaiming = rrdset_create_localhost(
- "mem"
- , "reclaiming"
- , NULL
- , "reclaiming"
- , NULL
- , "Memory Reclaiming"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_RECLAIMING
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st_mem_reclaiming, RRDSET_FLAG_DETAIL);
-
- rd_active = rrddim_add(st_mem_reclaiming, "active", "Active", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_inactive = rrddim_add(st_mem_reclaiming, "inactive", "Inactive", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_active_anon = rrddim_add(st_mem_reclaiming, "active_anon", "Active(anon)", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_inactive_anon = rrddim_add(st_mem_reclaiming, "inactive_anon", "Inactive(anon)", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_active_file = rrddim_add(st_mem_reclaiming, "active_file", "Active(file)", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_inactive_file = rrddim_add(st_mem_reclaiming, "inactive_file", "Inactive(file)", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_unevictable = rrddim_add(st_mem_reclaiming, "unevictable", "Unevictable", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_mlocked = rrddim_add(st_mem_reclaiming, "mlocked", "Mlocked", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_reclaiming, rd_active, Active);
- rrddim_set_by_pointer(st_mem_reclaiming, rd_inactive, Inactive);
- rrddim_set_by_pointer(st_mem_reclaiming, rd_active_anon, ActiveAnon);
- rrddim_set_by_pointer(st_mem_reclaiming, rd_inactive_anon, InactiveAnon);
- rrddim_set_by_pointer(st_mem_reclaiming, rd_active_file, ActiveFile);
- rrddim_set_by_pointer(st_mem_reclaiming, rd_inactive_file, InactiveFile);
- rrddim_set_by_pointer(st_mem_reclaiming, rd_unevictable, Unevictable);
- rrddim_set_by_pointer(st_mem_reclaiming, rd_mlocked, Mlocked);
-
- rrdset_done(st_mem_reclaiming);
- }
-
- if(do_high_low != CONFIG_BOOLEAN_NO && (arl_high_low->flags & ARL_ENTRY_FLAG_FOUND)) {
- static RRDSET *st_mem_high_low = NULL;
- static RRDDIM *rd_high_used = NULL, *rd_low_used = NULL;
- static RRDDIM *rd_high_free = NULL, *rd_low_free = NULL;
-
- if(unlikely(!st_mem_high_low)) {
- st_mem_high_low = rrdset_create_localhost(
- "mem"
- , "high_low"
- , NULL
- , "high_low"
- , NULL
- , "High and Low Used and Free Memory Areas"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_HIGH_LOW
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_flag_set(st_mem_high_low, RRDSET_FLAG_DETAIL);
-
- rd_high_used = rrddim_add(st_mem_high_low, "high_used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_low_used = rrddim_add(st_mem_high_low, "low_used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_high_free = rrddim_add(st_mem_high_low, "high_free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_low_free = rrddim_add(st_mem_high_low, "low_free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_high_low, rd_high_used, HighTotal - HighFree);
- rrddim_set_by_pointer(st_mem_high_low, rd_low_used, LowTotal - LowFree);
- rrddim_set_by_pointer(st_mem_high_low, rd_high_free, HighFree);
- rrddim_set_by_pointer(st_mem_high_low, rd_low_free, LowFree);
- rrdset_done(st_mem_high_low);
- }
-
- if(do_cma == CONFIG_BOOLEAN_YES || (do_cma == CONFIG_BOOLEAN_AUTO && (arl_cma_total->flags & ARL_ENTRY_FLAG_FOUND) && CmaTotal)) {
- do_cma = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_mem_cma = NULL;
- static RRDDIM *rd_used = NULL, *rd_free = NULL;
-
- if(unlikely(!st_mem_cma)) {
- st_mem_cma = rrdset_create_localhost(
- "mem"
- , "cma"
- , NULL
- , "cma"
- , NULL
- , "Contiguous Memory Allocator (CMA) Memory"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_CMA
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_used = rrddim_add(st_mem_cma, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_free = rrddim_add(st_mem_cma, "free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_cma, rd_used, CmaTotal - CmaFree);
- rrddim_set_by_pointer(st_mem_cma, rd_free, CmaFree);
- rrdset_done(st_mem_cma);
- }
-
- if(do_directmap != CONFIG_BOOLEAN_NO &&
- ((arl_directmap4k->flags & ARL_ENTRY_FLAG_FOUND) ||
- (arl_directmap2m->flags & ARL_ENTRY_FLAG_FOUND) ||
- (arl_directmap4m->flags & ARL_ENTRY_FLAG_FOUND) ||
- (arl_directmap1g->flags & ARL_ENTRY_FLAG_FOUND)))
- {
- static RRDSET *st_mem_directmap = NULL;
- static RRDDIM *rd_4k = NULL, *rd_2m = NULL, *rd_1g = NULL, *rd_4m = NULL;
-
- if(unlikely(!st_mem_directmap)) {
- st_mem_directmap = rrdset_create_localhost(
- "mem"
- , "directmaps"
- , NULL
- , "overview"
- , NULL
- , "Direct Memory Mappings"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_MEMINFO_NAME
- , NETDATA_CHART_PRIO_MEM_DIRECTMAP
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- if(arl_directmap4k->flags & ARL_ENTRY_FLAG_FOUND)
- rd_4k = rrddim_add(st_mem_directmap, "4k", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
-
- if(arl_directmap2m->flags & ARL_ENTRY_FLAG_FOUND)
- rd_2m = rrddim_add(st_mem_directmap, "2m", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
-
- if(arl_directmap4m->flags & ARL_ENTRY_FLAG_FOUND)
- rd_4m = rrddim_add(st_mem_directmap, "4m", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
-
- if(arl_directmap1g->flags & ARL_ENTRY_FLAG_FOUND)
- rd_1g = rrddim_add(st_mem_directmap, "1g", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- if(rd_4k)
- rrddim_set_by_pointer(st_mem_directmap, rd_4k, DirectMap4k);
-
- if(rd_2m)
- rrddim_set_by_pointer(st_mem_directmap, rd_2m, DirectMap2M);
-
- if(rd_4m)
- rrddim_set_by_pointer(st_mem_directmap, rd_4m, DirectMap4M);
-
- if(rd_1g)
- rrddim_set_by_pointer(st_mem_directmap, rd_1g, DirectMap1G);
-
- rrdset_done(st_mem_directmap);
- }
-
- return 0;
-}
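
The "really used" memory arithmetic above follows the linked article: cached
memory is Cached plus KReclaimable (or SReclaimable on kernels before 4.20)
minus Shmem, and used memory is what remains of MemTotal after free, cached
and buffers. A minimal standalone sketch of the same arithmetic, using plain
stdio parsing instead of netdata's procfile/ARL helpers and skipping the ZFS
ARC adjustment the collector applies outside containers:

#include <stdio.h>
#include <string.h>

int main(void) {
    unsigned long long MemTotal = 0, MemFree = 0, Buffers = 0, Cached = 0,
                       Shmem = 0, KReclaimable = 0, SReclaimable = 0;

    FILE *fp = fopen("/proc/meminfo", "r");
    if (!fp)
        return 1;

    char line[256];
    while (fgets(line, sizeof(line), fp)) {
        char key[64];
        unsigned long long val;
        if (sscanf(line, "%63[^:]: %llu", key, &val) != 2)
            continue;
        if (!strcmp(key, "MemTotal"))          MemTotal = val;
        else if (!strcmp(key, "MemFree"))      MemFree = val;
        else if (!strcmp(key, "Buffers"))      Buffers = val;
        else if (!strcmp(key, "Cached"))       Cached = val;
        else if (!strcmp(key, "Shmem"))        Shmem = val;
        else if (!strcmp(key, "KReclaimable")) KReclaimable = val;
        else if (!strcmp(key, "SReclaimable")) SReclaimable = val;
    }
    fclose(fp);

    // KReclaimable (kernel >= 4.20) already includes SReclaimable;
    // fall back to SReclaimable on older kernels.
    unsigned long long reclaimable = KReclaimable ? KReclaimable : SReclaimable;
    unsigned long long cached = Cached + reclaimable - Shmem;
    unsigned long long used = MemTotal - MemFree - cached - Buffers;

    printf("used=%llu kB cached=%llu kB buffers=%llu kB free=%llu kB\n",
           used, cached, Buffers, MemFree);
    return 0;
}
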
diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c
deleted file mode 100644
index b39f39683..000000000
--- a/collectors/proc.plugin/proc_net_dev.c
+++ /dev/null
@@ -1,1956 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_NETDEV_NAME "/proc/net/dev"
-#define CONFIG_SECTION_PLUGIN_PROC_NETDEV "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETDEV_NAME
-
-#define RRDFUNCTIONS_NETDEV_HELP "View network interface statistics"
-
-#define STATE_LENGTH_MAX 32
-
-#define READ_RETRY_PERIOD 60 // seconds
-
-void cgroup_netdev_reset_all(void);
-void cgroup_netdev_release(const DICTIONARY_ITEM *link);
-const void *cgroup_netdev_dup(const DICTIONARY_ITEM *link);
-void cgroup_netdev_add_bandwidth(const DICTIONARY_ITEM *link, NETDATA_DOUBLE received, NETDATA_DOUBLE sent);
-
-enum {
- NETDEV_DUPLEX_UNKNOWN,
- NETDEV_DUPLEX_HALF,
- NETDEV_DUPLEX_FULL
-};
-
-static const char *get_duplex_string(int duplex)
-{
- switch (duplex) {
- case NETDEV_DUPLEX_FULL:
- return "full";
- case NETDEV_DUPLEX_HALF:
- return "half";
- default:
- return "unknown";
- }
-}
-
-enum {
- NETDEV_OPERSTATE_UNKNOWN,
- NETDEV_OPERSTATE_NOTPRESENT,
- NETDEV_OPERSTATE_DOWN,
- NETDEV_OPERSTATE_LOWERLAYERDOWN,
- NETDEV_OPERSTATE_TESTING,
- NETDEV_OPERSTATE_DORMANT,
- NETDEV_OPERSTATE_UP
-};
-
-static inline int get_operstate(char *operstate)
-{
- // As defined in https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-class-net
- if (!strcmp(operstate, "up"))
- return NETDEV_OPERSTATE_UP;
- if (!strcmp(operstate, "down"))
- return NETDEV_OPERSTATE_DOWN;
- if (!strcmp(operstate, "notpresent"))
- return NETDEV_OPERSTATE_NOTPRESENT;
- if (!strcmp(operstate, "lowerlayerdown"))
- return NETDEV_OPERSTATE_LOWERLAYERDOWN;
- if (!strcmp(operstate, "testing"))
- return NETDEV_OPERSTATE_TESTING;
- if (!strcmp(operstate, "dormant"))
- return NETDEV_OPERSTATE_DORMANT;
-
- return NETDEV_OPERSTATE_UNKNOWN;
-}
-
-static const char *get_operstate_string(int operstate)
-{
- switch (operstate) {
- case NETDEV_OPERSTATE_UP:
- return "up";
- case NETDEV_OPERSTATE_DOWN:
- return "down";
- case NETDEV_OPERSTATE_NOTPRESENT:
- return "notpresent";
- case NETDEV_OPERSTATE_LOWERLAYERDOWN:
- return "lowerlayerdown";
- case NETDEV_OPERSTATE_TESTING:
- return "testing";
- case NETDEV_OPERSTATE_DORMANT:
- return "dormant";
- default:
- return "unknown";
- }
-}
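
The duplex and operstate helpers above map strings that the kernel exposes
per interface under /sys/class/net/<iface>/ (the sysfs ABI document linked
earlier); the collector reads them through the filename_operstate and
filename_duplex paths it builds for each device. A standalone sketch of
reading one such attribute (the read_operstate() helper and the "eth0" name
are illustrative assumptions):

#include <stdio.h>
#include <string.h>

// Read /sys/class/net/<iface>/operstate into buf, stripping the newline.
// Returns 0 on success, -1 if the attribute cannot be read.
static int read_operstate(const char *iface, char *buf, int len) {
    char path[128];
    snprintf(path, sizeof(path), "/sys/class/net/%s/operstate", iface);

    FILE *fp = fopen(path, "r");
    if (!fp)
        return -1;

    if (!fgets(buf, len, fp)) {
        fclose(fp);
        return -1;
    }
    fclose(fp);

    buf[strcspn(buf, "\n")] = '\0';   // drop the trailing newline
    return 0;
}

int main(void) {
    char state[32];
    if (read_operstate("eth0", state, sizeof(state)) == 0)
        printf("eth0 operstate: %s\n", state);   // e.g. "up", "down", "dormant"
    return 0;
}
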
-
-// ----------------------------------------------------------------------------
-// netdev list
-
-static struct netdev {
- char *name;
- uint32_t hash;
- size_t len;
-
- // flags
- int virtual;
- int configured;
- int enabled;
- int updated;
-
- bool function_ready;
-
- time_t discover_time;
-
- int carrier_file_exists;
- time_t carrier_file_lost_time;
-
- int duplex_file_exists;
- time_t duplex_file_lost_time;
-
- int speed_file_exists;
- time_t speed_file_lost_time;
-
- int do_bandwidth;
- int do_packets;
- int do_errors;
- int do_drops;
- int do_fifo;
- int do_compressed;
- int do_events;
- int do_speed;
- int do_duplex;
- int do_operstate;
- int do_carrier;
- int do_mtu;
-
- const char *chart_type_net_bytes;
- const char *chart_type_net_packets;
- const char *chart_type_net_errors;
- const char *chart_type_net_fifo;
- const char *chart_type_net_events;
- const char *chart_type_net_drops;
- const char *chart_type_net_compressed;
- const char *chart_type_net_speed;
- const char *chart_type_net_duplex;
- const char *chart_type_net_operstate;
- const char *chart_type_net_carrier;
- const char *chart_type_net_mtu;
-
- const char *chart_id_net_bytes;
- const char *chart_id_net_packets;
- const char *chart_id_net_errors;
- const char *chart_id_net_fifo;
- const char *chart_id_net_events;
- const char *chart_id_net_drops;
- const char *chart_id_net_compressed;
- const char *chart_id_net_speed;
- const char *chart_id_net_duplex;
- const char *chart_id_net_operstate;
- const char *chart_id_net_carrier;
- const char *chart_id_net_mtu;
-
- const char *chart_ctx_net_bytes;
- const char *chart_ctx_net_packets;
- const char *chart_ctx_net_errors;
- const char *chart_ctx_net_fifo;
- const char *chart_ctx_net_events;
- const char *chart_ctx_net_drops;
- const char *chart_ctx_net_compressed;
- const char *chart_ctx_net_speed;
- const char *chart_ctx_net_duplex;
- const char *chart_ctx_net_operstate;
- const char *chart_ctx_net_carrier;
- const char *chart_ctx_net_mtu;
-
- const char *chart_family;
-
- RRDLABELS *chart_labels;
-
- int flipped;
- unsigned long priority;
-
- // data collected
- kernel_uint_t rbytes;
- kernel_uint_t rpackets;
- kernel_uint_t rerrors;
- kernel_uint_t rdrops;
- kernel_uint_t rfifo;
- kernel_uint_t rframe;
- kernel_uint_t rcompressed;
- kernel_uint_t rmulticast;
-
- kernel_uint_t tbytes;
- kernel_uint_t tpackets;
- kernel_uint_t terrors;
- kernel_uint_t tdrops;
- kernel_uint_t tfifo;
- kernel_uint_t tcollisions;
- kernel_uint_t tcarrier;
- kernel_uint_t tcompressed;
- kernel_uint_t speed;
- kernel_uint_t duplex;
- kernel_uint_t operstate;
- unsigned long long carrier;
- unsigned long long mtu;
-
- // charts
- RRDSET *st_bandwidth;
- RRDSET *st_packets;
- RRDSET *st_errors;
- RRDSET *st_drops;
- RRDSET *st_fifo;
- RRDSET *st_compressed;
- RRDSET *st_events;
- RRDSET *st_speed;
- RRDSET *st_duplex;
- RRDSET *st_operstate;
- RRDSET *st_carrier;
- RRDSET *st_mtu;
-
- // dimensions
- RRDDIM *rd_rbytes;
- RRDDIM *rd_rpackets;
- RRDDIM *rd_rerrors;
- RRDDIM *rd_rdrops;
- RRDDIM *rd_rfifo;
- RRDDIM *rd_rframe;
- RRDDIM *rd_rcompressed;
- RRDDIM *rd_rmulticast;
-
- RRDDIM *rd_tbytes;
- RRDDIM *rd_tpackets;
- RRDDIM *rd_terrors;
- RRDDIM *rd_tdrops;
- RRDDIM *rd_tfifo;
- RRDDIM *rd_tcollisions;
- RRDDIM *rd_tcarrier;
- RRDDIM *rd_tcompressed;
-
- RRDDIM *rd_speed;
- RRDDIM *rd_duplex_full;
- RRDDIM *rd_duplex_half;
- RRDDIM *rd_duplex_unknown;
- RRDDIM *rd_operstate_unknown;
- RRDDIM *rd_operstate_notpresent;
- RRDDIM *rd_operstate_down;
- RRDDIM *rd_operstate_lowerlayerdown;
- RRDDIM *rd_operstate_testing;
- RRDDIM *rd_operstate_dormant;
- RRDDIM *rd_operstate_up;
- RRDDIM *rd_carrier_up;
- RRDDIM *rd_carrier_down;
- RRDDIM *rd_mtu;
-
- char *filename_speed;
- const RRDSETVAR_ACQUIRED *chart_var_speed;
-
- char *filename_duplex;
- char *filename_operstate;
- char *filename_carrier;
- char *filename_mtu;
-
- const DICTIONARY_ITEM *cgroup_netdev_link;
-
- struct netdev *next;
-} *netdev_root = NULL, *netdev_last_used = NULL;
-
-static size_t netdev_added = 0, netdev_found = 0;
-
-// ----------------------------------------------------------------------------
-
-static void netdev_charts_release(struct netdev *d) {
- if(d->st_bandwidth) rrdset_is_obsolete___safe_from_collector_thread(d->st_bandwidth);
- if(d->st_packets) rrdset_is_obsolete___safe_from_collector_thread(d->st_packets);
- if(d->st_errors) rrdset_is_obsolete___safe_from_collector_thread(d->st_errors);
- if(d->st_drops) rrdset_is_obsolete___safe_from_collector_thread(d->st_drops);
- if(d->st_fifo) rrdset_is_obsolete___safe_from_collector_thread(d->st_fifo);
- if(d->st_compressed) rrdset_is_obsolete___safe_from_collector_thread(d->st_compressed);
- if(d->st_events) rrdset_is_obsolete___safe_from_collector_thread(d->st_events);
- if(d->st_speed) rrdset_is_obsolete___safe_from_collector_thread(d->st_speed);
- if(d->st_duplex) rrdset_is_obsolete___safe_from_collector_thread(d->st_duplex);
- if(d->st_operstate) rrdset_is_obsolete___safe_from_collector_thread(d->st_operstate);
- if(d->st_carrier) rrdset_is_obsolete___safe_from_collector_thread(d->st_carrier);
- if(d->st_mtu) rrdset_is_obsolete___safe_from_collector_thread(d->st_mtu);
-
- d->st_bandwidth = NULL;
- d->st_compressed = NULL;
- d->st_drops = NULL;
- d->st_errors = NULL;
- d->st_events = NULL;
- d->st_fifo = NULL;
- d->st_packets = NULL;
- d->st_speed = NULL;
- d->st_duplex = NULL;
- d->st_operstate = NULL;
- d->st_carrier = NULL;
- d->st_mtu = NULL;
-
- d->rd_rbytes = NULL;
- d->rd_rpackets = NULL;
- d->rd_rerrors = NULL;
- d->rd_rdrops = NULL;
- d->rd_rfifo = NULL;
- d->rd_rframe = NULL;
- d->rd_rcompressed = NULL;
- d->rd_rmulticast = NULL;
-
- d->rd_tbytes = NULL;
- d->rd_tpackets = NULL;
- d->rd_terrors = NULL;
- d->rd_tdrops = NULL;
- d->rd_tfifo = NULL;
- d->rd_tcollisions = NULL;
- d->rd_tcarrier = NULL;
- d->rd_tcompressed = NULL;
-
- d->rd_speed = NULL;
- d->rd_duplex_full = NULL;
- d->rd_duplex_half = NULL;
- d->rd_duplex_unknown = NULL;
- d->rd_carrier_up = NULL;
- d->rd_carrier_down = NULL;
- d->rd_mtu = NULL;
-
- d->rd_operstate_unknown = NULL;
- d->rd_operstate_notpresent = NULL;
- d->rd_operstate_down = NULL;
- d->rd_operstate_lowerlayerdown = NULL;
- d->rd_operstate_testing = NULL;
- d->rd_operstate_dormant = NULL;
- d->rd_operstate_up = NULL;
-
- d->chart_var_speed = NULL;
-}
-
-static void netdev_free_chart_strings(struct netdev *d) {
- freez((void *)d->chart_type_net_bytes);
- freez((void *)d->chart_type_net_compressed);
- freez((void *)d->chart_type_net_drops);
- freez((void *)d->chart_type_net_errors);
- freez((void *)d->chart_type_net_events);
- freez((void *)d->chart_type_net_fifo);
- freez((void *)d->chart_type_net_packets);
- freez((void *)d->chart_type_net_speed);
- freez((void *)d->chart_type_net_duplex);
- freez((void *)d->chart_type_net_operstate);
- freez((void *)d->chart_type_net_carrier);
- freez((void *)d->chart_type_net_mtu);
-
- freez((void *)d->chart_id_net_bytes);
- freez((void *)d->chart_id_net_compressed);
- freez((void *)d->chart_id_net_drops);
- freez((void *)d->chart_id_net_errors);
- freez((void *)d->chart_id_net_events);
- freez((void *)d->chart_id_net_fifo);
- freez((void *)d->chart_id_net_packets);
- freez((void *)d->chart_id_net_speed);
- freez((void *)d->chart_id_net_duplex);
- freez((void *)d->chart_id_net_operstate);
- freez((void *)d->chart_id_net_carrier);
- freez((void *)d->chart_id_net_mtu);
-
- freez((void *)d->chart_ctx_net_bytes);
- freez((void *)d->chart_ctx_net_compressed);
- freez((void *)d->chart_ctx_net_drops);
- freez((void *)d->chart_ctx_net_errors);
- freez((void *)d->chart_ctx_net_events);
- freez((void *)d->chart_ctx_net_fifo);
- freez((void *)d->chart_ctx_net_packets);
- freez((void *)d->chart_ctx_net_speed);
- freez((void *)d->chart_ctx_net_duplex);
- freez((void *)d->chart_ctx_net_operstate);
- freez((void *)d->chart_ctx_net_carrier);
- freez((void *)d->chart_ctx_net_mtu);
-
- freez((void *)d->chart_family);
-}
-
-static void netdev_free(struct netdev *d) {
- netdev_charts_release(d);
- netdev_free_chart_strings(d);
- rrdlabels_destroy(d->chart_labels);
- cgroup_netdev_release(d->cgroup_netdev_link);
-
- freez((void *)d->name);
- freez((void *)d->filename_speed);
- freez((void *)d->filename_duplex);
- freez((void *)d->filename_operstate);
- freez((void *)d->filename_carrier);
- freez((void *)d->filename_mtu);
- freez((void *)d);
- netdev_added--;
-}
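
netdev_free() shows the ownership rule used throughout this file: every
string a node holds was duplicated with strdupz() when the device was
discovered or renamed, so teardown releases each field exactly once and then
the node itself. A reduced sketch of the same pattern with the standard
allocator (freez(), like free(), is a no-op on NULL, which is why fields
that were never populated need no guard):

#include <stdlib.h>
#include <string.h>

struct node {
    char *name;
    char *filename_speed;    // may stay NULL if never discovered
    struct node *next;
};

static void node_free(struct node *n) {
    free(n->name);
    free(n->filename_speed); // free(NULL) is a no-op, matching freez()
    free(n);
}

static void list_free(struct node *root) {
    while (root) {
        struct node *next = root->next;
        node_free(root);
        root = next;
    }
}

int main(void) {
    struct node *n = calloc(1, sizeof(*n));
    if (!n)
        return 1;
    n->name = strdup("eth0");
    list_free(n);
    return 0;
}
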
-
-// ----------------------------------------------------------------------------
-// netdev renames
-
-static struct netdev_rename {
- const char *host_device;
- uint32_t hash;
-
- const char *container_device;
- const char *container_name;
- const char *ctx_prefix;
-
- RRDLABELS *chart_labels;
-
- int processed;
-
- const DICTIONARY_ITEM *cgroup_netdev_link;
-
- struct netdev_rename *next;
-} *netdev_rename_root = NULL;
-
-static int netdev_pending_renames = 0;
-static netdata_mutex_t netdev_rename_mutex = NETDATA_MUTEX_INITIALIZER;
-static netdata_mutex_t netdev_dev_mutex = NETDATA_MUTEX_INITIALIZER;
-
-static struct netdev_rename *netdev_rename_find(const char *host_device, uint32_t hash) {
- struct netdev_rename *r;
-
- for(r = netdev_rename_root; r ; r = r->next)
- if(r->hash == hash && !strcmp(host_device, r->host_device))
- return r;
-
- return NULL;
-}
-
- // other threads can call this function to register a rename for a netdev
-void netdev_rename_device_add(
- const char *host_device,
- const char *container_device,
- const char *container_name,
- RRDLABELS *labels,
- const char *ctx_prefix,
- const DICTIONARY_ITEM *cgroup_netdev_link)
-{
- netdata_mutex_lock(&netdev_rename_mutex);
-
- uint32_t hash = simple_hash(host_device);
- struct netdev_rename *r = netdev_rename_find(host_device, hash);
- if(!r) {
- r = callocz(1, sizeof(struct netdev_rename));
- r->host_device = strdupz(host_device);
- r->container_device = strdupz(container_device);
- r->container_name = strdupz(container_name);
- r->ctx_prefix = strdupz(ctx_prefix);
- r->chart_labels = rrdlabels_create();
- rrdlabels_migrate_to_these(r->chart_labels, labels);
- r->hash = hash;
- r->next = netdev_rename_root;
- r->processed = 0;
- r->cgroup_netdev_link = cgroup_netdev_link;
-
- netdev_rename_root = r;
- netdev_pending_renames++;
- collector_info("CGROUP: registered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
- }
- else {
- if(strcmp(r->container_device, container_device) != 0 || strcmp(r->container_name, container_name) != 0) {
- freez((void *) r->container_device);
- freez((void *) r->container_name);
-
- r->container_device = strdupz(container_device);
- r->container_name = strdupz(container_name);
-
- rrdlabels_migrate_to_these(r->chart_labels, labels);
-
- r->processed = 0;
- r->cgroup_netdev_link = cgroup_netdev_link;
-
- netdev_pending_renames++;
- collector_info("CGROUP: altered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
- }
- }
-
- netdata_mutex_unlock(&netdev_rename_mutex);
-}
-
- // other threads can call this function to delete a rename for a netdev
-void netdev_rename_device_del(const char *host_device) {
- netdata_mutex_lock(&netdev_rename_mutex);
-
- struct netdev_rename *r, *last = NULL;
-
- uint32_t hash = simple_hash(host_device);
- for(r = netdev_rename_root; r ; last = r, r = r->next) {
- if (r->hash == hash && !strcmp(host_device, r->host_device)) {
- if (netdev_rename_root == r)
- netdev_rename_root = r->next;
- else if (last)
- last->next = r->next;
-
- if(!r->processed)
- netdev_pending_renames--;
-
- collector_info("CGROUP: unregistered network interface rename for '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
-
- freez((void *) r->host_device);
- freez((void *) r->container_name);
- freez((void *) r->container_device);
- freez((void *) r->ctx_prefix);
- rrdlabels_destroy(r->chart_labels);
- cgroup_netdev_release(r->cgroup_netdev_link);
- freez((void *) r);
- break;
- }
- }
-
- netdata_mutex_unlock(&netdev_rename_mutex);
-}
-
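Together, the add/del pair above forms a mutex-protected rename registry:
the cgroups collector registers host-device to container-device mappings
from its own thread, and this collector consumes them while holding the
same lock. A generic reduced sketch of that pattern with POSIX threads
(the rename_add()/rename_find() API is a hypothetical simplification):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rename_entry {
    char *host_device;
    char *container_device;
    struct rename_entry *next;
};

static struct rename_entry *rename_root = NULL;
static pthread_mutex_t rename_mutex = PTHREAD_MUTEX_INITIALIZER;

// Called by the producer thread to register a mapping.
static void rename_add(const char *host, const char *container) {
    struct rename_entry *r = malloc(sizeof(*r));
    if (!r)
        return;
    r->host_device = strdup(host);
    r->container_device = strdup(container);

    pthread_mutex_lock(&rename_mutex);
    r->next = rename_root;
    rename_root = r;
    pthread_mutex_unlock(&rename_mutex);
}

// Called by the consumer thread; copies the result out under the lock so
// the entry can later be freed by the producer without racing the caller.
static int rename_find(const char *host, char *out, size_t len) {
    int found = 0;
    pthread_mutex_lock(&rename_mutex);
    for (struct rename_entry *r = rename_root; r; r = r->next) {
        if (!strcmp(r->host_device, host)) {
            snprintf(out, len, "%s", r->container_device);
            found = 1;
            break;
        }
    }
    pthread_mutex_unlock(&rename_mutex);
    return found;
}

int main(void) {
    char buf[64];
    rename_add("veth1a2b3c", "eth0");
    if (rename_find("veth1a2b3c", buf, sizeof(buf)))
        printf("veth1a2b3c is charted as %s\n", buf);
    return 0;
}
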
-static inline void netdev_rename_cgroup(struct netdev *d, struct netdev_rename *r) {
- collector_info("CGROUP: renaming network interface '%s' as '%s' under '%s'", r->host_device, r->container_device, r->container_name);
-
- netdev_charts_release(d);
- netdev_free_chart_strings(d);
- d->cgroup_netdev_link = cgroup_netdev_dup(r->cgroup_netdev_link);
-
- char buffer[RRD_ID_LENGTH_MAX + 1];
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "cgroup_%s", r->container_name);
- d->chart_type_net_bytes = strdupz(buffer);
- d->chart_type_net_compressed = strdupz(buffer);
- d->chart_type_net_drops = strdupz(buffer);
- d->chart_type_net_errors = strdupz(buffer);
- d->chart_type_net_events = strdupz(buffer);
- d->chart_type_net_fifo = strdupz(buffer);
- d->chart_type_net_packets = strdupz(buffer);
- d->chart_type_net_speed = strdupz(buffer);
- d->chart_type_net_duplex = strdupz(buffer);
- d->chart_type_net_operstate = strdupz(buffer);
- d->chart_type_net_carrier = strdupz(buffer);
- d->chart_type_net_mtu = strdupz(buffer);
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_%s", r->container_device);
- d->chart_id_net_bytes = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_compressed_%s", r->container_device);
- d->chart_id_net_compressed = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_drops_%s", r->container_device);
- d->chart_id_net_drops = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_errors_%s", r->container_device);
- d->chart_id_net_errors = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_events_%s", r->container_device);
- d->chart_id_net_events = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_fifo_%s", r->container_device);
- d->chart_id_net_fifo = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_packets_%s", r->container_device);
- d->chart_id_net_packets = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_speed_%s", r->container_device);
- d->chart_id_net_speed = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_duplex_%s", r->container_device);
- d->chart_id_net_duplex = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_operstate_%s", r->container_device);
- d->chart_id_net_operstate = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_carrier_%s", r->container_device);
- d->chart_id_net_carrier = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "net_mtu_%s", r->container_device);
- d->chart_id_net_mtu = strdupz(buffer);
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%scgroup.net_net", r->ctx_prefix);
- d->chart_ctx_net_bytes = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%scgroup.net_compressed", r->ctx_prefix);
- d->chart_ctx_net_compressed = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%scgroup.net_drops", r->ctx_prefix);
- d->chart_ctx_net_drops = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%scgroup.net_errors", r->ctx_prefix);
- d->chart_ctx_net_errors = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%scgroup.net_events", r->ctx_prefix);
- d->chart_ctx_net_events = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%scgroup.net_fifo", r->ctx_prefix);
- d->chart_ctx_net_fifo = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%scgroup.net_packets", r->ctx_prefix);
- d->chart_ctx_net_packets = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%scgroup.net_speed", r->ctx_prefix);
- d->chart_ctx_net_speed = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%scgroup.net_duplex", r->ctx_prefix);
- d->chart_ctx_net_duplex = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%scgroup.net_operstate", r->ctx_prefix);
- d->chart_ctx_net_operstate = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%scgroup.net_carrier", r->ctx_prefix);
- d->chart_ctx_net_carrier = strdupz(buffer);
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%scgroup.net_mtu", r->ctx_prefix);
- d->chart_ctx_net_mtu = strdupz(buffer);
-
- d->chart_family = strdupz("net");
-
- rrdlabels_copy(d->chart_labels, r->chart_labels);
- rrdlabels_add(d->chart_labels, "container_device", r->container_device, RRDLABEL_SRC_AUTO);
-
- d->priority = NETDATA_CHART_PRIO_CGROUP_NET_IFACE;
- d->flipped = 1;
-}
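-
-// Renaming notes, summarizing the scheme above: for a cgroup-owned interface the
-// chart "type" becomes "cgroup_<container>", chart ids become
-// "net_*_<container_device>" and contexts become "<ctx_prefix>cgroup.net_*", so
-// all charts of one container group together on the dashboard. d->flipped is set
-// because the host-side end of a veth pair mirrors the container's view: what
-// the host transmits is what the container receives, and vice versa.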
-
-static inline void netdev_rename(struct netdev *d) {
- struct netdev_rename *r = netdev_rename_find(d->name, d->hash);
- if(unlikely(r && !r->processed)) {
- netdev_rename_cgroup(d, r);
- r->processed = 1;
- d->discover_time = 0;
- netdev_pending_renames--;
- }
-}
-
-static inline void netdev_rename_lock(struct netdev *d) {
- netdata_mutex_lock(&netdev_rename_mutex);
- netdev_rename(d);
- netdata_mutex_unlock(&netdev_rename_mutex);
-}
-
-static inline void netdev_rename_all_lock(void) {
- netdata_mutex_lock(&netdev_rename_mutex);
-
- struct netdev *d;
- for(d = netdev_root; d ; d = d->next)
- netdev_rename(d);
-
- netdev_pending_renames = 0;
- netdata_mutex_unlock(&netdev_rename_mutex);
-}
-
-// ----------------------------------------------------------------------------
-
-int netdev_function_net_interfaces(BUFFER *wb, int timeout __maybe_unused, const char *function __maybe_unused,
- void *collector_data __maybe_unused,
- rrd_function_result_callback_t result_cb, void *result_cb_data,
- rrd_function_is_cancelled_cb_t is_cancelled_cb, void *is_cancelled_cb_data,
- rrd_function_register_canceller_cb_t register_canceller_cb __maybe_unused,
- void *register_canceller_cb_data __maybe_unused) {
-
- buffer_flush(wb);
- wb->content_type = CT_APPLICATION_JSON;
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
-
- buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(localhost));
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_time_t(wb, "update_every", 1);
- buffer_json_member_add_string(wb, "help", RRDFUNCTIONS_NETDEV_HELP);
- buffer_json_member_add_array(wb, "data");
-
- double max_traffic_rx = 0.0;
- double max_traffic_tx = 0.0;
- double max_traffic = 0.0;
- double max_packets_rx = 0.0;
- double max_packets_tx = 0.0;
- double max_mcast_rx = 0.0;
- double max_drops_rx = 0.0;
- double max_drops_tx = 0.0;
-
- netdata_mutex_lock(&netdev_dev_mutex);
-
- RRDDIM *rd = NULL;
-
- for (struct netdev *d = netdev_root; d != netdev_last_used; d = d->next) {
- if (unlikely(!d->function_ready))
- continue;
-
- buffer_json_add_array_item_array(wb);
-
- buffer_json_add_array_item_string(wb, d->name);
-
- buffer_json_add_array_item_string(wb, d->virtual ? "virtual" : "physical");
- buffer_json_add_array_item_string(wb, d->flipped ? "cgroup" : "host");
- buffer_json_add_array_item_string(wb, d->carrier == 1 ? "up" : "down");
- buffer_json_add_array_item_string(wb, get_operstate_string(d->operstate));
- buffer_json_add_array_item_string(wb, get_duplex_string(d->duplex));
- buffer_json_add_array_item_double(wb, d->speed > 0 ? d->speed : NAN);
- buffer_json_add_array_item_double(wb, d->mtu > 0 ? d->mtu : NAN);
-
- rd = d->flipped ? d->rd_tbytes : d->rd_rbytes;
- double traffic_rx = rrddim_get_last_stored_value(rd, &max_traffic_rx, 1000.0);
- rd = d->flipped ? d->rd_rbytes : d->rd_tbytes;
- double traffic_tx = rrddim_get_last_stored_value(rd, &max_traffic_tx, 1000.0);
-
- rd = d->flipped ? d->rd_tpackets : d->rd_rpackets;
- double packets_rx = rrddim_get_last_stored_value(rd, &max_packets_rx, 1000.0);
- rd = d->flipped ? d->rd_rpackets : d->rd_tpackets;
- double packets_tx = rrddim_get_last_stored_value(rd, &max_packets_tx, 1000.0);
-
- double mcast_rx = rrddim_get_last_stored_value(d->rd_rmulticast, &max_mcast_rx, 1000.0);
-
- rd = d->flipped ? d->rd_tdrops : d->rd_rdrops;
- double drops_rx = rrddim_get_last_stored_value(rd, &max_drops_rx, 1.0);
- rd = d->flipped ? d->rd_rdrops : d->rd_tdrops;
- double drops_tx = rrddim_get_last_stored_value(rd, &max_drops_tx, 1.0);
-
-        // FIXME: "traffic" (total) is needed only for default_sorting;
-        // it can be removed once default_sorting accepts multiple columns (sum)
- double traffic = NAN;
- if (!isnan(traffic_rx) && !isnan(traffic_tx)) {
- traffic = traffic_rx + traffic_tx;
- max_traffic = MAX(max_traffic, traffic);
- }
-
- buffer_json_add_array_item_double(wb, traffic_rx);
- buffer_json_add_array_item_double(wb, traffic_tx);
- buffer_json_add_array_item_double(wb, traffic);
- buffer_json_add_array_item_double(wb, packets_rx);
- buffer_json_add_array_item_double(wb, packets_tx);
- buffer_json_add_array_item_double(wb, mcast_rx);
- buffer_json_add_array_item_double(wb, drops_rx);
- buffer_json_add_array_item_double(wb, drops_tx);
-
- buffer_json_add_array_item_object(wb);
- {
- buffer_json_member_add_string(wb, "severity", drops_rx + drops_tx > 0 ? "warning" : "normal");
- }
- buffer_json_object_close(wb);
-
- buffer_json_array_close(wb);
- }
-
- netdata_mutex_unlock(&netdev_dev_mutex);
-
- buffer_json_array_close(wb); // data
- buffer_json_member_add_object(wb, "columns");
- {
- size_t field_id = 0;
-
- buffer_rrdf_table_add_field(wb, field_id++, "Interface", "Network Interface Name",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_STICKY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "Type", "Network Interface Type",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "UsedBy", "Indicates whether the network interface is used by a cgroup or by the host system",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "PhState", "Current Physical State",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "OpState", "Current Operational State",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "Duplex", "Current Duplex Mode",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "Speed", "Current Link Speed",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
- 0, "Mbit", NAN, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "MTU", "Maximum Transmission Unit",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER,
- 0, "Octets", NAN, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_UNIQUE_KEY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "In", "Traffic Received",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "Mbit", max_traffic_rx, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "Out", "Traffic Sent",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "Mbit", max_traffic_tx, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "Total", "Traffic Received and Sent",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "Mbit", max_traffic, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_NONE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "PktsIn", "Received Packets",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "Kpps", max_packets_rx, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "PktsOut", "Sent Packets",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "Kpps", max_packets_tx, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "McastIn", "Multicast Received Packets",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "Kpps", max_mcast_rx, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_NONE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "DropsIn", "Dropped Inbound Packets",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "Drops", max_drops_rx, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "DropsOut", "Dropped Outbound Packets",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
- 2, "Drops", max_drops_tx, RRDF_FIELD_SORT_DESCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE,
- NULL);
-
- buffer_rrdf_table_add_field(
- wb, field_id++,
- "rowOptions", "rowOptions",
- RRDF_FIELD_TYPE_NONE,
- RRDR_FIELD_VISUAL_ROW_OPTIONS,
- RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_FIXED,
- NULL,
- RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_DUMMY,
- NULL);
- }
-
- buffer_json_object_close(wb); // columns
- buffer_json_member_add_string(wb, "default_sort_column", "Total");
-
- buffer_json_member_add_object(wb, "charts");
- {
- buffer_json_member_add_object(wb, "Traffic");
- {
- buffer_json_member_add_string(wb, "name", "Traffic");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "In");
- buffer_json_add_array_item_string(wb, "Out");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- buffer_json_member_add_object(wb, "Packets");
- {
- buffer_json_member_add_string(wb, "name", "Packets");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "PktsIn");
- buffer_json_add_array_item_string(wb, "PktsOut");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // charts
-
- buffer_json_member_add_array(wb, "default_charts");
- {
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "Traffic");
- buffer_json_add_array_item_string(wb, "Interface");
- buffer_json_array_close(wb);
-
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "Traffic");
- buffer_json_add_array_item_string(wb, "Type");
- buffer_json_array_close(wb);
- }
- buffer_json_array_close(wb);
-
- buffer_json_member_add_object(wb, "group_by");
- {
- buffer_json_member_add_object(wb, "Type");
- {
- buffer_json_member_add_string(wb, "name", "Type");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Type");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- buffer_json_member_add_object(wb, "UsedBy");
- {
- buffer_json_member_add_string(wb, "name", "UsedBy");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "UsedBy");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // group_by
-
- buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
- buffer_json_finalize(wb);
-
- int response = HTTP_RESP_OK;
- if(is_cancelled_cb && is_cancelled_cb(is_cancelled_cb_data)) {
- buffer_flush(wb);
- response = HTTP_RESP_CLIENT_CLOSED_REQUEST;
- }
-
- if(result_cb)
- result_cb(wb, response, result_cb_data);
-
- return response;
-}
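-
-// For reference, the payload assembled above has roughly this shape -- an
-// illustrative sketch with invented values, not output captured from a live
-// agent:
-//
-//   {
-//     "hostname": "myhost", "status": 200, "type": "table", "update_every": 1,
-//     "data": [
-//       ["eth0", "physical", "host", "up", "up", "full", 1000.0, 1500.0,
-//        12.34, 5.67, 18.01, 3.2, 1.9, 0.0, 0.0, 0.0, {"severity": "normal"}]
-//     ],
-//     "columns": { ... }, "charts": { ... }, "group_by": { ... },
-//     "default_sort_column": "Total", "expires": ...
-//   }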
-
-// netdev data collection
-
-static void netdev_cleanup() {
- if(likely(netdev_found == netdev_added)) return;
-
- netdev_added = 0;
- struct netdev *d = netdev_root, *last = NULL;
- while(d) {
- if(unlikely(!d->updated)) {
- // collector_info("Removing network device '%s', linked after '%s'", d->name, last?last->name:"ROOT");
-
- if(netdev_last_used == d)
- netdev_last_used = last;
-
- struct netdev *t = d;
-
-            if(d == netdev_root || !last)
-                netdev_root = d = d->next;
-            else
-                last->next = d = d->next;
-
- t->next = NULL;
- netdev_free(t);
- }
- else {
- netdev_added++;
- last = d;
- d->updated = 0;
- d = d->next;
- }
- }
-}
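-
-// The collector uses a mark-and-sweep cycle: each pass of do_proc_net_dev()
-// sets d->updated on every interface it still sees in /proc/net/dev, and
-// netdev_cleanup() then frees the unmarked ones and clears the flag on the
-// survivors for the next pass.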
-
-static struct netdev *get_netdev(const char *name) {
- struct netdev *d;
-
- uint32_t hash = simple_hash(name);
-
- // search it, from the last position to the end
- for(d = netdev_last_used ; d ; d = d->next) {
- if(unlikely(hash == d->hash && !strcmp(name, d->name))) {
- netdev_last_used = d->next;
- return d;
- }
- }
-
- // search it from the beginning to the last position we used
- for(d = netdev_root ; d != netdev_last_used ; d = d->next) {
- if(unlikely(hash == d->hash && !strcmp(name, d->name))) {
- netdev_last_used = d->next;
- return d;
- }
- }
-
- // create a new one
- d = callocz(1, sizeof(struct netdev));
- d->name = strdupz(name);
- d->hash = simple_hash(d->name);
- d->len = strlen(d->name);
- d->chart_labels = rrdlabels_create();
- d->function_ready = false;
-
- d->chart_type_net_bytes = strdupz("net");
- d->chart_type_net_compressed = strdupz("net_compressed");
- d->chart_type_net_drops = strdupz("net_drops");
- d->chart_type_net_errors = strdupz("net_errors");
- d->chart_type_net_events = strdupz("net_events");
- d->chart_type_net_fifo = strdupz("net_fifo");
- d->chart_type_net_packets = strdupz("net_packets");
- d->chart_type_net_speed = strdupz("net_speed");
- d->chart_type_net_duplex = strdupz("net_duplex");
- d->chart_type_net_operstate = strdupz("net_operstate");
- d->chart_type_net_carrier = strdupz("net_carrier");
- d->chart_type_net_mtu = strdupz("net_mtu");
-
- d->chart_id_net_bytes = strdupz(d->name);
- d->chart_id_net_compressed = strdupz(d->name);
- d->chart_id_net_drops = strdupz(d->name);
- d->chart_id_net_errors = strdupz(d->name);
- d->chart_id_net_events = strdupz(d->name);
- d->chart_id_net_fifo = strdupz(d->name);
- d->chart_id_net_packets = strdupz(d->name);
- d->chart_id_net_speed = strdupz(d->name);
- d->chart_id_net_duplex = strdupz(d->name);
- d->chart_id_net_operstate = strdupz(d->name);
- d->chart_id_net_carrier = strdupz(d->name);
- d->chart_id_net_mtu = strdupz(d->name);
-
- d->chart_ctx_net_bytes = strdupz("net.net");
- d->chart_ctx_net_compressed = strdupz("net.compressed");
- d->chart_ctx_net_drops = strdupz("net.drops");
- d->chart_ctx_net_errors = strdupz("net.errors");
- d->chart_ctx_net_events = strdupz("net.events");
- d->chart_ctx_net_fifo = strdupz("net.fifo");
- d->chart_ctx_net_packets = strdupz("net.packets");
- d->chart_ctx_net_speed = strdupz("net.speed");
- d->chart_ctx_net_duplex = strdupz("net.duplex");
- d->chart_ctx_net_operstate = strdupz("net.operstate");
- d->chart_ctx_net_carrier = strdupz("net.carrier");
- d->chart_ctx_net_mtu = strdupz("net.mtu");
-
- d->chart_family = strdupz(d->name);
- d->priority = NETDATA_CHART_PRIO_FIRST_NET_IFACE;
-
- netdev_rename_lock(d);
-
- netdev_added++;
-
- // link it to the end
- if(netdev_root) {
- struct netdev *e;
- for(e = netdev_root; e->next ; e = e->next) ;
- e->next = d;
- }
- else
- netdev_root = d;
-
- return d;
-}
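-
-// Lookup strategy: /proc/net/dev lists interfaces in a stable order, so the
-// search resumes right after the last hit (netdev_last_used) and wraps around.
-// When the file order matches the list order, every lookup after the first is
-// resolved with a single comparison.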
-
-#define NETDEV_VIRTUAL_COLLECT_DELAY 15 // 1 full run of the cgroups discovery thread (10 secs by default)
-
-int do_proc_net_dev(int update_every, usec_t dt) {
- (void)dt;
- static SIMPLE_PATTERN *disabled_list = NULL;
- static procfile *ff = NULL;
- static int enable_new_interfaces = -1;
- static int do_bandwidth = -1, do_packets = -1, do_errors = -1, do_drops = -1, do_fifo = -1, do_compressed = -1,
- do_events = -1, do_speed = -1, do_duplex = -1, do_operstate = -1, do_carrier = -1, do_mtu = -1;
- static char *path_to_sys_devices_virtual_net = NULL, *path_to_sys_class_net_speed = NULL,
- *proc_net_dev_filename = NULL;
- static char *path_to_sys_class_net_duplex = NULL;
- static char *path_to_sys_class_net_operstate = NULL;
- static char *path_to_sys_class_net_carrier = NULL;
- static char *path_to_sys_class_net_mtu = NULL;
-
- if(unlikely(enable_new_interfaces == -1)) {
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, (*netdata_configured_host_prefix)?"/proc/1/net/dev":"/proc/net/dev");
- proc_net_dev_filename = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "filename to monitor", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/virtual/net/%s");
- path_to_sys_devices_virtual_net = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get virtual interfaces", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/net/%s/speed");
- path_to_sys_class_net_speed = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device speed", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/net/%s/duplex");
- path_to_sys_class_net_duplex = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device duplex", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/net/%s/operstate");
- path_to_sys_class_net_operstate = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device operstate", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/net/%s/carrier");
- path_to_sys_class_net_carrier = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device carrier", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/net/%s/mtu");
- path_to_sys_class_net_mtu = config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "path to get net device mtu", filename);
-
- enable_new_interfaces = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "enable new interfaces detected at runtime", CONFIG_BOOLEAN_AUTO);
-
- do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "bandwidth for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_packets = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "packets for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_errors = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "errors for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_drops = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "drops for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_fifo = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "fifo for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_compressed = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "compressed packets for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_events = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "frames, collisions, carrier counters for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_speed = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "speed for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_duplex = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "duplex for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_operstate = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "operstate for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_carrier = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "carrier for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_mtu = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "mtu for all interfaces", CONFIG_BOOLEAN_AUTO);
-
- disabled_list = simple_pattern_create(
- config_get(CONFIG_SECTION_PLUGIN_PROC_NETDEV, "disable by default interfaces matching",
- "lo fireqos* *-ifb fwpr* fwbr* fwln*"), NULL, SIMPLE_PATTERN_EXACT, true);
- }
-
- if(unlikely(!ff)) {
- ff = procfile_open(proc_net_dev_filename, " \t,|", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
-    if(unlikely(!ff)) return 0; // return 0 so that we retry opening it on the next iteration
-
- // rename all the devices, if we have pending renames
- if(unlikely(netdev_pending_renames))
- netdev_rename_all_lock();
-
- netdev_found = 0;
-
- kernel_uint_t system_rbytes = 0;
- kernel_uint_t system_tbytes = 0;
-
- time_t now = now_realtime_sec();
-
- size_t lines = procfile_lines(ff), l;
- for(l = 2; l < lines ;l++) {
- // require 17 words on each line
- if(unlikely(procfile_linewords(ff, l) < 17)) continue;
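-        // /proc/net/dev word layout after splitting on " \t,|":
-        //   0: name     1: rbytes    2: rpackets     3: rerrors    4: rdrops
-        //   5: rfifo    6: rframe    7: rcompressed  8: rmulticast
-        //   9: tbytes  10: tpackets 11: terrors     12: tdrops    13: tfifo
-        //  14: tcollisions  15: tcarrier  16: tcompressed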
-
- char *name = procfile_lineword(ff, l, 0);
- size_t len = strlen(name);
- if(name[len - 1] == ':') name[len - 1] = '\0';
-
- struct netdev *d = get_netdev(name);
- d->updated = 1;
- netdev_found++;
-
- if(unlikely(!d->configured)) {
- // this is the first time we see this interface
-
- // remember we configured it
- d->configured = 1;
- d->discover_time = now;
-
- d->enabled = enable_new_interfaces;
-
- if(d->enabled)
- d->enabled = !simple_pattern_matches(disabled_list, d->name);
-
- char buf[FILENAME_MAX + 1];
- snprintfz(buf, FILENAME_MAX, path_to_sys_devices_virtual_net, d->name);
-
- d->virtual = likely(access(buf, R_OK) == 0) ? 1 : 0;
-
-            // At least on Proxmox, eth0 inside an LXC container is reported as virtual.
-            // Virtual interfaces are excluded from the system.net aggregation, so mark
-            // it as physical to keep its traffic counted there.
- if (inside_lxc_container && d->virtual && strncmp(d->name, "eth", 3) == 0)
- d->virtual = 0;
-
- if (d->virtual)
- rrdlabels_add(d->chart_labels, "interface_type", "virtual", RRDLABEL_SRC_AUTO);
- else
- rrdlabels_add(d->chart_labels, "interface_type", "real", RRDLABEL_SRC_AUTO);
-
- rrdlabels_add(d->chart_labels, "device", name, RRDLABEL_SRC_AUTO);
-
- if(likely(!d->virtual)) {
- // set the filename to get the interface speed
- snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_speed, d->name);
- d->filename_speed = strdupz(buf);
-
- snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_duplex, d->name);
- d->filename_duplex = strdupz(buf);
- }
-
- snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_operstate, d->name);
- d->filename_operstate = strdupz(buf);
-
- snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_carrier, d->name);
- d->filename_carrier = strdupz(buf);
-
- snprintfz(buf, FILENAME_MAX, path_to_sys_class_net_mtu, d->name);
- d->filename_mtu = strdupz(buf);
-
- snprintfz(buf, FILENAME_MAX, "plugin:proc:/proc/net/dev:%s", d->name);
-
- if (config_exists(buf, "enabled"))
- d->enabled = config_get_boolean_ondemand(buf, "enabled", d->enabled);
- if (config_exists(buf, "virtual"))
- d->virtual = config_get_boolean(buf, "virtual", d->virtual);
-
- if(d->enabled == CONFIG_BOOLEAN_NO)
- continue;
-
- d->do_bandwidth = do_bandwidth;
- d->do_packets = do_packets;
- d->do_errors = do_errors;
- d->do_drops = do_drops;
- d->do_fifo = do_fifo;
- d->do_compressed = do_compressed;
- d->do_events = do_events;
- d->do_speed = do_speed;
- d->do_duplex = do_duplex;
- d->do_operstate = do_operstate;
- d->do_carrier = do_carrier;
- d->do_mtu = do_mtu;
-
- if (config_exists(buf, "bandwidth"))
- d->do_bandwidth = config_get_boolean_ondemand(buf, "bandwidth", do_bandwidth);
- if (config_exists(buf, "packets"))
- d->do_packets = config_get_boolean_ondemand(buf, "packets", do_packets);
- if (config_exists(buf, "errors"))
- d->do_errors = config_get_boolean_ondemand(buf, "errors", do_errors);
- if (config_exists(buf, "drops"))
- d->do_drops = config_get_boolean_ondemand(buf, "drops", do_drops);
- if (config_exists(buf, "fifo"))
- d->do_fifo = config_get_boolean_ondemand(buf, "fifo", do_fifo);
- if (config_exists(buf, "compressed"))
- d->do_compressed = config_get_boolean_ondemand(buf, "compressed", do_compressed);
- if (config_exists(buf, "events"))
- d->do_events = config_get_boolean_ondemand(buf, "events", do_events);
- if (config_exists(buf, "speed"))
- d->do_speed = config_get_boolean_ondemand(buf, "speed", do_speed);
- if (config_exists(buf, "duplex"))
- d->do_duplex = config_get_boolean_ondemand(buf, "duplex", do_duplex);
- if (config_exists(buf, "operstate"))
- d->do_operstate = config_get_boolean_ondemand(buf, "operstate", do_operstate);
- if (config_exists(buf, "carrier"))
- d->do_carrier = config_get_boolean_ondemand(buf, "carrier", do_carrier);
- if (config_exists(buf, "mtu"))
- d->do_mtu = config_get_boolean_ondemand(buf, "mtu", do_mtu);
- }
-
- if(unlikely(!d->enabled))
- continue;
-
-        // See https://github.com/netdata/netdata/issues/15206
-        // This prevents creating charts for virtual interfaces that will shortly be
-        // recreated as container interfaces (when a container starts), or that are
-        // rediscovered and recreated only to be deleted almost immediately (when a
-        // container is stopped or removed).
- if (d->virtual && (now - d->discover_time < NETDEV_VIRTUAL_COLLECT_DELAY)) {
- continue;
- }
-
- if(likely(d->do_bandwidth != CONFIG_BOOLEAN_NO || !d->virtual)) {
- d->rbytes = str2kernel_uint_t(procfile_lineword(ff, l, 1));
- d->tbytes = str2kernel_uint_t(procfile_lineword(ff, l, 9));
-
- if(likely(!d->virtual)) {
- system_rbytes += d->rbytes;
- system_tbytes += d->tbytes;
- }
- }
-
- if(likely(d->do_packets != CONFIG_BOOLEAN_NO)) {
- d->rpackets = str2kernel_uint_t(procfile_lineword(ff, l, 2));
- d->rmulticast = str2kernel_uint_t(procfile_lineword(ff, l, 8));
- d->tpackets = str2kernel_uint_t(procfile_lineword(ff, l, 10));
- }
-
- if(likely(d->do_errors != CONFIG_BOOLEAN_NO)) {
- d->rerrors = str2kernel_uint_t(procfile_lineword(ff, l, 3));
- d->terrors = str2kernel_uint_t(procfile_lineword(ff, l, 11));
- }
-
- if(likely(d->do_drops != CONFIG_BOOLEAN_NO)) {
- d->rdrops = str2kernel_uint_t(procfile_lineword(ff, l, 4));
- d->tdrops = str2kernel_uint_t(procfile_lineword(ff, l, 12));
- }
-
- if(likely(d->do_fifo != CONFIG_BOOLEAN_NO)) {
- d->rfifo = str2kernel_uint_t(procfile_lineword(ff, l, 5));
- d->tfifo = str2kernel_uint_t(procfile_lineword(ff, l, 13));
- }
-
- if(likely(d->do_compressed != CONFIG_BOOLEAN_NO)) {
- d->rcompressed = str2kernel_uint_t(procfile_lineword(ff, l, 7));
- d->tcompressed = str2kernel_uint_t(procfile_lineword(ff, l, 16));
- }
-
- if(likely(d->do_events != CONFIG_BOOLEAN_NO)) {
- d->rframe = str2kernel_uint_t(procfile_lineword(ff, l, 6));
- d->tcollisions = str2kernel_uint_t(procfile_lineword(ff, l, 14));
- d->tcarrier = str2kernel_uint_t(procfile_lineword(ff, l, 15));
- }
-
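-        // Sysfs attributes (carrier, duplex, speed) can disappear while an
-        // interface is being torn down; after a failed read the code below backs
-        // off for READ_RETRY_PERIOD seconds instead of retrying (and logging) on
-        // every collection cycle.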
- if ((d->do_carrier != CONFIG_BOOLEAN_NO ||
- d->do_duplex != CONFIG_BOOLEAN_NO ||
- d->do_speed != CONFIG_BOOLEAN_NO) &&
- d->filename_carrier &&
- (d->carrier_file_exists ||
- now_monotonic_sec() - d->carrier_file_lost_time > READ_RETRY_PERIOD)) {
- if (read_single_number_file(d->filename_carrier, &d->carrier)) {
- if (d->carrier_file_exists)
- collector_error(
- "Cannot refresh interface %s carrier state by reading '%s'. Next update is in %d seconds.",
- d->name,
- d->filename_carrier,
- READ_RETRY_PERIOD);
- d->carrier_file_exists = 0;
- d->carrier_file_lost_time = now_monotonic_sec();
- } else {
- d->carrier_file_exists = 1;
- d->carrier_file_lost_time = 0;
- }
- }
-
- if (d->do_duplex != CONFIG_BOOLEAN_NO &&
- d->filename_duplex &&
- (d->carrier || d->carrier_file_exists) &&
- (d->duplex_file_exists ||
- now_monotonic_sec() - d->duplex_file_lost_time > READ_RETRY_PERIOD)) {
- char buffer[STATE_LENGTH_MAX + 1];
-
- if (read_file(d->filename_duplex, buffer, STATE_LENGTH_MAX)) {
- if (d->duplex_file_exists)
- collector_error("Cannot refresh interface %s duplex state by reading '%s'.", d->name, d->filename_duplex);
- d->duplex_file_exists = 0;
- d->duplex_file_lost_time = now_monotonic_sec();
- d->duplex = NETDEV_DUPLEX_UNKNOWN;
- } else {
-                // values can be "unknown", "half" or "full" -- checking the first
-                // letter is enough to tell them apart
- if (buffer[0] == 'f')
- d->duplex = NETDEV_DUPLEX_FULL;
- else if (buffer[0] == 'h')
- d->duplex = NETDEV_DUPLEX_HALF;
- else
- d->duplex = NETDEV_DUPLEX_UNKNOWN;
- d->duplex_file_exists = 1;
- d->duplex_file_lost_time = 0;
- }
- } else {
- d->duplex = NETDEV_DUPLEX_UNKNOWN;
- }
-
- if(d->do_operstate != CONFIG_BOOLEAN_NO && d->filename_operstate) {
- char buffer[STATE_LENGTH_MAX + 1], *trimmed_buffer;
-
- if (read_file(d->filename_operstate, buffer, STATE_LENGTH_MAX)) {
- collector_error(
- "Cannot refresh %s operstate by reading '%s'. Will not update its status anymore.",
- d->name, d->filename_operstate);
- freez(d->filename_operstate);
- d->filename_operstate = NULL;
- } else {
- trimmed_buffer = trim(buffer);
- d->operstate = get_operstate(trimmed_buffer);
- }
- }
-
- if (d->do_mtu != CONFIG_BOOLEAN_NO && d->filename_mtu) {
- if (read_single_number_file(d->filename_mtu, &d->mtu)) {
- collector_error(
- "Cannot refresh mtu for interface %s by reading '%s'. Stop updating it.", d->name, d->filename_mtu);
- freez(d->filename_mtu);
- d->filename_mtu = NULL;
- }
- }
-
- //collector_info("PROC_NET_DEV: %s speed %zu, bytes %zu/%zu, packets %zu/%zu/%zu, errors %zu/%zu, drops %zu/%zu, fifo %zu/%zu, compressed %zu/%zu, rframe %zu, tcollisions %zu, tcarrier %zu"
- // , d->name, d->speed
- // , d->rbytes, d->tbytes
- // , d->rpackets, d->tpackets, d->rmulticast
- // , d->rerrors, d->terrors
- // , d->rdrops, d->tdrops
- // , d->rfifo, d->tfifo
- // , d->rcompressed, d->tcompressed
- // , d->rframe, d->tcollisions, d->tcarrier
- // );
-
- if(unlikely(d->do_bandwidth == CONFIG_BOOLEAN_AUTO &&
- (d->rbytes || d->tbytes || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
- d->do_bandwidth = CONFIG_BOOLEAN_YES;
-
- if(d->do_bandwidth == CONFIG_BOOLEAN_YES) {
- if(unlikely(!d->st_bandwidth)) {
-
- d->st_bandwidth = rrdset_create_localhost(
- d->chart_type_net_bytes
- , d->chart_id_net_bytes
- , NULL
- , d->chart_family
- , d->chart_ctx_net_bytes
- , "Bandwidth"
- , "kilobits/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , d->priority
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_update_rrdlabels(d->st_bandwidth, d->chart_labels);
-
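-                // bytes/s -> kilobits/s: multiplier 8 (bits per byte), divisor
-                // BITS_IN_A_KILOBIT (1000); "sent" is negated so it renders
-                // below the zero axis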
- d->rd_rbytes = rrddim_add(d->st_bandwidth, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- d->rd_tbytes = rrddim_add(d->st_bandwidth, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
-
- if(d->flipped) {
- // flip receive/transmit
-
- RRDDIM *td = d->rd_rbytes;
- d->rd_rbytes = d->rd_tbytes;
- d->rd_tbytes = td;
- }
- }
-
- rrddim_set_by_pointer(d->st_bandwidth, d->rd_rbytes, (collected_number)d->rbytes);
- rrddim_set_by_pointer(d->st_bandwidth, d->rd_tbytes, (collected_number)d->tbytes);
- rrdset_done(d->st_bandwidth);
-
- if(d->cgroup_netdev_link)
- cgroup_netdev_add_bandwidth(d->cgroup_netdev_link,
- d->flipped ? d->rd_tbytes->collector.last_stored_value : -d->rd_rbytes->collector.last_stored_value,
- d->flipped ? -d->rd_rbytes->collector.last_stored_value : d->rd_tbytes->collector.last_stored_value);
-
- // update the interface speed
- if(d->filename_speed) {
- if(unlikely(!d->chart_var_speed)) {
- d->chart_var_speed =
- rrdsetvar_custom_chart_variable_add_and_acquire(d->st_bandwidth, "nic_speed_max");
- if(!d->chart_var_speed) {
- collector_error(
- "Cannot create interface %s chart variable 'nic_speed_max'. Will not update its speed anymore.",
- d->name);
- freez(d->filename_speed);
- d->filename_speed = NULL;
- }
- }
-
- if (d->filename_speed && d->chart_var_speed) {
- int ret = 0;
-
- if ((d->carrier || d->carrier_file_exists) &&
- (d->speed_file_exists || now_monotonic_sec() - d->speed_file_lost_time > READ_RETRY_PERIOD)) {
- ret = read_single_number_file(d->filename_speed, (unsigned long long *) &d->speed);
- } else {
-                        d->speed = 0; // TODO: this is wrong; we should report no value (NULL) here, not 0.
- }
-
- if(ret) {
- if (d->speed_file_exists)
- collector_error("Cannot refresh interface %s speed by reading '%s'.", d->name, d->filename_speed);
- d->speed_file_exists = 0;
- d->speed_file_lost_time = now_monotonic_sec();
- }
- else {
- if(d->do_speed != CONFIG_BOOLEAN_NO) {
- if(unlikely(!d->st_speed)) {
- d->st_speed = rrdset_create_localhost(
- d->chart_type_net_speed
- , d->chart_id_net_speed
- , NULL
- , d->chart_family
- , d->chart_ctx_net_speed
- , "Interface Speed"
- , "kilobits/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , d->priority + 7
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_speed, RRDSET_FLAG_DETAIL);
-
- rrdset_update_rrdlabels(d->st_speed, d->chart_labels);
-
- d->rd_speed = rrddim_add(d->st_speed, "speed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
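-                        // sysfs reports the link speed in megabits/s; the chart
-                        // is in kilobits/s, hence KILOBITS_IN_A_MEGABIT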
- rrddim_set_by_pointer(d->st_speed, d->rd_speed, (collected_number)d->speed * KILOBITS_IN_A_MEGABIT);
- rrdset_done(d->st_speed);
- }
-
- rrdsetvar_custom_chart_variable_set(
- d->st_bandwidth, d->chart_var_speed, (NETDATA_DOUBLE)d->speed * KILOBITS_IN_A_MEGABIT);
-
- if (d->speed) {
- d->speed_file_exists = 1;
- d->speed_file_lost_time = 0;
- }
- }
- }
- }
- }
-
- if(d->do_duplex != CONFIG_BOOLEAN_NO && d->filename_duplex) {
- if(unlikely(!d->st_duplex)) {
- d->st_duplex = rrdset_create_localhost(
- d->chart_type_net_duplex
- , d->chart_id_net_duplex
- , NULL
- , d->chart_family
- , d->chart_ctx_net_duplex
- , "Interface Duplex State"
- , "state"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , d->priority + 8
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_duplex, RRDSET_FLAG_DETAIL);
-
- rrdset_update_rrdlabels(d->st_duplex, d->chart_labels);
-
- d->rd_duplex_full = rrddim_add(d->st_duplex, "full", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_duplex_half = rrddim_add(d->st_duplex, "half", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_duplex_unknown = rrddim_add(d->st_duplex, "unknown", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(d->st_duplex, d->rd_duplex_full, (collected_number)(d->duplex == NETDEV_DUPLEX_FULL));
- rrddim_set_by_pointer(d->st_duplex, d->rd_duplex_half, (collected_number)(d->duplex == NETDEV_DUPLEX_HALF));
- rrddim_set_by_pointer(d->st_duplex, d->rd_duplex_unknown, (collected_number)(d->duplex == NETDEV_DUPLEX_UNKNOWN));
- rrdset_done(d->st_duplex);
- }
-
- if(d->do_operstate != CONFIG_BOOLEAN_NO && d->filename_operstate) {
- if(unlikely(!d->st_operstate)) {
- d->st_operstate = rrdset_create_localhost(
- d->chart_type_net_operstate
- , d->chart_id_net_operstate
- , NULL
- , d->chart_family
- , d->chart_ctx_net_operstate
- , "Interface Operational State"
- , "state"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , d->priority + 9
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_operstate, RRDSET_FLAG_DETAIL);
-
- rrdset_update_rrdlabels(d->st_operstate, d->chart_labels);
-
- d->rd_operstate_up = rrddim_add(d->st_operstate, "up", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_operstate_down = rrddim_add(d->st_operstate, "down", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_operstate_notpresent = rrddim_add(d->st_operstate, "notpresent", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_operstate_lowerlayerdown = rrddim_add(d->st_operstate, "lowerlayerdown", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_operstate_testing = rrddim_add(d->st_operstate, "testing", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_operstate_dormant = rrddim_add(d->st_operstate, "dormant", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_operstate_unknown = rrddim_add(d->st_operstate, "unknown", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(d->st_operstate, d->rd_operstate_up, (collected_number)(d->operstate == NETDEV_OPERSTATE_UP));
- rrddim_set_by_pointer(d->st_operstate, d->rd_operstate_down, (collected_number)(d->operstate == NETDEV_OPERSTATE_DOWN));
- rrddim_set_by_pointer(d->st_operstate, d->rd_operstate_notpresent, (collected_number)(d->operstate == NETDEV_OPERSTATE_NOTPRESENT));
- rrddim_set_by_pointer(d->st_operstate, d->rd_operstate_lowerlayerdown, (collected_number)(d->operstate == NETDEV_OPERSTATE_LOWERLAYERDOWN));
- rrddim_set_by_pointer(d->st_operstate, d->rd_operstate_testing, (collected_number)(d->operstate == NETDEV_OPERSTATE_TESTING));
- rrddim_set_by_pointer(d->st_operstate, d->rd_operstate_dormant, (collected_number)(d->operstate == NETDEV_OPERSTATE_DORMANT));
- rrddim_set_by_pointer(d->st_operstate, d->rd_operstate_unknown, (collected_number)(d->operstate == NETDEV_OPERSTATE_UNKNOWN));
- rrdset_done(d->st_operstate);
- }
-
- if(d->do_carrier != CONFIG_BOOLEAN_NO && d->carrier_file_exists) {
- if(unlikely(!d->st_carrier)) {
- d->st_carrier = rrdset_create_localhost(
- d->chart_type_net_carrier
- , d->chart_id_net_carrier
- , NULL
- , d->chart_family
- , d->chart_ctx_net_carrier
- , "Interface Physical Link State"
- , "state"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , d->priority + 10
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_carrier, RRDSET_FLAG_DETAIL);
-
- rrdset_update_rrdlabels(d->st_carrier, d->chart_labels);
-
- d->rd_carrier_up = rrddim_add(d->st_carrier, "up", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- d->rd_carrier_down = rrddim_add(d->st_carrier, "down", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(d->st_carrier, d->rd_carrier_up, (collected_number)(d->carrier == 1));
- rrddim_set_by_pointer(d->st_carrier, d->rd_carrier_down, (collected_number)(d->carrier != 1));
- rrdset_done(d->st_carrier);
- }
-
- if(d->do_mtu != CONFIG_BOOLEAN_NO && d->filename_mtu) {
- if(unlikely(!d->st_mtu)) {
- d->st_mtu = rrdset_create_localhost(
- d->chart_type_net_mtu
- , d->chart_id_net_mtu
- , NULL
- , d->chart_family
- , d->chart_ctx_net_mtu
- , "Interface MTU"
- , "octets"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , d->priority + 11
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_mtu, RRDSET_FLAG_DETAIL);
-
- rrdset_update_rrdlabels(d->st_mtu, d->chart_labels);
-
- d->rd_mtu = rrddim_add(d->st_mtu, "mtu", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(d->st_mtu, d->rd_mtu, (collected_number)d->mtu);
- rrdset_done(d->st_mtu);
- }
-
- if(unlikely(d->do_packets == CONFIG_BOOLEAN_AUTO &&
- (d->rpackets || d->tpackets || d->rmulticast || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
- d->do_packets = CONFIG_BOOLEAN_YES;
-
- if(d->do_packets == CONFIG_BOOLEAN_YES) {
- if(unlikely(!d->st_packets)) {
-
- d->st_packets = rrdset_create_localhost(
- d->chart_type_net_packets
- , d->chart_id_net_packets
- , NULL
- , d->chart_family
- , d->chart_ctx_net_packets
- , "Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , d->priority + 1
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_packets, RRDSET_FLAG_DETAIL);
-
- rrdset_update_rrdlabels(d->st_packets, d->chart_labels);
-
- d->rd_rpackets = rrddim_add(d->st_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_tpackets = rrddim_add(d->st_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_rmulticast = rrddim_add(d->st_packets, "multicast", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- if(d->flipped) {
- // flip receive/transmit
-
- RRDDIM *td = d->rd_rpackets;
- d->rd_rpackets = d->rd_tpackets;
- d->rd_tpackets = td;
- }
- }
-
- rrddim_set_by_pointer(d->st_packets, d->rd_rpackets, (collected_number)d->rpackets);
- rrddim_set_by_pointer(d->st_packets, d->rd_tpackets, (collected_number)d->tpackets);
- rrddim_set_by_pointer(d->st_packets, d->rd_rmulticast, (collected_number)d->rmulticast);
- rrdset_done(d->st_packets);
- }
-
- if(unlikely(d->do_errors == CONFIG_BOOLEAN_AUTO &&
- (d->rerrors || d->terrors || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
- d->do_errors = CONFIG_BOOLEAN_YES;
-
- if(d->do_errors == CONFIG_BOOLEAN_YES) {
- if(unlikely(!d->st_errors)) {
-
- d->st_errors = rrdset_create_localhost(
- d->chart_type_net_errors
- , d->chart_id_net_errors
- , NULL
- , d->chart_family
- , d->chart_ctx_net_errors
- , "Interface Errors"
- , "errors/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , d->priority + 2
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_errors, RRDSET_FLAG_DETAIL);
-
- rrdset_update_rrdlabels(d->st_errors, d->chart_labels);
-
- d->rd_rerrors = rrddim_add(d->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_terrors = rrddim_add(d->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- if(d->flipped) {
- // flip receive/transmit
-
- RRDDIM *td = d->rd_rerrors;
- d->rd_rerrors = d->rd_terrors;
- d->rd_terrors = td;
- }
- }
-
- rrddim_set_by_pointer(d->st_errors, d->rd_rerrors, (collected_number)d->rerrors);
- rrddim_set_by_pointer(d->st_errors, d->rd_terrors, (collected_number)d->terrors);
- rrdset_done(d->st_errors);
- }
-
- if(unlikely(d->do_drops == CONFIG_BOOLEAN_AUTO &&
- (d->rdrops || d->tdrops || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
- d->do_drops = CONFIG_BOOLEAN_YES;
-
- if(d->do_drops == CONFIG_BOOLEAN_YES) {
- if(unlikely(!d->st_drops)) {
-
- d->st_drops = rrdset_create_localhost(
- d->chart_type_net_drops
- , d->chart_id_net_drops
- , NULL
- , d->chart_family
- , d->chart_ctx_net_drops
- , "Interface Drops"
- , "drops/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , d->priority + 3
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_drops, RRDSET_FLAG_DETAIL);
-
- rrdset_update_rrdlabels(d->st_drops, d->chart_labels);
-
- d->rd_rdrops = rrddim_add(d->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_tdrops = rrddim_add(d->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- if(d->flipped) {
- // flip receive/transmit
-
- RRDDIM *td = d->rd_rdrops;
- d->rd_rdrops = d->rd_tdrops;
- d->rd_tdrops = td;
- }
- }
-
- rrddim_set_by_pointer(d->st_drops, d->rd_rdrops, (collected_number)d->rdrops);
- rrddim_set_by_pointer(d->st_drops, d->rd_tdrops, (collected_number)d->tdrops);
- rrdset_done(d->st_drops);
- }
-
- if(unlikely(d->do_fifo == CONFIG_BOOLEAN_AUTO &&
- (d->rfifo || d->tfifo || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
- d->do_fifo = CONFIG_BOOLEAN_YES;
-
- if(d->do_fifo == CONFIG_BOOLEAN_YES) {
- if(unlikely(!d->st_fifo)) {
-
- d->st_fifo = rrdset_create_localhost(
- d->chart_type_net_fifo
- , d->chart_id_net_fifo
- , NULL
- , d->chart_family
- , d->chart_ctx_net_fifo
- , "Interface FIFO Buffer Errors"
- , "errors"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , d->priority + 4
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_fifo, RRDSET_FLAG_DETAIL);
-
- rrdset_update_rrdlabels(d->st_fifo, d->chart_labels);
-
- d->rd_rfifo = rrddim_add(d->st_fifo, "receive", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_tfifo = rrddim_add(d->st_fifo, "transmit", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- if(d->flipped) {
- // flip receive/transmit
-
- RRDDIM *td = d->rd_rfifo;
- d->rd_rfifo = d->rd_tfifo;
- d->rd_tfifo = td;
- }
- }
-
- rrddim_set_by_pointer(d->st_fifo, d->rd_rfifo, (collected_number)d->rfifo);
- rrddim_set_by_pointer(d->st_fifo, d->rd_tfifo, (collected_number)d->tfifo);
- rrdset_done(d->st_fifo);
- }
-
- if(unlikely(d->do_compressed == CONFIG_BOOLEAN_AUTO &&
- (d->rcompressed || d->tcompressed || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
- d->do_compressed = CONFIG_BOOLEAN_YES;
-
- if(d->do_compressed == CONFIG_BOOLEAN_YES) {
- if(unlikely(!d->st_compressed)) {
-
- d->st_compressed = rrdset_create_localhost(
- d->chart_type_net_compressed
- , d->chart_id_net_compressed
- , NULL
- , d->chart_family
- , d->chart_ctx_net_compressed
- , "Compressed Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , d->priority + 5
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_compressed, RRDSET_FLAG_DETAIL);
-
- rrdset_update_rrdlabels(d->st_compressed, d->chart_labels);
-
- d->rd_rcompressed = rrddim_add(d->st_compressed, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_tcompressed = rrddim_add(d->st_compressed, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- if(d->flipped) {
- // flip receive/transmit
-
- RRDDIM *td = d->rd_rcompressed;
- d->rd_rcompressed = d->rd_tcompressed;
- d->rd_tcompressed = td;
- }
- }
-
- rrddim_set_by_pointer(d->st_compressed, d->rd_rcompressed, (collected_number)d->rcompressed);
- rrddim_set_by_pointer(d->st_compressed, d->rd_tcompressed, (collected_number)d->tcompressed);
- rrdset_done(d->st_compressed);
- }
-
- if(unlikely(d->do_events == CONFIG_BOOLEAN_AUTO &&
- (d->rframe || d->tcollisions || d->tcarrier || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)))
- d->do_events = CONFIG_BOOLEAN_YES;
-
- if(d->do_events == CONFIG_BOOLEAN_YES) {
- if(unlikely(!d->st_events)) {
-
- d->st_events = rrdset_create_localhost(
- d->chart_type_net_events
- , d->chart_id_net_events
- , NULL
- , d->chart_family
- , d->chart_ctx_net_events
- , "Network Interface Events"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , d->priority + 6
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_events, RRDSET_FLAG_DETAIL);
-
- rrdset_update_rrdlabels(d->st_events, d->chart_labels);
-
- d->rd_rframe = rrddim_add(d->st_events, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_tcollisions = rrddim_add(d->st_events, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_tcarrier = rrddim_add(d->st_events, "carrier", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(d->st_events, d->rd_rframe, (collected_number)d->rframe);
- rrddim_set_by_pointer(d->st_events, d->rd_tcollisions, (collected_number)d->tcollisions);
- rrddim_set_by_pointer(d->st_events, d->rd_tcarrier, (collected_number)d->tcarrier);
- rrdset_done(d->st_events);
- }
-
- d->function_ready = true;
- }
-
- if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO &&
- (system_rbytes || system_tbytes ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_bandwidth = CONFIG_BOOLEAN_YES;
- static RRDSET *st_system_net = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if(unlikely(!st_system_net)) {
- st_system_net = rrdset_create_localhost(
- "system"
- , "net"
- , NULL
- , "network"
- , NULL
- , "Physical Network Interfaces Aggregated Bandwidth"
- , "kilobits/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETDEV_NAME
- , NETDATA_CHART_PRIO_SYSTEM_NET
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_in = rrddim_add(st_system_net, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st_system_net, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_system_net, rd_in, (collected_number)system_rbytes);
- rrddim_set_by_pointer(st_system_net, rd_out, (collected_number)system_tbytes);
-
- rrdset_done(st_system_net);
- }
-
- netdev_cleanup();
-
- return 0;
-}
-
-static void netdev_main_cleanup(void *ptr)
-{
- UNUSED(ptr);
-
- collector_info("cleaning up...");
-
- worker_unregister();
-}
-
-void *netdev_main(void *ptr)
-{
- worker_register("NETDEV");
- worker_register_job_name(0, "netdev");
-
- netdata_thread_cleanup_push(netdev_main_cleanup, ptr);
-
- rrd_collector_started();
- rrd_function_add(localhost, NULL, "network-interfaces", 10, RRDFUNCTIONS_NETDEV_HELP, true, netdev_function_net_interfaces, NULL);
-
- usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- while (service_running(SERVICE_COLLECTORS)) {
- worker_is_idle();
- usec_t hb_dt = heartbeat_next(&hb, step);
-
- if (unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
-
- cgroup_netdev_reset_all();
-
- worker_is_busy(0);
-
-        netdata_mutex_lock(&netdev_dev_mutex);
-        int failed = do_proc_net_dev(localhost->rrd_update_every, hb_dt);
-        netdata_mutex_unlock(&netdev_dev_mutex);
-
-        if(failed)
-            break; // do not exit the loop while still holding the mutex
- }
-
- netdata_thread_cleanup_pop(1);
-
- return NULL;
-}
diff --git a/collectors/proc.plugin/proc_net_ip_vs_stats.c b/collectors/proc.plugin/proc_net_ip_vs_stats.c
deleted file mode 100644
index 2b9c9332e..000000000
--- a/collectors/proc.plugin/proc_net_ip_vs_stats.c
+++ /dev/null
@@ -1,123 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define RRD_TYPE_NET_IPVS "ipvs"
-#define PLUGIN_PROC_MODULE_NET_IPVS_NAME "/proc/net/ip_vs_stats"
-#define CONFIG_SECTION_PLUGIN_PROC_NET_IPVS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NET_IPVS_NAME
-
-int do_proc_net_ip_vs_stats(int update_every, usec_t dt) {
- (void)dt;
- static int do_bandwidth = -1, do_sockets = -1, do_packets = -1;
- static procfile *ff = NULL;
-
- if(do_bandwidth == -1) do_bandwidth = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "IPVS bandwidth", 1);
- if(do_sockets == -1) do_sockets = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "IPVS connections", 1);
- if(do_packets == -1) do_packets = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "IPVS packets", 1);
-
- if(!ff) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/ip_vs_stats");
- ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NET_IPVS, "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT);
- }
- if(!ff) return 1;
-
- ff = procfile_readall(ff);
-    if(!ff) return 0; // return 0 so that we retry opening it on the next iteration
-
- // make sure we have 3 lines
- if(procfile_lines(ff) < 3) return 1;
-
- // make sure we have 5 words on the 3rd line
- if(procfile_linewords(ff, 2) < 5) return 1;
-
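-    // /proc/net/ip_vs_stats layout -- the kernel prints these counters in
-    // hexadecimal (%X / %LX), which is why strtoull() is called with base 16:
-    //
-    //        Total Incoming Outgoing         Incoming         Outgoing
-    //        Conns  Packets  Packets            Bytes            Bytes
-    //     00000001 00000002 00000003 0000000000000004 0000000000000005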
- unsigned long long entries, InPackets, OutPackets, InBytes, OutBytes;
-
- entries = strtoull(procfile_lineword(ff, 2, 0), NULL, 16);
- InPackets = strtoull(procfile_lineword(ff, 2, 1), NULL, 16);
- OutPackets = strtoull(procfile_lineword(ff, 2, 2), NULL, 16);
- InBytes = strtoull(procfile_lineword(ff, 2, 3), NULL, 16);
- OutBytes = strtoull(procfile_lineword(ff, 2, 4), NULL, 16);
-
- if(do_sockets) {
- static RRDSET *st = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IPVS
- , "sockets"
- , NULL
- , RRD_TYPE_NET_IPVS
- , NULL
- , "IPVS New Connections"
- , "connections/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_IPVS_NAME
- , NETDATA_CHART_PRIO_IPVS_SOCKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "connections", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "connections", entries);
- rrdset_done(st);
- }
-
- if(do_packets) {
- static RRDSET *st = NULL;
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IPVS
- , "packets"
- , NULL
- , RRD_TYPE_NET_IPVS
- , NULL
- , "IPVS Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_IPVS_NAME
- , NETDATA_CHART_PRIO_IPVS_PACKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "received", InPackets);
- rrddim_set(st, "sent", OutPackets);
- rrdset_done(st);
- }
-
- if(do_bandwidth) {
- static RRDSET *st = NULL;
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IPVS
- , "net"
- , NULL
- , RRD_TYPE_NET_IPVS
- , NULL
- , "IPVS Bandwidth"
- , "kilobits/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_IPVS_NAME
- , NETDATA_CHART_PRIO_IPVS_NET
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "received", InBytes);
- rrddim_set(st, "sent", OutBytes);
- rrdset_done(st);
- }
-
- return 0;
-}
diff --git a/collectors/proc.plugin/proc_net_netstat.c b/collectors/proc.plugin/proc_net_netstat.c
deleted file mode 100644
index 170daad5d..000000000
--- a/collectors/proc.plugin/proc_net_netstat.c
+++ /dev/null
@@ -1,3087 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define RRD_TYPE_NET_IP "ip"
-#define RRD_TYPE_NET_IP4 "ipv4"
-#define RRD_TYPE_NET_IP6 "ipv6"
-#define PLUGIN_PROC_MODULE_NETSTAT_NAME "/proc/net/netstat"
-#define CONFIG_SECTION_PLUGIN_PROC_NETSTAT "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETSTAT_NAME
-
-static struct proc_net_snmp {
- // kernel_uint_t ip_Forwarding;
- kernel_uint_t ip_DefaultTTL;
- kernel_uint_t ip_InReceives;
- kernel_uint_t ip_InHdrErrors;
- kernel_uint_t ip_InAddrErrors;
- kernel_uint_t ip_ForwDatagrams;
- kernel_uint_t ip_InUnknownProtos;
- kernel_uint_t ip_InDiscards;
- kernel_uint_t ip_InDelivers;
- kernel_uint_t ip_OutRequests;
- kernel_uint_t ip_OutDiscards;
- kernel_uint_t ip_OutNoRoutes;
- kernel_uint_t ip_ReasmTimeout;
- kernel_uint_t ip_ReasmReqds;
- kernel_uint_t ip_ReasmOKs;
- kernel_uint_t ip_ReasmFails;
- kernel_uint_t ip_FragOKs;
- kernel_uint_t ip_FragFails;
- kernel_uint_t ip_FragCreates;
-
- kernel_uint_t icmp_InMsgs;
- kernel_uint_t icmp_OutMsgs;
- kernel_uint_t icmp_InErrors;
- kernel_uint_t icmp_OutErrors;
- kernel_uint_t icmp_InCsumErrors;
-
- kernel_uint_t icmpmsg_InEchoReps;
- kernel_uint_t icmpmsg_OutEchoReps;
- kernel_uint_t icmpmsg_InDestUnreachs;
- kernel_uint_t icmpmsg_OutDestUnreachs;
- kernel_uint_t icmpmsg_InRedirects;
- kernel_uint_t icmpmsg_OutRedirects;
- kernel_uint_t icmpmsg_InEchos;
- kernel_uint_t icmpmsg_OutEchos;
- kernel_uint_t icmpmsg_InRouterAdvert;
- kernel_uint_t icmpmsg_OutRouterAdvert;
- kernel_uint_t icmpmsg_InRouterSelect;
- kernel_uint_t icmpmsg_OutRouterSelect;
- kernel_uint_t icmpmsg_InTimeExcds;
- kernel_uint_t icmpmsg_OutTimeExcds;
- kernel_uint_t icmpmsg_InParmProbs;
- kernel_uint_t icmpmsg_OutParmProbs;
- kernel_uint_t icmpmsg_InTimestamps;
- kernel_uint_t icmpmsg_OutTimestamps;
- kernel_uint_t icmpmsg_InTimestampReps;
- kernel_uint_t icmpmsg_OutTimestampReps;
-
- //kernel_uint_t tcp_RtoAlgorithm;
- //kernel_uint_t tcp_RtoMin;
- //kernel_uint_t tcp_RtoMax;
- ssize_t tcp_MaxConn;
- kernel_uint_t tcp_ActiveOpens;
- kernel_uint_t tcp_PassiveOpens;
- kernel_uint_t tcp_AttemptFails;
- kernel_uint_t tcp_EstabResets;
- kernel_uint_t tcp_CurrEstab;
- kernel_uint_t tcp_InSegs;
- kernel_uint_t tcp_OutSegs;
- kernel_uint_t tcp_RetransSegs;
- kernel_uint_t tcp_InErrs;
- kernel_uint_t tcp_OutRsts;
- kernel_uint_t tcp_InCsumErrors;
-
- kernel_uint_t udp_InDatagrams;
- kernel_uint_t udp_NoPorts;
- kernel_uint_t udp_InErrors;
- kernel_uint_t udp_OutDatagrams;
- kernel_uint_t udp_RcvbufErrors;
- kernel_uint_t udp_SndbufErrors;
- kernel_uint_t udp_InCsumErrors;
- kernel_uint_t udp_IgnoredMulti;
-
- kernel_uint_t udplite_InDatagrams;
- kernel_uint_t udplite_NoPorts;
- kernel_uint_t udplite_InErrors;
- kernel_uint_t udplite_OutDatagrams;
- kernel_uint_t udplite_RcvbufErrors;
- kernel_uint_t udplite_SndbufErrors;
- kernel_uint_t udplite_InCsumErrors;
- kernel_uint_t udplite_IgnoredMulti;
-} snmp_root = { 0 };
-
-static void parse_line_pair(procfile *ff_netstat, ARL_BASE *base, size_t header_line, size_t values_line) {
- size_t hwords = procfile_linewords(ff_netstat, header_line);
- size_t vwords = procfile_linewords(ff_netstat, values_line);
- size_t w;
-
- if(unlikely(vwords > hwords)) {
- collector_error("File /proc/net/netstat on header line %zu has %zu words, but on value line %zu has %zu words.", header_line, hwords, values_line, vwords);
- vwords = hwords;
- }
-
- for(w = 1; w < vwords ;w++) {
- if(unlikely(arl_check(base, procfile_lineword(ff_netstat, header_line, w), procfile_lineword(ff_netstat, values_line, w))))
- break;
- }
-}
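parse_line_pair() above handles the layout of /proc/net/netstat, where each protocol family occupies two lines: a header line naming the counters and a values line carrying their numbers, both starting with the family prefix. A minimal standalone sketch of the same lockstep walk, using only the standard C library (all names here are illustrative; netdata's procfile/ARL helpers are not used):

// Sketch: parse one /proc/net/netstat header/values line pair in lockstep,
// mirroring the w = 1 .. vwords loop of parse_line_pair() above.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_pair(const char *header, const char *values) {
    char h[512], v[512];
    snprintf(h, sizeof(h), "%s", header);   // strtok_r modifies its input,
    snprintf(v, sizeof(v), "%s", values);   // so work on copies

    char *hs = NULL, *vs = NULL;
    strtok_r(h, " ", &hs);                  // skip the "TcpExt:" prefix
    strtok_r(v, " ", &vs);

    char *name, *val;
    while ((name = strtok_r(NULL, " ", &hs)) && (val = strtok_r(NULL, " ", &vs)))
        printf("%s = %llu\n", name, strtoull(val, NULL, 10));
}

int main(void) {
    parse_pair("TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed",
               "TcpExt: 4 2 1");
    return 0;
}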
-
-static void do_proc_net_snmp6(int update_every) {
- static bool do_snmp6 = true;
-
- if (!do_snmp6) {
- return;
- }
-
- static int do_ip6_packets = -1, do_ip6_fragsout = -1, do_ip6_fragsin = -1, do_ip6_errors = -1,
- do_ip6_udplite_packets = -1, do_ip6_udplite_errors = -1, do_ip6_udp_packets = -1, do_ip6_udp_errors = -1,
- do_ip6_bandwidth = -1, do_ip6_mcast = -1, do_ip6_bcast = -1, do_ip6_mcast_p = -1, do_ip6_icmp = -1,
- do_ip6_icmp_redir = -1, do_ip6_icmp_errors = -1, do_ip6_icmp_echos = -1, do_ip6_icmp_groupmemb = -1,
- do_ip6_icmp_router = -1, do_ip6_icmp_neighbor = -1, do_ip6_icmp_mldv2 = -1, do_ip6_icmp_types = -1,
- do_ip6_ect = -1;
-
- static procfile *ff_snmp6 = NULL;
-
- static ARL_BASE *arl_ipv6 = NULL;
-
- static unsigned long long Ip6InReceives = 0ULL;
- static unsigned long long Ip6InHdrErrors = 0ULL;
- static unsigned long long Ip6InTooBigErrors = 0ULL;
- static unsigned long long Ip6InNoRoutes = 0ULL;
- static unsigned long long Ip6InAddrErrors = 0ULL;
- static unsigned long long Ip6InUnknownProtos = 0ULL;
- static unsigned long long Ip6InTruncatedPkts = 0ULL;
- static unsigned long long Ip6InDiscards = 0ULL;
- static unsigned long long Ip6InDelivers = 0ULL;
- static unsigned long long Ip6OutForwDatagrams = 0ULL;
- static unsigned long long Ip6OutRequests = 0ULL;
- static unsigned long long Ip6OutDiscards = 0ULL;
- static unsigned long long Ip6OutNoRoutes = 0ULL;
- static unsigned long long Ip6ReasmTimeout = 0ULL;
- static unsigned long long Ip6ReasmReqds = 0ULL;
- static unsigned long long Ip6ReasmOKs = 0ULL;
- static unsigned long long Ip6ReasmFails = 0ULL;
- static unsigned long long Ip6FragOKs = 0ULL;
- static unsigned long long Ip6FragFails = 0ULL;
- static unsigned long long Ip6FragCreates = 0ULL;
- static unsigned long long Ip6InMcastPkts = 0ULL;
- static unsigned long long Ip6OutMcastPkts = 0ULL;
- static unsigned long long Ip6InOctets = 0ULL;
- static unsigned long long Ip6OutOctets = 0ULL;
- static unsigned long long Ip6InMcastOctets = 0ULL;
- static unsigned long long Ip6OutMcastOctets = 0ULL;
- static unsigned long long Ip6InBcastOctets = 0ULL;
- static unsigned long long Ip6OutBcastOctets = 0ULL;
- static unsigned long long Ip6InNoECTPkts = 0ULL;
- static unsigned long long Ip6InECT1Pkts = 0ULL;
- static unsigned long long Ip6InECT0Pkts = 0ULL;
- static unsigned long long Ip6InCEPkts = 0ULL;
- static unsigned long long Icmp6InMsgs = 0ULL;
- static unsigned long long Icmp6InErrors = 0ULL;
- static unsigned long long Icmp6OutMsgs = 0ULL;
- static unsigned long long Icmp6OutErrors = 0ULL;
- static unsigned long long Icmp6InCsumErrors = 0ULL;
- static unsigned long long Icmp6InDestUnreachs = 0ULL;
- static unsigned long long Icmp6InPktTooBigs = 0ULL;
- static unsigned long long Icmp6InTimeExcds = 0ULL;
- static unsigned long long Icmp6InParmProblems = 0ULL;
- static unsigned long long Icmp6InEchos = 0ULL;
- static unsigned long long Icmp6InEchoReplies = 0ULL;
- static unsigned long long Icmp6InGroupMembQueries = 0ULL;
- static unsigned long long Icmp6InGroupMembResponses = 0ULL;
- static unsigned long long Icmp6InGroupMembReductions = 0ULL;
- static unsigned long long Icmp6InRouterSolicits = 0ULL;
- static unsigned long long Icmp6InRouterAdvertisements = 0ULL;
- static unsigned long long Icmp6InNeighborSolicits = 0ULL;
- static unsigned long long Icmp6InNeighborAdvertisements = 0ULL;
- static unsigned long long Icmp6InRedirects = 0ULL;
- static unsigned long long Icmp6InMLDv2Reports = 0ULL;
- static unsigned long long Icmp6OutDestUnreachs = 0ULL;
- static unsigned long long Icmp6OutPktTooBigs = 0ULL;
- static unsigned long long Icmp6OutTimeExcds = 0ULL;
- static unsigned long long Icmp6OutParmProblems = 0ULL;
- static unsigned long long Icmp6OutEchos = 0ULL;
- static unsigned long long Icmp6OutEchoReplies = 0ULL;
- static unsigned long long Icmp6OutGroupMembQueries = 0ULL;
- static unsigned long long Icmp6OutGroupMembResponses = 0ULL;
- static unsigned long long Icmp6OutGroupMembReductions = 0ULL;
- static unsigned long long Icmp6OutRouterSolicits = 0ULL;
- static unsigned long long Icmp6OutRouterAdvertisements = 0ULL;
- static unsigned long long Icmp6OutNeighborSolicits = 0ULL;
- static unsigned long long Icmp6OutNeighborAdvertisements = 0ULL;
- static unsigned long long Icmp6OutRedirects = 0ULL;
- static unsigned long long Icmp6OutMLDv2Reports = 0ULL;
- static unsigned long long Icmp6InType1 = 0ULL;
- static unsigned long long Icmp6InType128 = 0ULL;
- static unsigned long long Icmp6InType129 = 0ULL;
- static unsigned long long Icmp6InType136 = 0ULL;
- static unsigned long long Icmp6OutType1 = 0ULL;
- static unsigned long long Icmp6OutType128 = 0ULL;
- static unsigned long long Icmp6OutType129 = 0ULL;
- static unsigned long long Icmp6OutType133 = 0ULL;
- static unsigned long long Icmp6OutType135 = 0ULL;
- static unsigned long long Icmp6OutType143 = 0ULL;
- static unsigned long long Udp6InDatagrams = 0ULL;
- static unsigned long long Udp6NoPorts = 0ULL;
- static unsigned long long Udp6InErrors = 0ULL;
- static unsigned long long Udp6OutDatagrams = 0ULL;
- static unsigned long long Udp6RcvbufErrors = 0ULL;
- static unsigned long long Udp6SndbufErrors = 0ULL;
- static unsigned long long Udp6InCsumErrors = 0ULL;
- static unsigned long long Udp6IgnoredMulti = 0ULL;
- static unsigned long long UdpLite6InDatagrams = 0ULL;
- static unsigned long long UdpLite6NoPorts = 0ULL;
- static unsigned long long UdpLite6InErrors = 0ULL;
- static unsigned long long UdpLite6OutDatagrams = 0ULL;
- static unsigned long long UdpLite6RcvbufErrors = 0ULL;
- static unsigned long long UdpLite6SndbufErrors = 0ULL;
- static unsigned long long UdpLite6InCsumErrors = 0ULL;
-
- // prepare for /proc/net/snmp6 parsing
-
- if(unlikely(!arl_ipv6)) {
- do_ip6_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 packets", CONFIG_BOOLEAN_AUTO);
- do_ip6_fragsout = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 fragments sent", CONFIG_BOOLEAN_AUTO);
- do_ip6_fragsin = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 fragments assembly", CONFIG_BOOLEAN_AUTO);
- do_ip6_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 errors", CONFIG_BOOLEAN_AUTO);
- do_ip6_udp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDP packets", CONFIG_BOOLEAN_AUTO);
- do_ip6_udp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDP errors", CONFIG_BOOLEAN_AUTO);
- do_ip6_udplite_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDPlite packets", CONFIG_BOOLEAN_AUTO);
- do_ip6_udplite_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ipv6 UDPlite errors", CONFIG_BOOLEAN_AUTO);
- do_ip6_bandwidth = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "bandwidth", CONFIG_BOOLEAN_AUTO);
- do_ip6_mcast = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "multicast bandwidth", CONFIG_BOOLEAN_AUTO);
- do_ip6_bcast = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "broadcast bandwidth", CONFIG_BOOLEAN_AUTO);
- do_ip6_mcast_p = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "multicast packets", CONFIG_BOOLEAN_AUTO);
- do_ip6_icmp = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp", CONFIG_BOOLEAN_AUTO);
- do_ip6_icmp_redir = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp redirects", CONFIG_BOOLEAN_AUTO);
- do_ip6_icmp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp errors", CONFIG_BOOLEAN_AUTO);
- do_ip6_icmp_echos = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp echos", CONFIG_BOOLEAN_AUTO);
- do_ip6_icmp_groupmemb = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp group membership", CONFIG_BOOLEAN_AUTO);
- do_ip6_icmp_router = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp router", CONFIG_BOOLEAN_AUTO);
- do_ip6_icmp_neighbor = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp neighbor", CONFIG_BOOLEAN_AUTO);
- do_ip6_icmp_mldv2 = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp mldv2", CONFIG_BOOLEAN_AUTO);
- do_ip6_icmp_types = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "icmp types", CONFIG_BOOLEAN_AUTO);
- do_ip6_ect = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp6", "ect", CONFIG_BOOLEAN_AUTO);
-
- arl_ipv6 = arl_create("snmp6", NULL, 60);
- arl_expect(arl_ipv6, "Ip6InReceives", &Ip6InReceives);
- arl_expect(arl_ipv6, "Ip6InHdrErrors", &Ip6InHdrErrors);
- arl_expect(arl_ipv6, "Ip6InTooBigErrors", &Ip6InTooBigErrors);
- arl_expect(arl_ipv6, "Ip6InNoRoutes", &Ip6InNoRoutes);
- arl_expect(arl_ipv6, "Ip6InAddrErrors", &Ip6InAddrErrors);
- arl_expect(arl_ipv6, "Ip6InUnknownProtos", &Ip6InUnknownProtos);
- arl_expect(arl_ipv6, "Ip6InTruncatedPkts", &Ip6InTruncatedPkts);
- arl_expect(arl_ipv6, "Ip6InDiscards", &Ip6InDiscards);
- arl_expect(arl_ipv6, "Ip6InDelivers", &Ip6InDelivers);
- arl_expect(arl_ipv6, "Ip6OutForwDatagrams", &Ip6OutForwDatagrams);
- arl_expect(arl_ipv6, "Ip6OutRequests", &Ip6OutRequests);
- arl_expect(arl_ipv6, "Ip6OutDiscards", &Ip6OutDiscards);
- arl_expect(arl_ipv6, "Ip6OutNoRoutes", &Ip6OutNoRoutes);
- arl_expect(arl_ipv6, "Ip6ReasmTimeout", &Ip6ReasmTimeout);
- arl_expect(arl_ipv6, "Ip6ReasmReqds", &Ip6ReasmReqds);
- arl_expect(arl_ipv6, "Ip6ReasmOKs", &Ip6ReasmOKs);
- arl_expect(arl_ipv6, "Ip6ReasmFails", &Ip6ReasmFails);
- arl_expect(arl_ipv6, "Ip6FragOKs", &Ip6FragOKs);
- arl_expect(arl_ipv6, "Ip6FragFails", &Ip6FragFails);
- arl_expect(arl_ipv6, "Ip6FragCreates", &Ip6FragCreates);
- arl_expect(arl_ipv6, "Ip6InMcastPkts", &Ip6InMcastPkts);
- arl_expect(arl_ipv6, "Ip6OutMcastPkts", &Ip6OutMcastPkts);
- arl_expect(arl_ipv6, "Ip6InOctets", &Ip6InOctets);
- arl_expect(arl_ipv6, "Ip6OutOctets", &Ip6OutOctets);
- arl_expect(arl_ipv6, "Ip6InMcastOctets", &Ip6InMcastOctets);
- arl_expect(arl_ipv6, "Ip6OutMcastOctets", &Ip6OutMcastOctets);
- arl_expect(arl_ipv6, "Ip6InBcastOctets", &Ip6InBcastOctets);
- arl_expect(arl_ipv6, "Ip6OutBcastOctets", &Ip6OutBcastOctets);
- arl_expect(arl_ipv6, "Ip6InNoECTPkts", &Ip6InNoECTPkts);
- arl_expect(arl_ipv6, "Ip6InECT1Pkts", &Ip6InECT1Pkts);
- arl_expect(arl_ipv6, "Ip6InECT0Pkts", &Ip6InECT0Pkts);
- arl_expect(arl_ipv6, "Ip6InCEPkts", &Ip6InCEPkts);
- arl_expect(arl_ipv6, "Icmp6InMsgs", &Icmp6InMsgs);
- arl_expect(arl_ipv6, "Icmp6InErrors", &Icmp6InErrors);
- arl_expect(arl_ipv6, "Icmp6OutMsgs", &Icmp6OutMsgs);
- arl_expect(arl_ipv6, "Icmp6OutErrors", &Icmp6OutErrors);
- arl_expect(arl_ipv6, "Icmp6InCsumErrors", &Icmp6InCsumErrors);
- arl_expect(arl_ipv6, "Icmp6InDestUnreachs", &Icmp6InDestUnreachs);
- arl_expect(arl_ipv6, "Icmp6InPktTooBigs", &Icmp6InPktTooBigs);
- arl_expect(arl_ipv6, "Icmp6InTimeExcds", &Icmp6InTimeExcds);
- arl_expect(arl_ipv6, "Icmp6InParmProblems", &Icmp6InParmProblems);
- arl_expect(arl_ipv6, "Icmp6InEchos", &Icmp6InEchos);
- arl_expect(arl_ipv6, "Icmp6InEchoReplies", &Icmp6InEchoReplies);
- arl_expect(arl_ipv6, "Icmp6InGroupMembQueries", &Icmp6InGroupMembQueries);
- arl_expect(arl_ipv6, "Icmp6InGroupMembResponses", &Icmp6InGroupMembResponses);
- arl_expect(arl_ipv6, "Icmp6InGroupMembReductions", &Icmp6InGroupMembReductions);
- arl_expect(arl_ipv6, "Icmp6InRouterSolicits", &Icmp6InRouterSolicits);
- arl_expect(arl_ipv6, "Icmp6InRouterAdvertisements", &Icmp6InRouterAdvertisements);
- arl_expect(arl_ipv6, "Icmp6InNeighborSolicits", &Icmp6InNeighborSolicits);
- arl_expect(arl_ipv6, "Icmp6InNeighborAdvertisements", &Icmp6InNeighborAdvertisements);
- arl_expect(arl_ipv6, "Icmp6InRedirects", &Icmp6InRedirects);
- arl_expect(arl_ipv6, "Icmp6InMLDv2Reports", &Icmp6InMLDv2Reports);
- arl_expect(arl_ipv6, "Icmp6OutDestUnreachs", &Icmp6OutDestUnreachs);
- arl_expect(arl_ipv6, "Icmp6OutPktTooBigs", &Icmp6OutPktTooBigs);
- arl_expect(arl_ipv6, "Icmp6OutTimeExcds", &Icmp6OutTimeExcds);
- arl_expect(arl_ipv6, "Icmp6OutParmProblems", &Icmp6OutParmProblems);
- arl_expect(arl_ipv6, "Icmp6OutEchos", &Icmp6OutEchos);
- arl_expect(arl_ipv6, "Icmp6OutEchoReplies", &Icmp6OutEchoReplies);
- arl_expect(arl_ipv6, "Icmp6OutGroupMembQueries", &Icmp6OutGroupMembQueries);
- arl_expect(arl_ipv6, "Icmp6OutGroupMembResponses", &Icmp6OutGroupMembResponses);
- arl_expect(arl_ipv6, "Icmp6OutGroupMembReductions", &Icmp6OutGroupMembReductions);
- arl_expect(arl_ipv6, "Icmp6OutRouterSolicits", &Icmp6OutRouterSolicits);
- arl_expect(arl_ipv6, "Icmp6OutRouterAdvertisements", &Icmp6OutRouterAdvertisements);
- arl_expect(arl_ipv6, "Icmp6OutNeighborSolicits", &Icmp6OutNeighborSolicits);
- arl_expect(arl_ipv6, "Icmp6OutNeighborAdvertisements", &Icmp6OutNeighborAdvertisements);
- arl_expect(arl_ipv6, "Icmp6OutRedirects", &Icmp6OutRedirects);
- arl_expect(arl_ipv6, "Icmp6OutMLDv2Reports", &Icmp6OutMLDv2Reports);
- arl_expect(arl_ipv6, "Icmp6InType1", &Icmp6InType1);
- arl_expect(arl_ipv6, "Icmp6InType128", &Icmp6InType128);
- arl_expect(arl_ipv6, "Icmp6InType129", &Icmp6InType129);
- arl_expect(arl_ipv6, "Icmp6InType136", &Icmp6InType136);
- arl_expect(arl_ipv6, "Icmp6OutType1", &Icmp6OutType1);
- arl_expect(arl_ipv6, "Icmp6OutType128", &Icmp6OutType128);
- arl_expect(arl_ipv6, "Icmp6OutType129", &Icmp6OutType129);
- arl_expect(arl_ipv6, "Icmp6OutType133", &Icmp6OutType133);
- arl_expect(arl_ipv6, "Icmp6OutType135", &Icmp6OutType135);
- arl_expect(arl_ipv6, "Icmp6OutType143", &Icmp6OutType143);
- arl_expect(arl_ipv6, "Udp6InDatagrams", &Udp6InDatagrams);
- arl_expect(arl_ipv6, "Udp6NoPorts", &Udp6NoPorts);
- arl_expect(arl_ipv6, "Udp6InErrors", &Udp6InErrors);
- arl_expect(arl_ipv6, "Udp6OutDatagrams", &Udp6OutDatagrams);
- arl_expect(arl_ipv6, "Udp6RcvbufErrors", &Udp6RcvbufErrors);
- arl_expect(arl_ipv6, "Udp6SndbufErrors", &Udp6SndbufErrors);
- arl_expect(arl_ipv6, "Udp6InCsumErrors", &Udp6InCsumErrors);
- arl_expect(arl_ipv6, "Udp6IgnoredMulti", &Udp6IgnoredMulti);
- arl_expect(arl_ipv6, "UdpLite6InDatagrams", &UdpLite6InDatagrams);
- arl_expect(arl_ipv6, "UdpLite6NoPorts", &UdpLite6NoPorts);
- arl_expect(arl_ipv6, "UdpLite6InErrors", &UdpLite6InErrors);
- arl_expect(arl_ipv6, "UdpLite6OutDatagrams", &UdpLite6OutDatagrams);
- arl_expect(arl_ipv6, "UdpLite6RcvbufErrors", &UdpLite6RcvbufErrors);
- arl_expect(arl_ipv6, "UdpLite6SndbufErrors", &UdpLite6SndbufErrors);
- arl_expect(arl_ipv6, "UdpLite6InCsumErrors", &UdpLite6InCsumErrors);
- }
-
-    // parse /proc/net/snmp6
-
- if (unlikely(!ff_snmp6)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/snmp6");
- ff_snmp6 = procfile_open(
- config_get("plugin:proc:/proc/net/snmp6", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
- if (unlikely(!ff_snmp6)) {
- do_snmp6 = false;
- return;
- }
- }
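The block above opens /proc/net/snmp6 once and, if the open fails (for instance when IPv6 is compiled out of the kernel), latches do_snmp6 to false so the collector never retries on later iterations. A minimal sketch of that pattern with plain stdio, all names illustrative:

// Sketch: open the file once, keep it open across iterations, and disable
// the collector permanently after the first failed open.
#include <stdbool.h>
#include <stdio.h>

static bool collect_snmp6(void) {
    static bool enabled = true;   // latched to false after the first failure
    static FILE *fp = NULL;

    if (!enabled)
        return false;

    if (!fp) {
        fp = fopen("/proc/net/snmp6", "r");
        if (!fp) {                // e.g. IPv6 disabled on this kernel
            enabled = false;      // never try again
            return false;
        }
    }

    rewind(fp);                   // re-read the same descriptor each pass
    // ... parse the freshly read contents here ...
    return true;
}

int main(void) {
    printf("collected: %d\n", collect_snmp6());
    return 0;
}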
-
- ff_snmp6 = procfile_readall(ff_snmp6);
- if (unlikely(!ff_snmp6))
- return;
-
- size_t lines, l;
-
- lines = procfile_lines(ff_snmp6);
-
- arl_begin(arl_ipv6);
-
- for (l = 0; l < lines; l++) {
- size_t words = procfile_linewords(ff_snmp6, l);
- if (unlikely(words < 2)) {
- if (unlikely(words)) {
- collector_error("Cannot read /proc/net/snmp6 line %zu. Expected 2 params, read %zu.", l, words);
- continue;
- }
- }
-
- if (unlikely(arl_check(arl_ipv6, procfile_lineword(ff_snmp6, l, 0), procfile_lineword(ff_snmp6, l, 1))))
- break;
- }
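Unlike /proc/net/netstat with its paired header/values lines, /proc/net/snmp6 keeps one "Name Value" pair per line, which is why the loop above can hand column 0 and column 1 of every line straight to arl_check(). A standalone sketch of reading that format, assuming only a Linux /proc filesystem:

// Sketch: /proc/net/snmp6 is one "Name Value" pair per line, e.g.
//   Ip6InReceives 8314
//   Icmp6InMsgs   112
#include <stdio.h>

int main(void) {
    FILE *fp = fopen("/proc/net/snmp6", "r");
    if (!fp) {
        perror("/proc/net/snmp6");
        return 1;
    }

    char name[128];
    unsigned long long value;
    // column 0 is the counter name, column 1 its cumulative value
    while (fscanf(fp, "%127s %llu", name, &value) == 2)
        printf("%s = %llu\n", name, value);

    fclose(fp);
    return 0;
}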
-
- if(do_ip6_bandwidth == CONFIG_BOOLEAN_YES || (do_ip6_bandwidth == CONFIG_BOOLEAN_AUTO &&
- (Ip6InOctets ||
- Ip6OutOctets ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_bandwidth = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_received = NULL,
- *rd_sent = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "system"
- , "ipv6"
- , NULL
- , "network"
- , NULL
- , "IPv6 Bandwidth"
- , "kilobits/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_SYSTEM_IPV6
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_received = rrddim_add(st, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rd_sent = rrddim_add(st, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_received, Ip6InOctets);
- rrddim_set_by_pointer(st, rd_sent, Ip6OutOctets);
- rrdset_done(st);
- }
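In the chart above the dimensions are fed raw octet counters; RRD_ALGORITHM_INCREMENTAL makes netdata plot the per-second delta, and the multiplier/divisor pair 8 and BITS_IN_A_KILOBIT scales octets to kilobits (the negative multiplier on the sent dimension only mirrors it below the axis). A sketch of the same arithmetic, assuming BITS_IN_A_KILOBIT is 1000 as in netdata:

// Sketch of the conversion behind the chart above: cumulative octet
// counters become per-second deltas, then octets scale to kilobits.
#include <stdio.h>

#define BITS_IN_A_KILOBIT 1000

static double octets_to_kbit_per_sec(unsigned long long prev_octets,
                                     unsigned long long curr_octets,
                                     double seconds) {
    unsigned long long delta = curr_octets - prev_octets;  // counter delta
    return (double)delta * 8.0 / BITS_IN_A_KILOBIT / seconds;
}

int main(void) {
    // 125000 octets in one second is 1000000 bits/s, i.e. 1000 kilobits/s
    printf("%.1f kilobits/s\n", octets_to_kbit_per_sec(0, 125000, 1.0));
    return 0;
}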
-
- if(do_ip6_packets == CONFIG_BOOLEAN_YES || (do_ip6_packets == CONFIG_BOOLEAN_AUTO &&
- (Ip6InReceives ||
- Ip6OutRequests ||
- Ip6InDelivers ||
- Ip6OutForwDatagrams ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_packets = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_received = NULL,
- *rd_sent = NULL,
- *rd_forwarded = NULL,
- *rd_delivers = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "packets"
- , NULL
- , "packets"
- , NULL
- , "IPv6 Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_PACKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_received = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_sent = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_forwarded = rrddim_add(st, "OutForwDatagrams", "forwarded", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_delivers = rrddim_add(st, "InDelivers", "delivers", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_received, Ip6InReceives);
- rrddim_set_by_pointer(st, rd_sent, Ip6OutRequests);
- rrddim_set_by_pointer(st, rd_forwarded, Ip6OutForwDatagrams);
- rrddim_set_by_pointer(st, rd_delivers, Ip6InDelivers);
- rrdset_done(st);
- }
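Every chart block in this file gates on the same idiom: render when the option is an explicit yes, or when it is auto and any relevant counter is nonzero (or zero metrics are forced); once triggered, the flag is latched to CONFIG_BOOLEAN_YES so the chart does not disappear when the counters go quiet again. A compact sketch of that latch (the CONFIG_BOOLEAN_* values mirror netdata's, everything else is illustrative):

// Sketch of the auto-enable latch used by every chart block in this file.
#include <stdbool.h>
#include <stdio.h>

enum { CONFIG_BOOLEAN_NO = 0, CONFIG_BOOLEAN_YES = 1, CONFIG_BOOLEAN_AUTO = 2 };

static int do_chart = CONFIG_BOOLEAN_AUTO;

static bool should_render(unsigned long long in, unsigned long long out,
                          bool zero_metrics_enabled) {
    if (do_chart == CONFIG_BOOLEAN_YES ||
        (do_chart == CONFIG_BOOLEAN_AUTO && (in || out || zero_metrics_enabled))) {
        do_chart = CONFIG_BOOLEAN_YES;  // latch: the chart stays on from now on
        return true;
    }
    return false;
}

int main(void) {
    printf("%d\n", should_render(0, 0, false));  // 0: no traffic yet, stays off
    printf("%d\n", should_render(5, 0, false));  // 1: traffic seen, latches on
    printf("%d\n", should_render(0, 0, false));  // 1: stays on although idle
    return 0;
}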
-
- if(do_ip6_fragsout == CONFIG_BOOLEAN_YES || (do_ip6_fragsout == CONFIG_BOOLEAN_AUTO &&
- (Ip6FragOKs ||
- Ip6FragFails ||
- Ip6FragCreates ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_fragsout = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_ok = NULL,
- *rd_failed = NULL,
- *rd_all = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "fragsout"
- , NULL
- , "fragments6"
- , NULL
- , "IPv6 Fragments Sent"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_FRAGSOUT
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_failed = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_all = rrddim_add(st, "FragCreates", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_ok, Ip6FragOKs);
- rrddim_set_by_pointer(st, rd_failed, Ip6FragFails);
- rrddim_set_by_pointer(st, rd_all, Ip6FragCreates);
- rrdset_done(st);
- }
-
- if(do_ip6_fragsin == CONFIG_BOOLEAN_YES || (do_ip6_fragsin == CONFIG_BOOLEAN_AUTO &&
- (Ip6ReasmOKs ||
- Ip6ReasmFails ||
- Ip6ReasmTimeout ||
- Ip6ReasmReqds ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_fragsin = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_ok = NULL,
- *rd_failed = NULL,
- *rd_timeout = NULL,
- *rd_all = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "fragsin"
- , NULL
- , "fragments6"
- , NULL
- , "IPv6 Fragments Reassembly"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_FRAGSIN
- , update_every
- , RRDSET_TYPE_LINE);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_timeout = rrddim_add(st, "ReasmTimeout", "timeout", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_all = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_ok, Ip6ReasmOKs);
- rrddim_set_by_pointer(st, rd_failed, Ip6ReasmFails);
- rrddim_set_by_pointer(st, rd_timeout, Ip6ReasmTimeout);
- rrddim_set_by_pointer(st, rd_all, Ip6ReasmReqds);
- rrdset_done(st);
- }
-
- if(do_ip6_errors == CONFIG_BOOLEAN_YES || (do_ip6_errors == CONFIG_BOOLEAN_AUTO &&
- (Ip6InDiscards ||
- Ip6OutDiscards ||
- Ip6InHdrErrors ||
- Ip6InAddrErrors ||
- Ip6InUnknownProtos ||
- Ip6InTooBigErrors ||
- Ip6InTruncatedPkts ||
- Ip6InNoRoutes ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_errors = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_InDiscards = NULL,
- *rd_OutDiscards = NULL,
- *rd_InHdrErrors = NULL,
- *rd_InAddrErrors = NULL,
- *rd_InUnknownProtos = NULL,
- *rd_InTooBigErrors = NULL,
- *rd_InTruncatedPkts = NULL,
- *rd_InNoRoutes = NULL,
- *rd_OutNoRoutes = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "errors"
- , NULL
- , "errors"
- , NULL
- , "IPv6 Errors"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_ERRORS
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InAddrErrors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InUnknownProtos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InTooBigErrors = rrddim_add(st, "InTooBigErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InTruncatedPkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InNoRoutes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutNoRoutes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InDiscards, Ip6InDiscards);
- rrddim_set_by_pointer(st, rd_OutDiscards, Ip6OutDiscards);
- rrddim_set_by_pointer(st, rd_InHdrErrors, Ip6InHdrErrors);
- rrddim_set_by_pointer(st, rd_InAddrErrors, Ip6InAddrErrors);
- rrddim_set_by_pointer(st, rd_InUnknownProtos, Ip6InUnknownProtos);
- rrddim_set_by_pointer(st, rd_InTooBigErrors, Ip6InTooBigErrors);
- rrddim_set_by_pointer(st, rd_InTruncatedPkts, Ip6InTruncatedPkts);
- rrddim_set_by_pointer(st, rd_InNoRoutes, Ip6InNoRoutes);
- rrddim_set_by_pointer(st, rd_OutNoRoutes, Ip6OutNoRoutes);
- rrdset_done(st);
- }
-
- if(do_ip6_udp_packets == CONFIG_BOOLEAN_YES || (do_ip6_udp_packets == CONFIG_BOOLEAN_AUTO &&
- (Udp6InDatagrams ||
- Udp6OutDatagrams ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
-        do_ip6_udp_packets = CONFIG_BOOLEAN_YES;
-        static RRDSET *st = NULL;
- static RRDDIM *rd_received = NULL,
- *rd_sent = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "udppackets"
- , NULL
- , "udp6"
- , NULL
- , "IPv6 UDP Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_UDP_PACKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_received = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_sent = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_received, Udp6InDatagrams);
- rrddim_set_by_pointer(st, rd_sent, Udp6OutDatagrams);
- rrdset_done(st);
- }
-
- if(do_ip6_udp_errors == CONFIG_BOOLEAN_YES || (do_ip6_udp_errors == CONFIG_BOOLEAN_AUTO &&
- (Udp6InErrors ||
- Udp6NoPorts ||
- Udp6RcvbufErrors ||
- Udp6SndbufErrors ||
- Udp6InCsumErrors ||
- Udp6IgnoredMulti ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_udp_errors = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_RcvbufErrors = NULL,
- *rd_SndbufErrors = NULL,
- *rd_InErrors = NULL,
- *rd_NoPorts = NULL,
- *rd_InCsumErrors = NULL,
- *rd_IgnoredMulti = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "udperrors"
- , NULL
- , "udp6"
- , NULL
- , "IPv6 UDP Errors"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_UDP_ERRORS
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_RcvbufErrors, Udp6RcvbufErrors);
- rrddim_set_by_pointer(st, rd_SndbufErrors, Udp6SndbufErrors);
- rrddim_set_by_pointer(st, rd_InErrors, Udp6InErrors);
- rrddim_set_by_pointer(st, rd_NoPorts, Udp6NoPorts);
- rrddim_set_by_pointer(st, rd_InCsumErrors, Udp6InCsumErrors);
- rrddim_set_by_pointer(st, rd_IgnoredMulti, Udp6IgnoredMulti);
- rrdset_done(st);
- }
-
- if(do_ip6_udplite_packets == CONFIG_BOOLEAN_YES || (do_ip6_udplite_packets == CONFIG_BOOLEAN_AUTO &&
- (UdpLite6InDatagrams ||
- UdpLite6OutDatagrams ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
-        do_ip6_udplite_packets = CONFIG_BOOLEAN_YES;
-        static RRDSET *st = NULL;
- static RRDDIM *rd_received = NULL,
- *rd_sent = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "udplitepackets"
- , NULL
- , "udplite6"
- , NULL
- , "IPv6 UDPlite Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_UDPLITE_PACKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_received = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_sent = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_received, UdpLite6InDatagrams);
- rrddim_set_by_pointer(st, rd_sent, UdpLite6OutDatagrams);
- rrdset_done(st);
- }
-
- if(do_ip6_udplite_errors == CONFIG_BOOLEAN_YES || (do_ip6_udplite_errors == CONFIG_BOOLEAN_AUTO &&
- (UdpLite6InErrors ||
- UdpLite6NoPorts ||
- UdpLite6RcvbufErrors ||
- UdpLite6SndbufErrors ||
- UdpLite6InCsumErrors ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_udplite_errors = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_RcvbufErrors = NULL,
- *rd_SndbufErrors = NULL,
- *rd_InErrors = NULL,
- *rd_NoPorts = NULL,
- *rd_InCsumErrors = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "udpliteerrors"
- , NULL
- , "udplite6"
- , NULL
- , "IPv6 UDP Lite Errors"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_UDPLITE_ERRORS
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InErrors, UdpLite6InErrors);
- rrddim_set_by_pointer(st, rd_NoPorts, UdpLite6NoPorts);
- rrddim_set_by_pointer(st, rd_RcvbufErrors, UdpLite6RcvbufErrors);
- rrddim_set_by_pointer(st, rd_SndbufErrors, UdpLite6SndbufErrors);
- rrddim_set_by_pointer(st, rd_InCsumErrors, UdpLite6InCsumErrors);
- rrdset_done(st);
- }
-
- if(do_ip6_mcast == CONFIG_BOOLEAN_YES || (do_ip6_mcast == CONFIG_BOOLEAN_AUTO &&
- (Ip6OutMcastOctets ||
- Ip6InMcastOctets ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_mcast = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_Ip6InMcastOctets = NULL,
- *rd_Ip6OutMcastOctets = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "mcast"
- , NULL
- , "multicast6"
- , NULL
- , "IPv6 Multicast Bandwidth"
- , "kilobits/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_MCAST
- , update_every
- , RRDSET_TYPE_AREA
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_Ip6InMcastOctets = rrddim_add(st, "InMcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rd_Ip6OutMcastOctets = rrddim_add(st, "OutMcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_Ip6InMcastOctets, Ip6InMcastOctets);
- rrddim_set_by_pointer(st, rd_Ip6OutMcastOctets, Ip6OutMcastOctets);
- rrdset_done(st);
- }
-
- if(do_ip6_bcast == CONFIG_BOOLEAN_YES || (do_ip6_bcast == CONFIG_BOOLEAN_AUTO &&
- (Ip6OutBcastOctets ||
- Ip6InBcastOctets ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_bcast = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_Ip6InBcastOctets = NULL,
- *rd_Ip6OutBcastOctets = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "bcast"
- , NULL
- , "broadcast6"
- , NULL
- , "IPv6 Broadcast Bandwidth"
- , "kilobits/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_BCAST
- , update_every
- , RRDSET_TYPE_AREA
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_Ip6InBcastOctets = rrddim_add(st, "InBcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rd_Ip6OutBcastOctets = rrddim_add(st, "OutBcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_Ip6InBcastOctets, Ip6InBcastOctets);
- rrddim_set_by_pointer(st, rd_Ip6OutBcastOctets, Ip6OutBcastOctets);
- rrdset_done(st);
- }
-
- if(do_ip6_mcast_p == CONFIG_BOOLEAN_YES || (do_ip6_mcast_p == CONFIG_BOOLEAN_AUTO &&
- (Ip6OutMcastPkts ||
- Ip6InMcastPkts ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_mcast_p = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_Ip6InMcastPkts = NULL,
- *rd_Ip6OutMcastPkts = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "mcastpkts"
- , NULL
- , "multicast6"
- , NULL
- , "IPv6 Multicast Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_MCAST_PACKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_Ip6InMcastPkts = rrddim_add(st, "InMcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_Ip6OutMcastPkts = rrddim_add(st, "OutMcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_Ip6InMcastPkts, Ip6InMcastPkts);
- rrddim_set_by_pointer(st, rd_Ip6OutMcastPkts, Ip6OutMcastPkts);
- rrdset_done(st);
- }
-
- if(do_ip6_icmp == CONFIG_BOOLEAN_YES || (do_ip6_icmp == CONFIG_BOOLEAN_AUTO &&
- (Icmp6InMsgs ||
- Icmp6OutMsgs ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_icmp = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_Icmp6InMsgs = NULL,
- *rd_Icmp6OutMsgs = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "icmp"
- , NULL
- , "icmp6"
- , NULL
- , "IPv6 ICMP Messages"
- , "messages/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_ICMP
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_Icmp6InMsgs = rrddim_add(st, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_Icmp6OutMsgs = rrddim_add(st, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_Icmp6InMsgs, Icmp6InMsgs);
- rrddim_set_by_pointer(st, rd_Icmp6OutMsgs, Icmp6OutMsgs);
- rrdset_done(st);
- }
-
- if(do_ip6_icmp_redir == CONFIG_BOOLEAN_YES || (do_ip6_icmp_redir == CONFIG_BOOLEAN_AUTO &&
- (Icmp6InRedirects ||
- Icmp6OutRedirects ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_icmp_redir = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_Icmp6InRedirects = NULL,
- *rd_Icmp6OutRedirects = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "icmpredir"
- , NULL
- , "icmp6"
- , NULL
- , "IPv6 ICMP Redirects"
- , "redirects/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_ICMP_REDIR
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_Icmp6InRedirects = rrddim_add(st, "InRedirects", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_Icmp6OutRedirects = rrddim_add(st, "OutRedirects", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_Icmp6InRedirects, Icmp6InRedirects);
- rrddim_set_by_pointer(st, rd_Icmp6OutRedirects, Icmp6OutRedirects);
- rrdset_done(st);
- }
-
- if(do_ip6_icmp_errors == CONFIG_BOOLEAN_YES || (do_ip6_icmp_errors == CONFIG_BOOLEAN_AUTO &&
- (Icmp6InErrors ||
- Icmp6OutErrors ||
- Icmp6InCsumErrors ||
- Icmp6InDestUnreachs ||
- Icmp6InPktTooBigs ||
- Icmp6InTimeExcds ||
- Icmp6InParmProblems ||
- Icmp6OutDestUnreachs ||
- Icmp6OutPktTooBigs ||
- Icmp6OutTimeExcds ||
- Icmp6OutParmProblems ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_icmp_errors = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_InErrors = NULL,
- *rd_OutErrors = NULL,
- *rd_InCsumErrors = NULL,
- *rd_InDestUnreachs = NULL,
- *rd_InPktTooBigs = NULL,
- *rd_InTimeExcds = NULL,
- *rd_InParmProblems = NULL,
- *rd_OutDestUnreachs = NULL,
- *rd_OutPktTooBigs = NULL,
- *rd_OutTimeExcds = NULL,
- *rd_OutParmProblems = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "icmperrors"
- , NULL
- , "icmp6"
- , NULL
- , "IPv6 ICMP Errors"
- , "errors/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutErrors = rrddim_add(st, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InDestUnreachs = rrddim_add(st, "InDestUnreachs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InPktTooBigs = rrddim_add(st, "InPktTooBigs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InTimeExcds = rrddim_add(st, "InTimeExcds", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InParmProblems = rrddim_add(st, "InParmProblems", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutDestUnreachs = rrddim_add(st, "OutDestUnreachs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutPktTooBigs = rrddim_add(st, "OutPktTooBigs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutTimeExcds = rrddim_add(st, "OutTimeExcds", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutParmProblems = rrddim_add(st, "OutParmProblems", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InErrors, Icmp6InErrors);
- rrddim_set_by_pointer(st, rd_OutErrors, Icmp6OutErrors);
- rrddim_set_by_pointer(st, rd_InCsumErrors, Icmp6InCsumErrors);
- rrddim_set_by_pointer(st, rd_InDestUnreachs, Icmp6InDestUnreachs);
- rrddim_set_by_pointer(st, rd_InPktTooBigs, Icmp6InPktTooBigs);
- rrddim_set_by_pointer(st, rd_InTimeExcds, Icmp6InTimeExcds);
- rrddim_set_by_pointer(st, rd_InParmProblems, Icmp6InParmProblems);
- rrddim_set_by_pointer(st, rd_OutDestUnreachs, Icmp6OutDestUnreachs);
- rrddim_set_by_pointer(st, rd_OutPktTooBigs, Icmp6OutPktTooBigs);
- rrddim_set_by_pointer(st, rd_OutTimeExcds, Icmp6OutTimeExcds);
- rrddim_set_by_pointer(st, rd_OutParmProblems, Icmp6OutParmProblems);
- rrdset_done(st);
- }
-
- if(do_ip6_icmp_echos == CONFIG_BOOLEAN_YES || (do_ip6_icmp_echos == CONFIG_BOOLEAN_AUTO &&
- (Icmp6InEchos ||
- Icmp6OutEchos ||
- Icmp6InEchoReplies ||
- Icmp6OutEchoReplies ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_icmp_echos = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_InEchos = NULL,
- *rd_OutEchos = NULL,
- *rd_InEchoReplies = NULL,
- *rd_OutEchoReplies = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "icmpechos"
- , NULL
- , "icmp6"
- , NULL
- , "IPv6 ICMP Echo"
- , "messages/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_ICMP_ECHOS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InEchos = rrddim_add(st, "InEchos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutEchos = rrddim_add(st, "OutEchos", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InEchoReplies = rrddim_add(st, "InEchoReplies", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutEchoReplies = rrddim_add(st, "OutEchoReplies", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InEchos, Icmp6InEchos);
- rrddim_set_by_pointer(st, rd_OutEchos, Icmp6OutEchos);
- rrddim_set_by_pointer(st, rd_InEchoReplies, Icmp6InEchoReplies);
- rrddim_set_by_pointer(st, rd_OutEchoReplies, Icmp6OutEchoReplies);
- rrdset_done(st);
- }
-
- if(do_ip6_icmp_groupmemb == CONFIG_BOOLEAN_YES || (do_ip6_icmp_groupmemb == CONFIG_BOOLEAN_AUTO &&
- (Icmp6InGroupMembQueries ||
- Icmp6OutGroupMembQueries ||
- Icmp6InGroupMembResponses ||
- Icmp6OutGroupMembResponses ||
- Icmp6InGroupMembReductions ||
- Icmp6OutGroupMembReductions ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_icmp_groupmemb = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_InQueries = NULL,
- *rd_OutQueries = NULL,
- *rd_InResponses = NULL,
- *rd_OutResponses = NULL,
- *rd_InReductions = NULL,
- *rd_OutReductions = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "groupmemb"
- , NULL
- , "icmp6"
- , NULL
- , "IPv6 ICMP Group Membership"
- , "messages/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_ICMP_GROUPMEMB
- , update_every
- , RRDSET_TYPE_LINE);
-
- rd_InQueries = rrddim_add(st, "InQueries", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutQueries = rrddim_add(st, "OutQueries", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InResponses = rrddim_add(st, "InResponses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutResponses = rrddim_add(st, "OutResponses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InReductions = rrddim_add(st, "InReductions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutReductions = rrddim_add(st, "OutReductions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InQueries, Icmp6InGroupMembQueries);
- rrddim_set_by_pointer(st, rd_OutQueries, Icmp6OutGroupMembQueries);
- rrddim_set_by_pointer(st, rd_InResponses, Icmp6InGroupMembResponses);
- rrddim_set_by_pointer(st, rd_OutResponses, Icmp6OutGroupMembResponses);
- rrddim_set_by_pointer(st, rd_InReductions, Icmp6InGroupMembReductions);
- rrddim_set_by_pointer(st, rd_OutReductions, Icmp6OutGroupMembReductions);
- rrdset_done(st);
- }
-
- if(do_ip6_icmp_router == CONFIG_BOOLEAN_YES || (do_ip6_icmp_router == CONFIG_BOOLEAN_AUTO &&
- (Icmp6InRouterSolicits ||
- Icmp6OutRouterSolicits ||
- Icmp6InRouterAdvertisements ||
- Icmp6OutRouterAdvertisements ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_icmp_router = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_InSolicits = NULL,
- *rd_OutSolicits = NULL,
- *rd_InAdvertisements = NULL,
- *rd_OutAdvertisements = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "icmprouter"
- , NULL
- , "icmp6"
- , NULL
- , "IPv6 Router Messages"
- , "messages/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_ICMP_ROUTER
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InSolicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutSolicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InAdvertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutAdvertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InSolicits, Icmp6InRouterSolicits);
- rrddim_set_by_pointer(st, rd_OutSolicits, Icmp6OutRouterSolicits);
- rrddim_set_by_pointer(st, rd_InAdvertisements, Icmp6InRouterAdvertisements);
- rrddim_set_by_pointer(st, rd_OutAdvertisements, Icmp6OutRouterAdvertisements);
- rrdset_done(st);
- }
-
- if(do_ip6_icmp_neighbor == CONFIG_BOOLEAN_YES || (do_ip6_icmp_neighbor == CONFIG_BOOLEAN_AUTO &&
- (Icmp6InNeighborSolicits ||
- Icmp6OutNeighborSolicits ||
- Icmp6InNeighborAdvertisements ||
- Icmp6OutNeighborAdvertisements ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_icmp_neighbor = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_InSolicits = NULL,
- *rd_OutSolicits = NULL,
- *rd_InAdvertisements = NULL,
- *rd_OutAdvertisements = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "icmpneighbor"
- , NULL
- , "icmp6"
- , NULL
- , "IPv6 Neighbor Messages"
- , "messages/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_ICMP_NEIGHBOR
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InSolicits = rrddim_add(st, "InSolicits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutSolicits = rrddim_add(st, "OutSolicits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InAdvertisements = rrddim_add(st, "InAdvertisements", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutAdvertisements = rrddim_add(st, "OutAdvertisements", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InSolicits, Icmp6InNeighborSolicits);
- rrddim_set_by_pointer(st, rd_OutSolicits, Icmp6OutNeighborSolicits);
- rrddim_set_by_pointer(st, rd_InAdvertisements, Icmp6InNeighborAdvertisements);
- rrddim_set_by_pointer(st, rd_OutAdvertisements, Icmp6OutNeighborAdvertisements);
- rrdset_done(st);
- }
-
- if(do_ip6_icmp_mldv2 == CONFIG_BOOLEAN_YES || (do_ip6_icmp_mldv2 == CONFIG_BOOLEAN_AUTO &&
- (Icmp6InMLDv2Reports ||
- Icmp6OutMLDv2Reports ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_icmp_mldv2 = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_InMLDv2Reports = NULL,
- *rd_OutMLDv2Reports = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "icmpmldv2"
- , NULL
- , "icmp6"
- , NULL
- , "IPv6 ICMP MLDv2 Reports"
- , "reports/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_ICMP_LDV2
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InMLDv2Reports = rrddim_add(st, "InMLDv2Reports", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutMLDv2Reports = rrddim_add(st, "OutMLDv2Reports", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InMLDv2Reports, Icmp6InMLDv2Reports);
- rrddim_set_by_pointer(st, rd_OutMLDv2Reports, Icmp6OutMLDv2Reports);
- rrdset_done(st);
- }
-
- if(do_ip6_icmp_types == CONFIG_BOOLEAN_YES || (do_ip6_icmp_types == CONFIG_BOOLEAN_AUTO &&
- (Icmp6InType1 ||
- Icmp6InType128 ||
- Icmp6InType129 ||
- Icmp6InType136 ||
- Icmp6OutType1 ||
- Icmp6OutType128 ||
- Icmp6OutType129 ||
- Icmp6OutType133 ||
- Icmp6OutType135 ||
- Icmp6OutType143 ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_icmp_types = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_InType1 = NULL,
- *rd_InType128 = NULL,
- *rd_InType129 = NULL,
- *rd_InType136 = NULL,
- *rd_OutType1 = NULL,
- *rd_OutType128 = NULL,
- *rd_OutType129 = NULL,
- *rd_OutType133 = NULL,
- *rd_OutType135 = NULL,
- *rd_OutType143 = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6
- , "icmptypes"
- , NULL
- , "icmp6"
- , NULL
- , "IPv6 ICMP Types"
- , "messages/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV6_ICMP_TYPES
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InType1 = rrddim_add(st, "InType1", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InType128 = rrddim_add(st, "InType128", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InType129 = rrddim_add(st, "InType129", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InType136 = rrddim_add(st, "InType136", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutType1 = rrddim_add(st, "OutType1", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutType128 = rrddim_add(st, "OutType128", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutType129 = rrddim_add(st, "OutType129", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutType133 = rrddim_add(st, "OutType133", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutType135 = rrddim_add(st, "OutType135", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutType143 = rrddim_add(st, "OutType143", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InType1, Icmp6InType1);
- rrddim_set_by_pointer(st, rd_InType128, Icmp6InType128);
- rrddim_set_by_pointer(st, rd_InType129, Icmp6InType129);
- rrddim_set_by_pointer(st, rd_InType136, Icmp6InType136);
- rrddim_set_by_pointer(st, rd_OutType1, Icmp6OutType1);
- rrddim_set_by_pointer(st, rd_OutType128, Icmp6OutType128);
- rrddim_set_by_pointer(st, rd_OutType129, Icmp6OutType129);
- rrddim_set_by_pointer(st, rd_OutType133, Icmp6OutType133);
- rrddim_set_by_pointer(st, rd_OutType135, Icmp6OutType135);
- rrddim_set_by_pointer(st, rd_OutType143, Icmp6OutType143);
- rrdset_done(st);
- }
-
- if (do_ip6_ect == CONFIG_BOOLEAN_YES ||
- (do_ip6_ect == CONFIG_BOOLEAN_AUTO && (Ip6InNoECTPkts || Ip6InECT1Pkts || Ip6InECT0Pkts || Ip6InCEPkts ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip6_ect = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_InNoECTPkts = NULL, *rd_InECT1Pkts = NULL, *rd_InECT0Pkts = NULL, *rd_InCEPkts = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP6,
- "ect",
- NULL,
- "packets",
- NULL,
- "IPv6 ECT Packets",
- "packets/s",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_NETSTAT_NAME,
- NETDATA_CHART_PRIO_IPV6_ECT,
- update_every,
- RRDSET_TYPE_LINE);
-
- rd_InNoECTPkts = rrddim_add(st, "InNoECTPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InECT1Pkts = rrddim_add(st, "InECT1Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InECT0Pkts = rrddim_add(st, "InECT0Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InCEPkts = rrddim_add(st, "InCEPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InNoECTPkts, Ip6InNoECTPkts);
- rrddim_set_by_pointer(st, rd_InECT1Pkts, Ip6InECT1Pkts);
- rrddim_set_by_pointer(st, rd_InECT0Pkts, Ip6InECT0Pkts);
- rrddim_set_by_pointer(st, rd_InCEPkts, Ip6InCEPkts);
- rrdset_done(st);
- }
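The four dimensions above count the codepoints of the two ECN bits in the IP traffic class field (RFC 3168): Not-ECT, ECT(1), ECT(0) and CE (congestion experienced). A small sketch of that mapping:

// Sketch: the InNoECTPkts/InECT1Pkts/InECT0Pkts/InCEPkts dimensions above
// correspond to the two-bit ECN field of received packets (RFC 3168).
#include <stdio.h>

static const char *ecn_codepoint(unsigned tc) {
    switch (tc & 0x3) {           // the low two bits carry the ECN codepoint
        case 0x0: return "Not-ECT (InNoECTPkts)";
        case 0x1: return "ECT(1)  (InECT1Pkts)";
        case 0x2: return "ECT(0)  (InECT0Pkts)";
        default:  return "CE      (InCEPkts)";
    }
}

int main(void) {
    for (unsigned tc = 0; tc < 4; tc++)
        printf("%u -> %s\n", tc, ecn_codepoint(tc));
    return 0;
}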
-}
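Both collectors in this file rely on netdata's ARL (Adaptive Re-sortable List): the destination of each interesting counter is registered once with arl_expect(), and every later pass just writes parsed values through the bound pointers; the real ARL additionally keeps entries in the order they appear in the file, so re-parsing a stable file needs no searching. A toy version of the registration idea with a plain linear table (illustrative names throughout, not netdata's API):

// Toy version of the arl_expect()/arl_check() idea: bind names to
// destinations once, then store values through the bound pointers.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct expectation { const char *name; unsigned long long *dst; };

static unsigned long long in_receives, out_requests;

static struct expectation table[] = {
    { "Ip6InReceives",  &in_receives  },
    { "Ip6OutRequests", &out_requests },
};

static void check(const char *name, const char *value) {
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        if (!strcmp(table[i].name, name)) {
            *table[i].dst = strtoull(value, NULL, 10);
            return;
        }
    }
}

int main(void) {
    check("Ip6InReceives", "8314");   // as if parsed from /proc/net/snmp6
    check("Ip6OutRequests", "4121");
    printf("in=%llu out=%llu\n", in_receives, out_requests);
    return 0;
}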
-
-int do_proc_net_netstat(int update_every, usec_t dt) {
- (void)dt;
-
-    static int do_bandwidth = -1, do_inerrors = -1, do_mcast = -1, do_bcast = -1, do_mcast_p = -1, do_bcast_p = -1, do_ecn = -1,
- do_tcpext_reorder = -1, do_tcpext_syscookies = -1, do_tcpext_ofo = -1, do_tcpext_connaborts = -1, do_tcpext_memory = -1,
- do_tcpext_syn_queue = -1, do_tcpext_accept_queue = -1;
-
- static int do_ip_packets = -1, do_ip_fragsout = -1, do_ip_fragsin = -1, do_ip_errors = -1,
- do_tcp_sockets = -1, do_tcp_packets = -1, do_tcp_errors = -1, do_tcp_handshake = -1, do_tcp_opens = -1,
- do_udp_packets = -1, do_udp_errors = -1, do_icmp_packets = -1, do_icmpmsg = -1, do_udplite_packets = -1;
-
- static uint32_t hash_ipext = 0, hash_tcpext = 0;
- static uint32_t hash_ip = 0, hash_icmp = 0, hash_tcp = 0, hash_udp = 0, hash_icmpmsg = 0, hash_udplite = 0;
-
- static procfile *ff_netstat = NULL;
- static procfile *ff_snmp = NULL;
-
- static ARL_BASE *arl_tcpext = NULL;
- static ARL_BASE *arl_ipext = NULL;
-
- static ARL_BASE *arl_ip = NULL;
- static ARL_BASE *arl_icmp = NULL;
- static ARL_BASE *arl_icmpmsg = NULL;
- static ARL_BASE *arl_tcp = NULL;
- static ARL_BASE *arl_udp = NULL;
- static ARL_BASE *arl_udplite = NULL;
-
- static const RRDVAR_ACQUIRED *tcp_max_connections_var = NULL;
-
- // --------------------------------------------------------------------
- // IP
-
- // IP bandwidth
- static unsigned long long ipext_InOctets = 0;
- static unsigned long long ipext_OutOctets = 0;
-
- // IP input errors
- static unsigned long long ipext_InNoRoutes = 0;
- static unsigned long long ipext_InTruncatedPkts = 0;
- static unsigned long long ipext_InCsumErrors = 0;
-
- // IP multicast bandwidth
- static unsigned long long ipext_InMcastOctets = 0;
- static unsigned long long ipext_OutMcastOctets = 0;
-
- // IP multicast packets
- static unsigned long long ipext_InMcastPkts = 0;
- static unsigned long long ipext_OutMcastPkts = 0;
-
- // IP broadcast bandwidth
- static unsigned long long ipext_InBcastOctets = 0;
- static unsigned long long ipext_OutBcastOctets = 0;
-
- // IP broadcast packets
- static unsigned long long ipext_InBcastPkts = 0;
- static unsigned long long ipext_OutBcastPkts = 0;
-
- // IP ECN
- static unsigned long long ipext_InNoECTPkts = 0;
- static unsigned long long ipext_InECT1Pkts = 0;
- static unsigned long long ipext_InECT0Pkts = 0;
- static unsigned long long ipext_InCEPkts = 0;
-
- // --------------------------------------------------------------------
- // IP TCP
-
- // IP TCP Reordering
- static unsigned long long tcpext_TCPRenoReorder = 0;
- static unsigned long long tcpext_TCPFACKReorder = 0;
- static unsigned long long tcpext_TCPSACKReorder = 0;
- static unsigned long long tcpext_TCPTSReorder = 0;
-
- // IP TCP SYN Cookies
- static unsigned long long tcpext_SyncookiesSent = 0;
- static unsigned long long tcpext_SyncookiesRecv = 0;
- static unsigned long long tcpext_SyncookiesFailed = 0;
-
- // IP TCP Out Of Order Queue
- // http://www.spinics.net/lists/netdev/msg204696.html
- static unsigned long long tcpext_TCPOFOQueue = 0; // Number of packets queued in OFO queue
- static unsigned long long tcpext_TCPOFODrop = 0; // Number of packets meant to be queued in OFO but dropped because socket rcvbuf limit hit.
- static unsigned long long tcpext_TCPOFOMerge = 0; // Number of packets in OFO that were merged with other packets.
- static unsigned long long tcpext_OfoPruned = 0; // packets dropped from out-of-order queue because of socket buffer overrun
-
- // IP TCP connection resets
- // https://github.com/ecki/net-tools/blob/bd8bceaed2311651710331a7f8990c3e31be9840/statistics.c
- static unsigned long long tcpext_TCPAbortOnData = 0; // connections reset due to unexpected data
- static unsigned long long tcpext_TCPAbortOnClose = 0; // connections reset due to early user close
- static unsigned long long tcpext_TCPAbortOnMemory = 0; // connections aborted due to memory pressure
- static unsigned long long tcpext_TCPAbortOnTimeout = 0; // connections aborted due to timeout
- static unsigned long long tcpext_TCPAbortOnLinger = 0; // connections aborted after user close in linger timeout
- static unsigned long long tcpext_TCPAbortFailed = 0; // times unable to send RST due to no memory
-
- // https://perfchron.com/2015/12/26/investigating-linux-network-issues-with-netstat-and-nstat/
- static unsigned long long tcpext_ListenOverflows = 0; // times the listen queue of a socket overflowed
- static unsigned long long tcpext_ListenDrops = 0; // SYNs to LISTEN sockets ignored
-
- // IP TCP memory pressures
- static unsigned long long tcpext_TCPMemoryPressures = 0;
-
- static unsigned long long tcpext_TCPReqQFullDrop = 0;
- static unsigned long long tcpext_TCPReqQFullDoCookies = 0;
-
- static unsigned long long tcpext_TCPSynRetrans = 0;
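The TcpExt counters documented above are cumulative since boot, so rates come from successive samples, the same counter-to-rate step the INCREMENTAL dimensions perform. A mini nstat-style sketch that samples one of them twice and prints the delta, assuming the standard /proc/net/netstat layout:

// Mini nstat-style sketch: read one TcpExt counter twice, print the delta.
// Standalone and illustrative, no netdata code.
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static long long read_tcpext(const char *counter) {
    FILE *fp = fopen("/proc/net/netstat", "r");
    if (!fp) return -1;

    char header[4096], values[4096];
    long long result = -1;

    // families come as adjacent line pairs: "TcpExt: <names...>" then
    // "TcpExt: <values...>"
    while (fgets(header, sizeof(header), fp) && fgets(values, sizeof(values), fp)) {
        if (strncmp(header, "TcpExt:", 7)) continue;

        char *hs = NULL, *vs = NULL;
        char *n = strtok_r(header, " \n", &hs);   // consume the prefixes
        char *v = strtok_r(values, " \n", &vs);
        while ((n = strtok_r(NULL, " \n", &hs)) && (v = strtok_r(NULL, " \n", &vs))) {
            if (!strcmp(n, counter)) {
                sscanf(v, "%lld", &result);
                break;
            }
        }
        break;
    }

    fclose(fp);
    return result;
}

int main(void) {
    long long before = read_tcpext("ListenDrops");
    sleep(1);
    long long after = read_tcpext("ListenDrops");
    if (before >= 0 && after >= 0)
        printf("ListenDrops: +%lld in the last second\n", after - before);
    return 0;
}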
-
- // prepare for /proc/net/netstat parsing
-
- if(unlikely(!arl_ipext)) {
- hash_ipext = simple_hash("IpExt");
- hash_tcpext = simple_hash("TcpExt");
-
- do_bandwidth = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "bandwidth", CONFIG_BOOLEAN_AUTO);
- do_inerrors = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "input errors", CONFIG_BOOLEAN_AUTO);
- do_mcast = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "multicast bandwidth", CONFIG_BOOLEAN_AUTO);
- do_bcast = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "broadcast bandwidth", CONFIG_BOOLEAN_AUTO);
- do_mcast_p = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "multicast packets", CONFIG_BOOLEAN_AUTO);
- do_bcast_p = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "broadcast packets", CONFIG_BOOLEAN_AUTO);
- do_ecn = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "ECN packets", CONFIG_BOOLEAN_AUTO);
-
- do_tcpext_reorder = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP reorders", CONFIG_BOOLEAN_AUTO);
- do_tcpext_syscookies = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP SYN cookies", CONFIG_BOOLEAN_AUTO);
- do_tcpext_ofo = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP out-of-order queue", CONFIG_BOOLEAN_AUTO);
- do_tcpext_connaborts = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP connection aborts", CONFIG_BOOLEAN_AUTO);
- do_tcpext_memory = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP memory pressures", CONFIG_BOOLEAN_AUTO);
-
- do_tcpext_syn_queue = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP SYN queue", CONFIG_BOOLEAN_AUTO);
- do_tcpext_accept_queue = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "TCP accept queue", CONFIG_BOOLEAN_AUTO);
-
- arl_ipext = arl_create("netstat/ipext", NULL, 60);
- arl_tcpext = arl_create("netstat/tcpext", NULL, 60);
-
- // --------------------------------------------------------------------
- // IP
-
- if(do_bandwidth != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_ipext, "InOctets", &ipext_InOctets);
- arl_expect(arl_ipext, "OutOctets", &ipext_OutOctets);
- }
-
- if(do_inerrors != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_ipext, "InNoRoutes", &ipext_InNoRoutes);
- arl_expect(arl_ipext, "InTruncatedPkts", &ipext_InTruncatedPkts);
- arl_expect(arl_ipext, "InCsumErrors", &ipext_InCsumErrors);
- }
-
- if(do_mcast != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_ipext, "InMcastOctets", &ipext_InMcastOctets);
- arl_expect(arl_ipext, "OutMcastOctets", &ipext_OutMcastOctets);
- }
-
- if(do_mcast_p != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_ipext, "InMcastPkts", &ipext_InMcastPkts);
- arl_expect(arl_ipext, "OutMcastPkts", &ipext_OutMcastPkts);
- }
-
- if(do_bcast != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_ipext, "InBcastPkts", &ipext_InBcastPkts);
- arl_expect(arl_ipext, "OutBcastPkts", &ipext_OutBcastPkts);
- }
-
- if(do_bcast_p != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_ipext, "InBcastOctets", &ipext_InBcastOctets);
- arl_expect(arl_ipext, "OutBcastOctets", &ipext_OutBcastOctets);
- }
-
- if(do_ecn != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_ipext, "InNoECTPkts", &ipext_InNoECTPkts);
- arl_expect(arl_ipext, "InECT1Pkts", &ipext_InECT1Pkts);
- arl_expect(arl_ipext, "InECT0Pkts", &ipext_InECT0Pkts);
- arl_expect(arl_ipext, "InCEPkts", &ipext_InCEPkts);
- }
-
- // --------------------------------------------------------------------
- // IP TCP
-
- if(do_tcpext_reorder != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_tcpext, "TCPFACKReorder", &tcpext_TCPFACKReorder);
- arl_expect(arl_tcpext, "TCPSACKReorder", &tcpext_TCPSACKReorder);
- arl_expect(arl_tcpext, "TCPRenoReorder", &tcpext_TCPRenoReorder);
- arl_expect(arl_tcpext, "TCPTSReorder", &tcpext_TCPTSReorder);
- }
-
- if(do_tcpext_syscookies != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_tcpext, "SyncookiesSent", &tcpext_SyncookiesSent);
- arl_expect(arl_tcpext, "SyncookiesRecv", &tcpext_SyncookiesRecv);
- arl_expect(arl_tcpext, "SyncookiesFailed", &tcpext_SyncookiesFailed);
- }
-
- if(do_tcpext_ofo != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_tcpext, "TCPOFOQueue", &tcpext_TCPOFOQueue);
- arl_expect(arl_tcpext, "TCPOFODrop", &tcpext_TCPOFODrop);
- arl_expect(arl_tcpext, "TCPOFOMerge", &tcpext_TCPOFOMerge);
- arl_expect(arl_tcpext, "OfoPruned", &tcpext_OfoPruned);
- }
-
- if(do_tcpext_connaborts != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_tcpext, "TCPAbortOnData", &tcpext_TCPAbortOnData);
- arl_expect(arl_tcpext, "TCPAbortOnClose", &tcpext_TCPAbortOnClose);
- arl_expect(arl_tcpext, "TCPAbortOnMemory", &tcpext_TCPAbortOnMemory);
- arl_expect(arl_tcpext, "TCPAbortOnTimeout", &tcpext_TCPAbortOnTimeout);
- arl_expect(arl_tcpext, "TCPAbortOnLinger", &tcpext_TCPAbortOnLinger);
- arl_expect(arl_tcpext, "TCPAbortFailed", &tcpext_TCPAbortFailed);
- }
-
- if(do_tcpext_memory != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_tcpext, "TCPMemoryPressures", &tcpext_TCPMemoryPressures);
- }
-
- if(do_tcpext_accept_queue != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_tcpext, "ListenOverflows", &tcpext_ListenOverflows);
- arl_expect(arl_tcpext, "ListenDrops", &tcpext_ListenDrops);
- }
-
- if(do_tcpext_syn_queue != CONFIG_BOOLEAN_NO) {
- arl_expect(arl_tcpext, "TCPReqQFullDrop", &tcpext_TCPReqQFullDrop);
- arl_expect(arl_tcpext, "TCPReqQFullDoCookies", &tcpext_TCPReqQFullDoCookies);
- }
-
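- // TCPSynRetrans is collected unconditionally: the tcphandshake chart below always uses it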
- arl_expect(arl_tcpext, "TCPSynRetrans", &tcpext_TCPSynRetrans);
- }
-
- // prepare for /proc/net/snmp parsing
-
- if(unlikely(!arl_ip)) {
- do_ip_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 packets", CONFIG_BOOLEAN_AUTO);
- do_ip_fragsout = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 fragments sent", CONFIG_BOOLEAN_AUTO);
- do_ip_fragsin = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 fragments assembly", CONFIG_BOOLEAN_AUTO);
- do_ip_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 errors", CONFIG_BOOLEAN_AUTO);
- do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP connections", CONFIG_BOOLEAN_AUTO);
- do_tcp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP packets", CONFIG_BOOLEAN_AUTO);
- do_tcp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP errors", CONFIG_BOOLEAN_AUTO);
- do_tcp_opens = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP opens", CONFIG_BOOLEAN_AUTO);
- do_tcp_handshake = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 TCP handshake issues", CONFIG_BOOLEAN_AUTO);
- do_udp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 UDP packets", CONFIG_BOOLEAN_AUTO);
- do_udp_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 UDP errors", CONFIG_BOOLEAN_AUTO);
- do_icmp_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 ICMP packets", CONFIG_BOOLEAN_AUTO);
- do_icmpmsg = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 ICMP messages", CONFIG_BOOLEAN_AUTO);
- do_udplite_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/snmp", "ipv4 UDPLite packets", CONFIG_BOOLEAN_AUTO);
-
- hash_ip = simple_hash("Ip");
- hash_tcp = simple_hash("Tcp");
- hash_udp = simple_hash("Udp");
- hash_icmp = simple_hash("Icmp");
- hash_icmpmsg = simple_hash("IcmpMsg");
- hash_udplite = simple_hash("UdpLite");
-
- arl_ip = arl_create("snmp/Ip", arl_callback_str2kernel_uint_t, 60);
- // arl_expect(arl_ip, "Forwarding", &snmp_root.ip_Forwarding);
- arl_expect(arl_ip, "DefaultTTL", &snmp_root.ip_DefaultTTL);
- arl_expect(arl_ip, "InReceives", &snmp_root.ip_InReceives);
- arl_expect(arl_ip, "InHdrErrors", &snmp_root.ip_InHdrErrors);
- arl_expect(arl_ip, "InAddrErrors", &snmp_root.ip_InAddrErrors);
- arl_expect(arl_ip, "ForwDatagrams", &snmp_root.ip_ForwDatagrams);
- arl_expect(arl_ip, "InUnknownProtos", &snmp_root.ip_InUnknownProtos);
- arl_expect(arl_ip, "InDiscards", &snmp_root.ip_InDiscards);
- arl_expect(arl_ip, "InDelivers", &snmp_root.ip_InDelivers);
- arl_expect(arl_ip, "OutRequests", &snmp_root.ip_OutRequests);
- arl_expect(arl_ip, "OutDiscards", &snmp_root.ip_OutDiscards);
- arl_expect(arl_ip, "OutNoRoutes", &snmp_root.ip_OutNoRoutes);
- arl_expect(arl_ip, "ReasmTimeout", &snmp_root.ip_ReasmTimeout);
- arl_expect(arl_ip, "ReasmReqds", &snmp_root.ip_ReasmReqds);
- arl_expect(arl_ip, "ReasmOKs", &snmp_root.ip_ReasmOKs);
- arl_expect(arl_ip, "ReasmFails", &snmp_root.ip_ReasmFails);
- arl_expect(arl_ip, "FragOKs", &snmp_root.ip_FragOKs);
- arl_expect(arl_ip, "FragFails", &snmp_root.ip_FragFails);
- arl_expect(arl_ip, "FragCreates", &snmp_root.ip_FragCreates);
-
- arl_icmp = arl_create("snmp/Icmp", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_icmp, "InMsgs", &snmp_root.icmp_InMsgs);
- arl_expect(arl_icmp, "OutMsgs", &snmp_root.icmp_OutMsgs);
- arl_expect(arl_icmp, "InErrors", &snmp_root.icmp_InErrors);
- arl_expect(arl_icmp, "OutErrors", &snmp_root.icmp_OutErrors);
- arl_expect(arl_icmp, "InCsumErrors", &snmp_root.icmp_InCsumErrors);
-
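- // /proc/net/snmp IcmpMsg exposes one counter per ICMP type number (InTypeN/OutTypeN);
- // map the common types to readable dimension names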
- arl_icmpmsg = arl_create("snmp/Icmpmsg", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_icmpmsg, "InType0", &snmp_root.icmpmsg_InEchoReps);
- arl_expect(arl_icmpmsg, "OutType0", &snmp_root.icmpmsg_OutEchoReps);
- arl_expect(arl_icmpmsg, "InType3", &snmp_root.icmpmsg_InDestUnreachs);
- arl_expect(arl_icmpmsg, "OutType3", &snmp_root.icmpmsg_OutDestUnreachs);
- arl_expect(arl_icmpmsg, "InType5", &snmp_root.icmpmsg_InRedirects);
- arl_expect(arl_icmpmsg, "OutType5", &snmp_root.icmpmsg_OutRedirects);
- arl_expect(arl_icmpmsg, "InType8", &snmp_root.icmpmsg_InEchos);
- arl_expect(arl_icmpmsg, "OutType8", &snmp_root.icmpmsg_OutEchos);
- arl_expect(arl_icmpmsg, "InType9", &snmp_root.icmpmsg_InRouterAdvert);
- arl_expect(arl_icmpmsg, "OutType9", &snmp_root.icmpmsg_OutRouterAdvert);
- arl_expect(arl_icmpmsg, "InType10", &snmp_root.icmpmsg_InRouterSelect);
- arl_expect(arl_icmpmsg, "OutType10", &snmp_root.icmpmsg_OutRouterSelect);
- arl_expect(arl_icmpmsg, "InType11", &snmp_root.icmpmsg_InTimeExcds);
- arl_expect(arl_icmpmsg, "OutType11", &snmp_root.icmpmsg_OutTimeExcds);
- arl_expect(arl_icmpmsg, "InType12", &snmp_root.icmpmsg_InParmProbs);
- arl_expect(arl_icmpmsg, "OutType12", &snmp_root.icmpmsg_OutParmProbs);
- arl_expect(arl_icmpmsg, "InType13", &snmp_root.icmpmsg_InTimestamps);
- arl_expect(arl_icmpmsg, "OutType13", &snmp_root.icmpmsg_OutTimestamps);
- arl_expect(arl_icmpmsg, "InType14", &snmp_root.icmpmsg_InTimestampReps);
- arl_expect(arl_icmpmsg, "OutType14", &snmp_root.icmpmsg_OutTimestampReps);
-
- arl_tcp = arl_create("snmp/Tcp", arl_callback_str2kernel_uint_t, 60);
- // arl_expect(arl_tcp, "RtoAlgorithm", &snmp_root.tcp_RtoAlgorithm);
- // arl_expect(arl_tcp, "RtoMin", &snmp_root.tcp_RtoMin);
- // arl_expect(arl_tcp, "RtoMax", &snmp_root.tcp_RtoMax);
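- // MaxConn is signed: per the TCP MIB it is -1 when the connection limit is dynamic,
- // so parse it with the ssize_t callback instead of the unsigned default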
- arl_expect_custom(arl_tcp, "MaxConn", arl_callback_ssize_t, &snmp_root.tcp_MaxConn);
- arl_expect(arl_tcp, "ActiveOpens", &snmp_root.tcp_ActiveOpens);
- arl_expect(arl_tcp, "PassiveOpens", &snmp_root.tcp_PassiveOpens);
- arl_expect(arl_tcp, "AttemptFails", &snmp_root.tcp_AttemptFails);
- arl_expect(arl_tcp, "EstabResets", &snmp_root.tcp_EstabResets);
- arl_expect(arl_tcp, "CurrEstab", &snmp_root.tcp_CurrEstab);
- arl_expect(arl_tcp, "InSegs", &snmp_root.tcp_InSegs);
- arl_expect(arl_tcp, "OutSegs", &snmp_root.tcp_OutSegs);
- arl_expect(arl_tcp, "RetransSegs", &snmp_root.tcp_RetransSegs);
- arl_expect(arl_tcp, "InErrs", &snmp_root.tcp_InErrs);
- arl_expect(arl_tcp, "OutRsts", &snmp_root.tcp_OutRsts);
- arl_expect(arl_tcp, "InCsumErrors", &snmp_root.tcp_InCsumErrors);
-
- arl_udp = arl_create("snmp/Udp", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_udp, "InDatagrams", &snmp_root.udp_InDatagrams);
- arl_expect(arl_udp, "NoPorts", &snmp_root.udp_NoPorts);
- arl_expect(arl_udp, "InErrors", &snmp_root.udp_InErrors);
- arl_expect(arl_udp, "OutDatagrams", &snmp_root.udp_OutDatagrams);
- arl_expect(arl_udp, "RcvbufErrors", &snmp_root.udp_RcvbufErrors);
- arl_expect(arl_udp, "SndbufErrors", &snmp_root.udp_SndbufErrors);
- arl_expect(arl_udp, "InCsumErrors", &snmp_root.udp_InCsumErrors);
- arl_expect(arl_udp, "IgnoredMulti", &snmp_root.udp_IgnoredMulti);
-
- arl_udplite = arl_create("snmp/Udplite", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_udplite, "InDatagrams", &snmp_root.udplite_InDatagrams);
- arl_expect(arl_udplite, "NoPorts", &snmp_root.udplite_NoPorts);
- arl_expect(arl_udplite, "InErrors", &snmp_root.udplite_InErrors);
- arl_expect(arl_udplite, "OutDatagrams", &snmp_root.udplite_OutDatagrams);
- arl_expect(arl_udplite, "RcvbufErrors", &snmp_root.udplite_RcvbufErrors);
- arl_expect(arl_udplite, "SndbufErrors", &snmp_root.udplite_SndbufErrors);
- arl_expect(arl_udplite, "InCsumErrors", &snmp_root.udplite_InCsumErrors);
- arl_expect(arl_udplite, "IgnoredMulti", &snmp_root.udplite_IgnoredMulti);
-
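- // exported as a custom host variable, so it can be referenced by health configurations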
- tcp_max_connections_var = rrdvar_custom_host_variable_add_and_acquire(localhost, "tcp_max_connections");
- }
-
- size_t lines, l, words;
-
- // parse /proc/net/netstat
-
- if(unlikely(!ff_netstat)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/netstat");
- ff_netstat = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NETSTAT, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff_netstat)) return 1;
- }
-
- ff_netstat = procfile_readall(ff_netstat);
- if(unlikely(!ff_netstat)) return 0; // return 0, so that we retry opening it next time
-
- lines = procfile_lines(ff_netstat);
-
- arl_begin(arl_ipext);
- arl_begin(arl_tcpext);
-
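- // /proc/net/netstat carries pairs of lines: a header line with field names followed by
- // a line with values; h keeps the header line index while l advances to the values line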
- for(l = 0; l < lines ;l++) {
- char *key = procfile_lineword(ff_netstat, l, 0);
- uint32_t hash = simple_hash(key);
-
- if(unlikely(hash == hash_ipext && strcmp(key, "IpExt") == 0)) {
- size_t h = l++;
-
- words = procfile_linewords(ff_netstat, l);
- if(unlikely(words < 2)) {
- collector_error("Cannot read /proc/net/netstat IpExt line. Expected 2+ params, read %zu.", words);
- continue;
- }
-
- parse_line_pair(ff_netstat, arl_ipext, h, l);
-
- }
- else if(unlikely(hash == hash_tcpext && strcmp(key, "TcpExt") == 0)) {
- size_t h = l++;
-
- words = procfile_linewords(ff_netstat, l);
- if(unlikely(words < 2)) {
- collector_error("Cannot read /proc/net/netstat TcpExt line. Expected 2+ params, read %zu.", words);
- continue;
- }
-
- parse_line_pair(ff_netstat, arl_tcpext, h, l);
- }
- }
-
- // parse /proc/net/snmp
-
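- // same header/values layout as netstat, but here each value is matched to its
- // header word one by one via arl_check()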
- if(unlikely(!ff_snmp)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/snmp");
- ff_snmp = procfile_open(config_get("plugin:proc:/proc/net/snmp", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff_snmp)) return 1;
- }
-
- ff_snmp = procfile_readall(ff_snmp);
- if(unlikely(!ff_snmp)) return 0; // return 0, so that we retry opening it next time
-
- lines = procfile_lines(ff_snmp);
- size_t w;
-
- for(l = 0; l < lines ;l++) {
- char *key = procfile_lineword(ff_snmp, l, 0);
- uint32_t hash = simple_hash(key);
-
- if(unlikely(hash == hash_ip && strcmp(key, "Ip") == 0)) {
- size_t h = l++;
-
- if(strcmp(procfile_lineword(ff_snmp, l, 0), "Ip") != 0) {
- collector_error("Cannot read Ip line from /proc/net/snmp.");
- break;
- }
-
- words = procfile_linewords(ff_snmp, l);
- if(words < 3) {
- collector_error("Cannot read /proc/net/snmp Ip line. Expected 3+ params, read %zu.", words);
- continue;
- }
-
- arl_begin(arl_ip);
- for(w = 1; w < words ; w++) {
- if (unlikely(arl_check(arl_ip, procfile_lineword(ff_snmp, h, w), procfile_lineword(ff_snmp, l, w)) != 0))
- break;
- }
- }
- else if(unlikely(hash == hash_icmp && strcmp(key, "Icmp") == 0)) {
- size_t h = l++;
-
- if(strcmp(procfile_lineword(ff_snmp, l, 0), "Icmp") != 0) {
- collector_error("Cannot read Icmp line from /proc/net/snmp.");
- break;
- }
-
- words = procfile_linewords(ff_snmp, l);
- if(words < 3) {
- collector_error("Cannot read /proc/net/snmp Icmp line. Expected 3+ params, read %zu.", words);
- continue;
- }
-
- arl_begin(arl_icmp);
- for(w = 1; w < words ; w++) {
- if (unlikely(arl_check(arl_icmp, procfile_lineword(ff_snmp, h, w), procfile_lineword(ff_snmp, l, w)) != 0))
- break;
- }
- }
- else if(unlikely(hash == hash_icmpmsg && strcmp(key, "IcmpMsg") == 0)) {
- size_t h = l++;
-
- if(strcmp(procfile_lineword(ff_snmp, l, 0), "IcmpMsg") != 0) {
- collector_error("Cannot read IcmpMsg line from /proc/net/snmp.");
- break;
- }
-
- words = procfile_linewords(ff_snmp, l);
- if(words < 2) {
- collector_error("Cannot read /proc/net/snmp IcmpMsg line. Expected 2+ params, read %zu.", words);
- continue;
- }
-
- arl_begin(arl_icmpmsg);
- for(w = 1; w < words ; w++) {
- if (unlikely(arl_check(arl_icmpmsg, procfile_lineword(ff_snmp, h, w), procfile_lineword(ff_snmp, l, w)) != 0))
- break;
- }
- }
- else if(unlikely(hash == hash_tcp && strcmp(key, "Tcp") == 0)) {
- size_t h = l++;
-
- if(strcmp(procfile_lineword(ff_snmp, l, 0), "Tcp") != 0) {
- collector_error("Cannot read Tcp line from /proc/net/snmp.");
- break;
- }
-
- words = procfile_linewords(ff_snmp, l);
- if(words < 3) {
- collector_error("Cannot read /proc/net/snmp Tcp line. Expected 3+ params, read %zu.", words);
- continue;
- }
-
- arl_begin(arl_tcp);
- for(w = 1; w < words ; w++) {
- if (unlikely(arl_check(arl_tcp, procfile_lineword(ff_snmp, h, w), procfile_lineword(ff_snmp, l, w)) != 0))
- break;
- }
- }
- else if(unlikely(hash == hash_udp && strcmp(key, "Udp") == 0)) {
- size_t h = l++;
-
- if(strcmp(procfile_lineword(ff_snmp, l, 0), "Udp") != 0) {
- collector_error("Cannot read Udp line from /proc/net/snmp.");
- break;
- }
-
- words = procfile_linewords(ff_snmp, l);
- if(words < 3) {
- collector_error("Cannot read /proc/net/snmp Udp line. Expected 3+ params, read %zu.", words);
- continue;
- }
-
- arl_begin(arl_udp);
- for(w = 1; w < words ; w++) {
- if (unlikely(arl_check(arl_udp, procfile_lineword(ff_snmp, h, w), procfile_lineword(ff_snmp, l, w)) != 0))
- break;
- }
- }
- else if(unlikely(hash == hash_udplite && strcmp(key, "UdpLite") == 0)) {
- size_t h = l++;
-
- if(strcmp(procfile_lineword(ff_snmp, l, 0), "UdpLite") != 0) {
- collector_error("Cannot read UdpLite line from /proc/net/snmp.");
- break;
- }
-
- words = procfile_linewords(ff_snmp, l);
- if(words < 3) {
- collector_error("Cannot read /proc/net/snmp UdpLite line. Expected 3+ params, read %zu.", words);
- continue;
- }
-
- arl_begin(arl_udplite);
- for(w = 1; w < words ; w++) {
- if (unlikely(arl_check(arl_udplite, procfile_lineword(ff_snmp, h, w), procfile_lineword(ff_snmp, l, w)) != 0))
- break;
- }
- }
- }
-
- // netstat IpExt charts
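- // chart pattern used throughout: a flag left at AUTO creates its chart only once one of
- // the metrics becomes non-zero (or when zero metrics are enabled), then latches to YES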
-
- if(do_bandwidth == CONFIG_BOOLEAN_YES || (do_bandwidth == CONFIG_BOOLEAN_AUTO &&
- (ipext_InOctets ||
- ipext_OutOctets ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_bandwidth = CONFIG_BOOLEAN_YES;
- static RRDSET *st_system_ip = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if(unlikely(!st_system_ip)) {
- st_system_ip = rrdset_create_localhost(
- "system"
- , "ip" // FIXME: this is ipv4. Not changing it because it will require to do changes in cloud-frontend too
- , NULL
- , "network"
- , NULL
- , "IPv4 Bandwidth"
- , "kilobits/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_SYSTEM_IP
- , update_every
- , RRDSET_TYPE_AREA
- );
-
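- // incremental dimensions convert the octet counters to rates; the 8/BITS_IN_A_KILOBIT
- // scaling yields kilobits/s, and the negative multiplier draws "sent" below zero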
- rd_in = rrddim_add(st_system_ip, "InOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st_system_ip, "OutOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_system_ip, rd_in, ipext_InOctets);
- rrddim_set_by_pointer(st_system_ip, rd_out, ipext_OutOctets);
- rrdset_done(st_system_ip);
- }
-
- if(do_mcast == CONFIG_BOOLEAN_YES || (do_mcast == CONFIG_BOOLEAN_AUTO &&
- (ipext_InMcastOctets ||
- ipext_OutMcastOctets ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_mcast = CONFIG_BOOLEAN_YES;
- static RRDSET *st_ip_mcast = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if(unlikely(!st_ip_mcast)) {
- st_ip_mcast = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "mcast"
- , NULL
- , "multicast"
- , NULL
- , "IP Multicast Bandwidth"
- , "kilobits/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_MCAST
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(st_ip_mcast, RRDSET_FLAG_DETAIL);
-
- rd_in = rrddim_add(st_ip_mcast, "InMcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st_ip_mcast, "OutMcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_ip_mcast, rd_in, ipext_InMcastOctets);
- rrddim_set_by_pointer(st_ip_mcast, rd_out, ipext_OutMcastOctets);
-
- rrdset_done(st_ip_mcast);
- }
-
- // --------------------------------------------------------------------
-
- if(do_bcast == CONFIG_BOOLEAN_YES || (do_bcast == CONFIG_BOOLEAN_AUTO &&
- (ipext_InBcastOctets ||
- ipext_OutBcastOctets ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_bcast = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_ip_bcast = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if(unlikely(!st_ip_bcast)) {
- st_ip_bcast = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "bcast"
- , NULL
- , "broadcast"
- , NULL
- , "IPv4 Broadcast Bandwidth"
- , "kilobits/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_BCAST
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(st_ip_bcast, RRDSET_FLAG_DETAIL);
-
- rd_in = rrddim_add(st_ip_bcast, "InBcastOctets", "received", 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st_ip_bcast, "OutBcastOctets", "sent", -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_ip_bcast, rd_in, ipext_InBcastOctets);
- rrddim_set_by_pointer(st_ip_bcast, rd_out, ipext_OutBcastOctets);
-
- rrdset_done(st_ip_bcast);
- }
-
- // --------------------------------------------------------------------
-
- if(do_mcast_p == CONFIG_BOOLEAN_YES || (do_mcast_p == CONFIG_BOOLEAN_AUTO &&
- (ipext_InMcastPkts ||
- ipext_OutMcastPkts ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_mcast_p = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_ip_mcastpkts = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if(unlikely(!st_ip_mcastpkts)) {
- st_ip_mcastpkts = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "mcastpkts"
- , NULL
- , "multicast"
- , NULL
- , "IPv4 Multicast Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_MCAST_PACKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st_ip_mcastpkts, RRDSET_FLAG_DETAIL);
-
- rd_in = rrddim_add(st_ip_mcastpkts, "InMcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st_ip_mcastpkts, "OutMcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_ip_mcastpkts, rd_in, ipext_InMcastPkts);
- rrddim_set_by_pointer(st_ip_mcastpkts, rd_out, ipext_OutMcastPkts);
- rrdset_done(st_ip_mcastpkts);
- }
-
- if(do_bcast_p == CONFIG_BOOLEAN_YES || (do_bcast_p == CONFIG_BOOLEAN_AUTO &&
- (ipext_InBcastPkts ||
- ipext_OutBcastPkts ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_bcast_p = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_ip_bcastpkts = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if(unlikely(!st_ip_bcastpkts)) {
- st_ip_bcastpkts = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "bcastpkts"
- , NULL
- , "broadcast"
- , NULL
- , "IPv4 Broadcast Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_BCAST_PACKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st_ip_bcastpkts, RRDSET_FLAG_DETAIL);
-
- rd_in = rrddim_add(st_ip_bcastpkts, "InBcastPkts", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st_ip_bcastpkts, "OutBcastPkts", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_ip_bcastpkts, rd_in, ipext_InBcastPkts);
- rrddim_set_by_pointer(st_ip_bcastpkts, rd_out, ipext_OutBcastPkts);
- rrdset_done(st_ip_bcastpkts);
- }
-
- if(do_ecn == CONFIG_BOOLEAN_YES || (do_ecn == CONFIG_BOOLEAN_AUTO &&
- (ipext_InCEPkts ||
- ipext_InECT0Pkts ||
- ipext_InECT1Pkts ||
- ipext_InNoECTPkts ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ecn = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_ecnpkts = NULL;
- static RRDDIM *rd_cep = NULL, *rd_noectp = NULL, *rd_ectp0 = NULL, *rd_ectp1 = NULL;
-
- if(unlikely(!st_ecnpkts)) {
- st_ecnpkts = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "ecnpkts"
- , NULL
- , "ecn"
- , NULL
- , "IPv4 ECN Statistics"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_ECN
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st_ecnpkts, RRDSET_FLAG_DETAIL);
-
- rd_cep = rrddim_add(st_ecnpkts, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_noectp = rrddim_add(st_ecnpkts, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_ectp0 = rrddim_add(st_ecnpkts, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_ectp1 = rrddim_add(st_ecnpkts, "InECT1Pkts", "ECTP1", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_ecnpkts, rd_cep, ipext_InCEPkts);
- rrddim_set_by_pointer(st_ecnpkts, rd_noectp, ipext_InNoECTPkts);
- rrddim_set_by_pointer(st_ecnpkts, rd_ectp0, ipext_InECT0Pkts);
- rrddim_set_by_pointer(st_ecnpkts, rd_ectp1, ipext_InECT1Pkts);
- rrdset_done(st_ecnpkts);
- }
-
- // netstat TcpExt charts
-
- if(do_tcpext_memory == CONFIG_BOOLEAN_YES || (do_tcpext_memory == CONFIG_BOOLEAN_AUTO &&
- (tcpext_TCPMemoryPressures ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_memory = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_tcpmemorypressures = NULL;
- static RRDDIM *rd_pressures = NULL;
-
- if(unlikely(!st_tcpmemorypressures)) {
- st_tcpmemorypressures = rrdset_create_localhost(
- RRD_TYPE_NET_IP
- , "tcpmemorypressures"
- , NULL
- , "tcp"
- , NULL
- , "TCP Memory Pressures"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_MEM_PRESSURE
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_pressures = rrddim_add(st_tcpmemorypressures, "TCPMemoryPressures", "pressures", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_tcpmemorypressures, rd_pressures, tcpext_TCPMemoryPressures);
- rrdset_done(st_tcpmemorypressures);
- }
-
- if(do_tcpext_connaborts == CONFIG_BOOLEAN_YES || (do_tcpext_connaborts == CONFIG_BOOLEAN_AUTO &&
- (tcpext_TCPAbortOnData ||
- tcpext_TCPAbortOnClose ||
- tcpext_TCPAbortOnMemory ||
- tcpext_TCPAbortOnTimeout ||
- tcpext_TCPAbortOnLinger ||
- tcpext_TCPAbortFailed ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_connaborts = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_tcpconnaborts = NULL;
- static RRDDIM *rd_baddata = NULL, *rd_userclosed = NULL, *rd_nomemory = NULL, *rd_timeout = NULL, *rd_linger = NULL, *rd_failed = NULL;
-
- if(unlikely(!st_tcpconnaborts)) {
- st_tcpconnaborts = rrdset_create_localhost(
- RRD_TYPE_NET_IP
- , "tcpconnaborts"
- , NULL
- , "tcp"
- , NULL
- , "TCP Connection Aborts"
- , "connections/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_CONNABORTS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_baddata = rrddim_add(st_tcpconnaborts, "TCPAbortOnData", "baddata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_userclosed = rrddim_add(st_tcpconnaborts, "TCPAbortOnClose", "userclosed", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_nomemory = rrddim_add(st_tcpconnaborts, "TCPAbortOnMemory", "nomemory", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_timeout = rrddim_add(st_tcpconnaborts, "TCPAbortOnTimeout", "timeout", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_linger = rrddim_add(st_tcpconnaborts, "TCPAbortOnLinger", "linger", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_failed = rrddim_add(st_tcpconnaborts, "TCPAbortFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_tcpconnaborts, rd_baddata, tcpext_TCPAbortOnData);
- rrddim_set_by_pointer(st_tcpconnaborts, rd_userclosed, tcpext_TCPAbortOnClose);
- rrddim_set_by_pointer(st_tcpconnaborts, rd_nomemory, tcpext_TCPAbortOnMemory);
- rrddim_set_by_pointer(st_tcpconnaborts, rd_timeout, tcpext_TCPAbortOnTimeout);
- rrddim_set_by_pointer(st_tcpconnaborts, rd_linger, tcpext_TCPAbortOnLinger);
- rrddim_set_by_pointer(st_tcpconnaborts, rd_failed, tcpext_TCPAbortFailed);
- rrdset_done(st_tcpconnaborts);
- }
-
- if(do_tcpext_reorder == CONFIG_BOOLEAN_YES || (do_tcpext_reorder == CONFIG_BOOLEAN_AUTO &&
- (tcpext_TCPRenoReorder ||
- tcpext_TCPFACKReorder ||
- tcpext_TCPSACKReorder ||
- tcpext_TCPTSReorder ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_reorder = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_tcpreorders = NULL;
- static RRDDIM *rd_timestamp = NULL, *rd_sack = NULL, *rd_fack = NULL, *rd_reno = NULL;
-
- if(unlikely(!st_tcpreorders)) {
- st_tcpreorders = rrdset_create_localhost(
- RRD_TYPE_NET_IP
- , "tcpreorders"
- , NULL
- , "tcp"
- , NULL
- , "TCP Reordered Packets by Detection Method"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_REORDERS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_timestamp = rrddim_add(st_tcpreorders, "TCPTSReorder", "timestamp", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_sack = rrddim_add(st_tcpreorders, "TCPSACKReorder", "sack", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_fack = rrddim_add(st_tcpreorders, "TCPFACKReorder", "fack", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_reno = rrddim_add(st_tcpreorders, "TCPRenoReorder", "reno", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_tcpreorders, rd_timestamp, tcpext_TCPTSReorder);
- rrddim_set_by_pointer(st_tcpreorders, rd_sack, tcpext_TCPSACKReorder);
- rrddim_set_by_pointer(st_tcpreorders, rd_fack, tcpext_TCPFACKReorder);
- rrddim_set_by_pointer(st_tcpreorders, rd_reno, tcpext_TCPRenoReorder);
- rrdset_done(st_tcpreorders);
- }
-
- // --------------------------------------------------------------------
-
- if(do_tcpext_ofo == CONFIG_BOOLEAN_YES || (do_tcpext_ofo == CONFIG_BOOLEAN_AUTO &&
- (tcpext_TCPOFOQueue ||
- tcpext_TCPOFODrop ||
- tcpext_TCPOFOMerge ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_ofo = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_ip_tcpofo = NULL;
- static RRDDIM *rd_inqueue = NULL, *rd_dropped = NULL, *rd_merged = NULL, *rd_pruned = NULL;
-
- if(unlikely(!st_ip_tcpofo)) {
-
- st_ip_tcpofo = rrdset_create_localhost(
- RRD_TYPE_NET_IP
- , "tcpofo"
- , NULL
- , "tcp"
- , NULL
- , "TCP Out-Of-Order Queue"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_OFO
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_inqueue = rrddim_add(st_ip_tcpofo, "TCPOFOQueue", "inqueue", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_dropped = rrddim_add(st_ip_tcpofo, "TCPOFODrop", "dropped", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_merged = rrddim_add(st_ip_tcpofo, "TCPOFOMerge", "merged", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_pruned = rrddim_add(st_ip_tcpofo, "OfoPruned", "pruned", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_ip_tcpofo, rd_inqueue, tcpext_TCPOFOQueue);
- rrddim_set_by_pointer(st_ip_tcpofo, rd_dropped, tcpext_TCPOFODrop);
- rrddim_set_by_pointer(st_ip_tcpofo, rd_merged, tcpext_TCPOFOMerge);
- rrddim_set_by_pointer(st_ip_tcpofo, rd_pruned, tcpext_OfoPruned);
- rrdset_done(st_ip_tcpofo);
- }
-
- if(do_tcpext_syscookies == CONFIG_BOOLEAN_YES || (do_tcpext_syscookies == CONFIG_BOOLEAN_AUTO &&
- (tcpext_SyncookiesSent ||
- tcpext_SyncookiesRecv ||
- tcpext_SyncookiesFailed ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_syscookies = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_syncookies = NULL;
- static RRDDIM *rd_received = NULL, *rd_sent = NULL, *rd_failed = NULL;
-
- if(unlikely(!st_syncookies)) {
-
- st_syncookies = rrdset_create_localhost(
- RRD_TYPE_NET_IP
- , "tcpsyncookies"
- , NULL
- , "tcp"
- , NULL
- , "TCP SYN Cookies"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_SYNCOOKIES
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_received = rrddim_add(st_syncookies, "SyncookiesRecv", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_sent = rrddim_add(st_syncookies, "SyncookiesSent", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_failed = rrddim_add(st_syncookies, "SyncookiesFailed", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_syncookies, rd_received, tcpext_SyncookiesRecv);
- rrddim_set_by_pointer(st_syncookies, rd_sent, tcpext_SyncookiesSent);
- rrddim_set_by_pointer(st_syncookies, rd_failed, tcpext_SyncookiesFailed);
- rrdset_done(st_syncookies);
- }
-
- if(do_tcpext_syn_queue == CONFIG_BOOLEAN_YES || (do_tcpext_syn_queue == CONFIG_BOOLEAN_AUTO &&
- (tcpext_TCPReqQFullDrop ||
- tcpext_TCPReqQFullDoCookies ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_syn_queue = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_syn_queue = NULL;
- static RRDDIM
- *rd_TCPReqQFullDrop = NULL,
- *rd_TCPReqQFullDoCookies = NULL;
-
- if(unlikely(!st_syn_queue)) {
-
- st_syn_queue = rrdset_create_localhost(
- RRD_TYPE_NET_IP
- , "tcp_syn_queue"
- , NULL
- , "tcp"
- , NULL
- , "TCP SYN Queue Issues"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_SYN_QUEUE
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_TCPReqQFullDrop = rrddim_add(st_syn_queue, "TCPReqQFullDrop", "drops", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_TCPReqQFullDoCookies = rrddim_add(st_syn_queue, "TCPReqQFullDoCookies", "cookies", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_syn_queue, rd_TCPReqQFullDrop, tcpext_TCPReqQFullDrop);
- rrddim_set_by_pointer(st_syn_queue, rd_TCPReqQFullDoCookies, tcpext_TCPReqQFullDoCookies);
- rrdset_done(st_syn_queue);
- }
-
- if(do_tcpext_accept_queue == CONFIG_BOOLEAN_YES || (do_tcpext_accept_queue == CONFIG_BOOLEAN_AUTO &&
- (tcpext_ListenOverflows ||
- tcpext_ListenDrops ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcpext_accept_queue = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_accept_queue = NULL;
- static RRDDIM *rd_overflows = NULL,
- *rd_drops = NULL;
-
- if(unlikely(!st_accept_queue)) {
-
- st_accept_queue = rrdset_create_localhost(
- RRD_TYPE_NET_IP
- , "tcp_accept_queue"
- , NULL
- , "tcp"
- , NULL
- , "TCP Accept Queue Issues"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_ACCEPT_QUEUE
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_overflows = rrddim_add(st_accept_queue, "ListenOverflows", "overflows", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_drops = rrddim_add(st_accept_queue, "ListenDrops", "drops", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_accept_queue, rd_overflows, tcpext_ListenOverflows);
- rrddim_set_by_pointer(st_accept_queue, rd_drops, tcpext_ListenDrops);
- rrdset_done(st_accept_queue);
- }
-
- // snmp Ip charts
-
- if(do_ip_packets == CONFIG_BOOLEAN_YES || (do_ip_packets == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.ip_OutRequests ||
- snmp_root.ip_InReceives ||
- snmp_root.ip_ForwDatagrams ||
- snmp_root.ip_InDelivers ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip_packets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_InReceives = NULL,
- *rd_OutRequests = NULL,
- *rd_ForwDatagrams = NULL,
- *rd_InDelivers = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "packets"
- , NULL
- , "packets"
- , NULL
- , "IPv4 Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_PACKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InReceives = rrddim_add(st, "InReceives", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutRequests = rrddim_add(st, "OutRequests", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_ForwDatagrams = rrddim_add(st, "ForwDatagrams", "forwarded", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InDelivers = rrddim_add(st, "InDelivers", "delivered", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_OutRequests, (collected_number)snmp_root.ip_OutRequests);
- rrddim_set_by_pointer(st, rd_InReceives, (collected_number)snmp_root.ip_InReceives);
- rrddim_set_by_pointer(st, rd_ForwDatagrams, (collected_number)snmp_root.ip_ForwDatagrams);
- rrddim_set_by_pointer(st, rd_InDelivers, (collected_number)snmp_root.ip_InDelivers);
- rrdset_done(st);
- }
-
- if(do_ip_fragsout == CONFIG_BOOLEAN_YES || (do_ip_fragsout == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.ip_FragOKs ||
- snmp_root.ip_FragFails ||
- snmp_root.ip_FragCreates ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip_fragsout = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_FragOKs = NULL,
- *rd_FragFails = NULL,
- *rd_FragCreates = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "fragsout"
- , NULL
- , "fragments"
- , NULL
- , "IPv4 Fragments Sent"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_OUT
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_FragOKs = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_FragFails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_FragCreates = rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_FragOKs, (collected_number)snmp_root.ip_FragOKs);
- rrddim_set_by_pointer(st, rd_FragFails, (collected_number)snmp_root.ip_FragFails);
- rrddim_set_by_pointer(st, rd_FragCreates, (collected_number)snmp_root.ip_FragCreates);
- rrdset_done(st);
- }
-
- if(do_ip_fragsin == CONFIG_BOOLEAN_YES || (do_ip_fragsin == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.ip_ReasmOKs ||
- snmp_root.ip_ReasmFails ||
- snmp_root.ip_ReasmReqds ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip_fragsin = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_ReasmOKs = NULL,
- *rd_ReasmFails = NULL,
- *rd_ReasmReqds = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "fragsin"
- , NULL
- , "fragments"
- , NULL
- , "IPv4 Fragments Reassembly"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_IN
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_ReasmOKs = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_ReasmFails = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_ReasmReqds = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_ReasmOKs, (collected_number)snmp_root.ip_ReasmOKs);
- rrddim_set_by_pointer(st, rd_ReasmFails, (collected_number)snmp_root.ip_ReasmFails);
- rrddim_set_by_pointer(st, rd_ReasmReqds, (collected_number)snmp_root.ip_ReasmReqds);
- rrdset_done(st);
- }
-
- if(do_ip_errors == CONFIG_BOOLEAN_YES || (do_ip_errors == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.ip_InDiscards ||
- snmp_root.ip_OutDiscards ||
- snmp_root.ip_InHdrErrors ||
- snmp_root.ip_InAddrErrors ||
- snmp_root.ip_InUnknownProtos ||
- snmp_root.ip_OutNoRoutes ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ip_errors = CONFIG_BOOLEAN_YES;
-
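- // this chart mixes Ip counters from /proc/net/snmp with IpExt counters
- // (InNoRoutes, InTruncatedPkts, InCsumErrors) parsed from /proc/net/netstat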
- static RRDSET *st = NULL;
- static RRDDIM *rd_InDiscards = NULL,
- *rd_OutDiscards = NULL,
- *rd_InHdrErrors = NULL,
- *rd_InNoRoutes = NULL,
- *rd_OutNoRoutes = NULL,
- *rd_InAddrErrors = NULL,
- *rd_InTruncatedPkts = NULL,
- *rd_InCsumErrors = NULL,
- *rd_InUnknownProtos = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "errors"
- , NULL
- , "errors"
- , NULL
- , "IPv4 Errors"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_ERRORS
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rd_InNoRoutes = rrddim_add(st, "InNoRoutes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutNoRoutes = rrddim_add(st, "OutNoRoutes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rd_InHdrErrors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InAddrErrors = rrddim_add(st, "InAddrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InUnknownProtos = rrddim_add(st, "InUnknownProtos", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InTruncatedPkts = rrddim_add(st, "InTruncatedPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InDiscards, (collected_number)snmp_root.ip_InDiscards);
- rrddim_set_by_pointer(st, rd_OutDiscards, (collected_number)snmp_root.ip_OutDiscards);
- rrddim_set_by_pointer(st, rd_InHdrErrors, (collected_number)snmp_root.ip_InHdrErrors);
- rrddim_set_by_pointer(st, rd_InAddrErrors, (collected_number)snmp_root.ip_InAddrErrors);
- rrddim_set_by_pointer(st, rd_InUnknownProtos, (collected_number)snmp_root.ip_InUnknownProtos);
- rrddim_set_by_pointer(st, rd_InNoRoutes, (collected_number)ipext_InNoRoutes);
- rrddim_set_by_pointer(st, rd_OutNoRoutes, (collected_number)snmp_root.ip_OutNoRoutes);
- rrddim_set_by_pointer(st, rd_InTruncatedPkts, (collected_number)ipext_InTruncatedPkts);
- rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)ipext_InCsumErrors);
- rrdset_done(st);
- }
-
- // snmp Icmp charts
-
- if(do_icmp_packets == CONFIG_BOOLEAN_YES || (do_icmp_packets == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.icmp_InMsgs ||
- snmp_root.icmp_OutMsgs ||
- snmp_root.icmp_InErrors ||
- snmp_root.icmp_OutErrors ||
- snmp_root.icmp_InCsumErrors ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmp_packets = CONFIG_BOOLEAN_YES;
-
- {
- static RRDSET *st_packets = NULL;
- static RRDDIM *rd_InMsgs = NULL,
- *rd_OutMsgs = NULL;
-
- if(unlikely(!st_packets)) {
- st_packets = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "icmp"
- , NULL
- , "icmp"
- , NULL
- , "IPv4 ICMP Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_ICMP_PACKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InMsgs = rrddim_add(st_packets, "InMsgs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutMsgs = rrddim_add(st_packets, "OutMsgs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_packets, rd_InMsgs, (collected_number)snmp_root.icmp_InMsgs);
- rrddim_set_by_pointer(st_packets, rd_OutMsgs, (collected_number)snmp_root.icmp_OutMsgs);
- rrdset_done(st_packets);
- }
-
- {
- static RRDSET *st_errors = NULL;
- static RRDDIM *rd_InErrors = NULL,
- *rd_OutErrors = NULL,
- *rd_InCsumErrors = NULL;
-
- if(unlikely(!st_errors)) {
- st_errors = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "icmp_errors"
- , NULL
- , "icmp"
- , NULL
- , "IPv4 ICMP Errors"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_ICMP_ERRORS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InErrors = rrddim_add(st_errors, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutErrors = rrddim_add(st_errors, "OutErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InCsumErrors = rrddim_add(st_errors, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_errors, rd_InErrors, (collected_number)snmp_root.icmp_InErrors);
- rrddim_set_by_pointer(st_errors, rd_OutErrors, (collected_number)snmp_root.icmp_OutErrors);
- rrddim_set_by_pointer(st_errors, rd_InCsumErrors, (collected_number)snmp_root.icmp_InCsumErrors);
- rrdset_done(st_errors);
- }
- }
-
- // snmp IcmpMsg charts
-
- if(do_icmpmsg == CONFIG_BOOLEAN_YES || (do_icmpmsg == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.icmpmsg_InEchoReps ||
- snmp_root.icmpmsg_OutEchoReps ||
- snmp_root.icmpmsg_InDestUnreachs ||
- snmp_root.icmpmsg_OutDestUnreachs ||
- snmp_root.icmpmsg_InRedirects ||
- snmp_root.icmpmsg_OutRedirects ||
- snmp_root.icmpmsg_InEchos ||
- snmp_root.icmpmsg_OutEchos ||
- snmp_root.icmpmsg_InRouterAdvert ||
- snmp_root.icmpmsg_OutRouterAdvert ||
- snmp_root.icmpmsg_InRouterSelect ||
- snmp_root.icmpmsg_OutRouterSelect ||
- snmp_root.icmpmsg_InTimeExcds ||
- snmp_root.icmpmsg_OutTimeExcds ||
- snmp_root.icmpmsg_InParmProbs ||
- snmp_root.icmpmsg_OutParmProbs ||
- snmp_root.icmpmsg_InTimestamps ||
- snmp_root.icmpmsg_OutTimestamps ||
- snmp_root.icmpmsg_InTimestampReps ||
- snmp_root.icmpmsg_OutTimestampReps ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_icmpmsg = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_InEchoReps = NULL,
- *rd_OutEchoReps = NULL,
- *rd_InDestUnreachs = NULL,
- *rd_OutDestUnreachs = NULL,
- *rd_InRedirects = NULL,
- *rd_OutRedirects = NULL,
- *rd_InEchos = NULL,
- *rd_OutEchos = NULL,
- *rd_InRouterAdvert = NULL,
- *rd_OutRouterAdvert = NULL,
- *rd_InRouterSelect = NULL,
- *rd_OutRouterSelect = NULL,
- *rd_InTimeExcds = NULL,
- *rd_OutTimeExcds = NULL,
- *rd_InParmProbs = NULL,
- *rd_OutParmProbs = NULL,
- *rd_InTimestamps = NULL,
- *rd_OutTimestamps = NULL,
- *rd_InTimestampReps = NULL,
- *rd_OutTimestampReps = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "icmpmsg"
- , NULL
- , "icmp"
- , NULL
- , "IPv4 ICMP Messages"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_ICMP_MESSAGES
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InEchoReps = rrddim_add(st, "InType0", "InEchoReps", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutEchoReps = rrddim_add(st, "OutType0", "OutEchoReps", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InDestUnreachs = rrddim_add(st, "InType3", "InDestUnreachs", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutDestUnreachs = rrddim_add(st, "OutType3", "OutDestUnreachs", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InRedirects = rrddim_add(st, "InType5", "InRedirects", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutRedirects = rrddim_add(st, "OutType5", "OutRedirects", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InEchos = rrddim_add(st, "InType8", "InEchos", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutEchos = rrddim_add(st, "OutType8", "OutEchos", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InRouterAdvert = rrddim_add(st, "InType9", "InRouterAdvert", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutRouterAdvert = rrddim_add(st, "OutType9", "OutRouterAdvert", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InRouterSelect = rrddim_add(st, "InType10", "InRouterSelect", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutRouterSelect = rrddim_add(st, "OutType10", "OutRouterSelect", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InTimeExcds = rrddim_add(st, "InType11", "InTimeExcds", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutTimeExcds = rrddim_add(st, "OutType11", "OutTimeExcds", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InParmProbs = rrddim_add(st, "InType12", "InParmProbs", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutParmProbs = rrddim_add(st, "OutType12", "OutParmProbs", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InTimestamps = rrddim_add(st, "InType13", "InTimestamps", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutTimestamps = rrddim_add(st, "OutType13", "OutTimestamps", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InTimestampReps = rrddim_add(st, "InType14", "InTimestampReps", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutTimestampReps = rrddim_add(st, "OutType14", "OutTimestampReps", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InEchoReps, (collected_number)snmp_root.icmpmsg_InEchoReps);
- rrddim_set_by_pointer(st, rd_OutEchoReps, (collected_number)snmp_root.icmpmsg_OutEchoReps);
- rrddim_set_by_pointer(st, rd_InDestUnreachs, (collected_number)snmp_root.icmpmsg_InDestUnreachs);
- rrddim_set_by_pointer(st, rd_OutDestUnreachs, (collected_number)snmp_root.icmpmsg_OutDestUnreachs);
- rrddim_set_by_pointer(st, rd_InRedirects, (collected_number)snmp_root.icmpmsg_InRedirects);
- rrddim_set_by_pointer(st, rd_OutRedirects, (collected_number)snmp_root.icmpmsg_OutRedirects);
- rrddim_set_by_pointer(st, rd_InEchos, (collected_number)snmp_root.icmpmsg_InEchos);
- rrddim_set_by_pointer(st, rd_OutEchos, (collected_number)snmp_root.icmpmsg_OutEchos);
- rrddim_set_by_pointer(st, rd_InRouterAdvert, (collected_number)snmp_root.icmpmsg_InRouterAdvert);
- rrddim_set_by_pointer(st, rd_OutRouterAdvert, (collected_number)snmp_root.icmpmsg_OutRouterAdvert);
- rrddim_set_by_pointer(st, rd_InRouterSelect, (collected_number)snmp_root.icmpmsg_InRouterSelect);
- rrddim_set_by_pointer(st, rd_OutRouterSelect, (collected_number)snmp_root.icmpmsg_OutRouterSelect);
- rrddim_set_by_pointer(st, rd_InTimeExcds, (collected_number)snmp_root.icmpmsg_InTimeExcds);
- rrddim_set_by_pointer(st, rd_OutTimeExcds, (collected_number)snmp_root.icmpmsg_OutTimeExcds);
- rrddim_set_by_pointer(st, rd_InParmProbs, (collected_number)snmp_root.icmpmsg_InParmProbs);
- rrddim_set_by_pointer(st, rd_OutParmProbs, (collected_number)snmp_root.icmpmsg_OutParmProbs);
- rrddim_set_by_pointer(st, rd_InTimestamps, (collected_number)snmp_root.icmpmsg_InTimestamps);
- rrddim_set_by_pointer(st, rd_OutTimestamps, (collected_number)snmp_root.icmpmsg_OutTimestamps);
- rrddim_set_by_pointer(st, rd_InTimestampReps, (collected_number)snmp_root.icmpmsg_InTimestampReps);
- rrddim_set_by_pointer(st, rd_OutTimestampReps, (collected_number)snmp_root.icmpmsg_OutTimestampReps);
-
- rrdset_done(st);
- }
-
- // snmp Tcp charts
-
- // this updates the custom host variable only when its value has changed
- rrdvar_custom_host_variable_set(localhost, tcp_max_connections_var, snmp_root.tcp_MaxConn);
-
- // see http://net-snmp.sourceforge.net/docs/mibs/tcp.html
- if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.tcp_CurrEstab ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcp_sockets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_CurrEstab = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP
- , "tcpsock"
- , NULL
- , "tcp"
- , NULL
- , "TCP Connections"
- , "active connections"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_ESTABLISHED_CONNS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_CurrEstab = rrddim_add(st, "CurrEstab", "connections", 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_CurrEstab, (collected_number)snmp_root.tcp_CurrEstab);
- rrdset_done(st);
- }
-
- if(do_tcp_packets == CONFIG_BOOLEAN_YES || (do_tcp_packets == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.tcp_InSegs ||
- snmp_root.tcp_OutSegs ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcp_packets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_InSegs = NULL,
- *rd_OutSegs = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP
- , "tcppackets"
- , NULL
- , "tcp"
- , NULL
- , "IPv4 TCP Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_PACKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InSegs = rrddim_add(st, "InSegs", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutSegs = rrddim_add(st, "OutSegs", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InSegs, (collected_number)snmp_root.tcp_InSegs);
- rrddim_set_by_pointer(st, rd_OutSegs, (collected_number)snmp_root.tcp_OutSegs);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_tcp_errors == CONFIG_BOOLEAN_YES || (do_tcp_errors == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.tcp_InErrs ||
- snmp_root.tcp_InCsumErrors ||
- snmp_root.tcp_RetransSegs ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcp_errors = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_InErrs = NULL,
- *rd_InCsumErrors = NULL,
- *rd_RetransSegs = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP
- , "tcperrors"
- , NULL
- , "tcp"
- , NULL
- , "IPv4 TCP Errors"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_ERRORS
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_InErrs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_RetransSegs = rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InErrs, (collected_number)snmp_root.tcp_InErrs);
- rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.tcp_InCsumErrors);
- rrddim_set_by_pointer(st, rd_RetransSegs, (collected_number)snmp_root.tcp_RetransSegs);
- rrdset_done(st);
- }
-
- if(do_tcp_opens == CONFIG_BOOLEAN_YES || (do_tcp_opens == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.tcp_ActiveOpens ||
- snmp_root.tcp_PassiveOpens ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcp_opens = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_ActiveOpens = NULL,
- *rd_PassiveOpens = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP
- , "tcpopens"
- , NULL
- , "tcp"
- , NULL
- , "IPv4 TCP Opens"
- , "connections/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_OPENS
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_ActiveOpens = rrddim_add(st, "ActiveOpens", "active", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_PassiveOpens = rrddim_add(st, "PassiveOpens", "passive", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_ActiveOpens, (collected_number)snmp_root.tcp_ActiveOpens);
- rrddim_set_by_pointer(st, rd_PassiveOpens, (collected_number)snmp_root.tcp_PassiveOpens);
- rrdset_done(st);
- }
-
- if(do_tcp_handshake == CONFIG_BOOLEAN_YES || (do_tcp_handshake == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.tcp_EstabResets ||
- snmp_root.tcp_OutRsts ||
- snmp_root.tcp_AttemptFails ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcp_handshake = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_EstabResets = NULL,
- *rd_OutRsts = NULL,
- *rd_AttemptFails = NULL,
- *rd_TCPSynRetrans = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP
- , "tcphandshake"
- , NULL
- , "tcp"
- , NULL
- , "IPv4 TCP Handshake Issues"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IP_TCP_HANDSHAKE
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_EstabResets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutRsts = rrddim_add(st, "OutRsts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_AttemptFails = rrddim_add(st, "AttemptFails", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_TCPSynRetrans = rrddim_add(st, "TCPSynRetrans", "SynRetrans", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_EstabResets, (collected_number)snmp_root.tcp_EstabResets);
- rrddim_set_by_pointer(st, rd_OutRsts, (collected_number)snmp_root.tcp_OutRsts);
- rrddim_set_by_pointer(st, rd_AttemptFails, (collected_number)snmp_root.tcp_AttemptFails);
- rrddim_set_by_pointer(st, rd_TCPSynRetrans, tcpext_TCPSynRetrans);
- rrdset_done(st);
- }
-
- // snmp Udp charts
-
- // see http://net-snmp.sourceforge.net/docs/mibs/udp.html
- if(do_udp_packets == CONFIG_BOOLEAN_YES || (do_udp_packets == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.udp_InDatagrams ||
- snmp_root.udp_OutDatagrams ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_udp_packets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_InDatagrams = NULL,
- *rd_OutDatagrams = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "udppackets"
- , NULL
- , "udp"
- , NULL
- , "IPv4 UDP Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDP_PACKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InDatagrams = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutDatagrams = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InDatagrams, (collected_number)snmp_root.udp_InDatagrams);
- rrddim_set_by_pointer(st, rd_OutDatagrams, (collected_number)snmp_root.udp_OutDatagrams);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_udp_errors == CONFIG_BOOLEAN_YES || (do_udp_errors == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.udp_InErrors ||
- snmp_root.udp_NoPorts ||
- snmp_root.udp_RcvbufErrors ||
- snmp_root.udp_SndbufErrors ||
- snmp_root.udp_InCsumErrors ||
- snmp_root.udp_IgnoredMulti ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_udp_errors = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_RcvbufErrors = NULL,
- *rd_SndbufErrors = NULL,
- *rd_InErrors = NULL,
- *rd_NoPorts = NULL,
- *rd_InCsumErrors = NULL,
- *rd_IgnoredMulti = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "udperrors"
- , NULL
- , "udp"
- , NULL
- , "IPv4 UDP Errors"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDP_ERRORS
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InErrors, (collected_number)snmp_root.udp_InErrors);
- rrddim_set_by_pointer(st, rd_NoPorts, (collected_number)snmp_root.udp_NoPorts);
- rrddim_set_by_pointer(st, rd_RcvbufErrors, (collected_number)snmp_root.udp_RcvbufErrors);
- rrddim_set_by_pointer(st, rd_SndbufErrors, (collected_number)snmp_root.udp_SndbufErrors);
- rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.udp_InCsumErrors);
- rrddim_set_by_pointer(st, rd_IgnoredMulti, (collected_number)snmp_root.udp_IgnoredMulti);
- rrdset_done(st);
- }
-
- // snmp UdpLite charts
-
- if(do_udplite_packets == CONFIG_BOOLEAN_YES || (do_udplite_packets == CONFIG_BOOLEAN_AUTO &&
- (snmp_root.udplite_InDatagrams ||
- snmp_root.udplite_OutDatagrams ||
- snmp_root.udplite_NoPorts ||
- snmp_root.udplite_InErrors ||
- snmp_root.udplite_InCsumErrors ||
- snmp_root.udplite_RcvbufErrors ||
- snmp_root.udplite_SndbufErrors ||
- snmp_root.udplite_IgnoredMulti ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_udplite_packets = CONFIG_BOOLEAN_YES;
-
- {
- static RRDSET *st = NULL;
- static RRDDIM *rd_InDatagrams = NULL,
- *rd_OutDatagrams = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "udplite"
- , NULL
- , "udplite"
- , NULL
- , "IPv4 UDPLite Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDPLITE_PACKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_InDatagrams = rrddim_add(st, "InDatagrams", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutDatagrams = rrddim_add(st, "OutDatagrams", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InDatagrams, (collected_number)snmp_root.udplite_InDatagrams);
- rrddim_set_by_pointer(st, rd_OutDatagrams, (collected_number)snmp_root.udplite_OutDatagrams);
- rrdset_done(st);
- }
-
- {
- static RRDSET *st = NULL;
- static RRDDIM *rd_RcvbufErrors = NULL,
- *rd_SndbufErrors = NULL,
- *rd_InErrors = NULL,
- *rd_NoPorts = NULL,
- *rd_InCsumErrors = NULL,
- *rd_IgnoredMulti = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_IP4
- , "udplite_errors"
- , NULL
- , "udplite"
- , NULL
- , "IPv4 UDPLite Errors"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NETSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDPLITE_ERRORS
- , update_every
- , RRDSET_TYPE_LINE);
-
- rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InErrors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_NoPorts = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_IgnoredMulti = rrddim_add(st, "IgnoredMulti", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_NoPorts, (collected_number)snmp_root.udplite_NoPorts);
- rrddim_set_by_pointer(st, rd_InErrors, (collected_number)snmp_root.udplite_InErrors);
- rrddim_set_by_pointer(st, rd_InCsumErrors, (collected_number)snmp_root.udplite_InCsumErrors);
- rrddim_set_by_pointer(st, rd_RcvbufErrors, (collected_number)snmp_root.udplite_RcvbufErrors);
- rrddim_set_by_pointer(st, rd_SndbufErrors, (collected_number)snmp_root.udplite_SndbufErrors);
- rrddim_set_by_pointer(st, rd_IgnoredMulti, (collected_number)snmp_root.udplite_IgnoredMulti);
- rrdset_done(st);
- }
- }
-
- do_proc_net_snmp6(update_every);
-
- return 0;
-}
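The UDPLite charts above follow the collector's tri-state gating: a chart configured as auto stays hidden until one of its counters becomes non-zero (or zero metrics are forced on), and is then latched to CONFIG_BOOLEAN_YES for the rest of the session. A minimal standalone sketch of that latch, with illustrative CFG_* constants standing in for the real CONFIG_BOOLEAN_* values:

#include <stdio.h>

/* illustrative stand-ins for the CONFIG_BOOLEAN_* tri-state used above */
enum { CFG_NO = 0, CFG_YES = 1, CFG_AUTO = 2 };

/* latch an 'auto' chart to 'yes' the first time any of its counters is non-zero */
static int should_render(int *mode, unsigned long long sum) {
    if (*mode == CFG_YES) return 1;
    if (*mode == CFG_AUTO && sum) {
        *mode = CFG_YES;  /* latched: keeps updating even if the sum drops back to zero */
        return 1;
    }
    return 0;
}

int main(void) {
    int do_udplite = CFG_AUTO;
    unsigned long long samples[] = { 0, 0, 7, 0 };
    for (int i = 0; i < 4; i++)
        printf("iteration %d: render=%d\n", i, should_render(&do_udplite, samples[i]));
    return 0;  /* prints render=0, 0, 1, 1 */
}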
diff --git a/collectors/proc.plugin/proc_net_rpc_nfs.c b/collectors/proc.plugin/proc_net_rpc_nfs.c
deleted file mode 100644
index d6547636e..000000000
--- a/collectors/proc.plugin/proc_net_rpc_nfs.c
+++ /dev/null
@@ -1,439 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_NFS_NAME "/proc/net/rpc/nfs"
-#define CONFIG_SECTION_PLUGIN_PROC_NFS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NFS_NAME
-
-struct nfs_procs {
- char name[30];
- unsigned long long value;
- int present;
- RRDDIM *rd;
-};
-
-struct nfs_procs nfs_proc2_values[] = {
- { "null" , 0ULL, 0, NULL}
- , {"getattr" , 0ULL, 0, NULL}
- , {"setattr" , 0ULL, 0, NULL}
- , {"root" , 0ULL, 0, NULL}
- , {"lookup" , 0ULL, 0, NULL}
- , {"readlink", 0ULL, 0, NULL}
- , {"read" , 0ULL, 0, NULL}
- , {"wrcache" , 0ULL, 0, NULL}
- , {"write" , 0ULL, 0, NULL}
- , {"create" , 0ULL, 0, NULL}
- , {"remove" , 0ULL, 0, NULL}
- , {"rename" , 0ULL, 0, NULL}
- , {"link" , 0ULL, 0, NULL}
- , {"symlink" , 0ULL, 0, NULL}
- , {"mkdir" , 0ULL, 0, NULL}
- , {"rmdir" , 0ULL, 0, NULL}
- , {"readdir" , 0ULL, 0, NULL}
- , {"fsstat" , 0ULL, 0, NULL}
- ,
-
- /* termination */
- { "" , 0ULL, 0, NULL}
-};
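Every procedure table in this file ends with an entry whose name is the empty string, so callers can walk it without a separate length constant; the parse loops below test nfs_procX_values[i].name[0] as their stop condition. A tiny self-contained walk over such a sentinel-terminated table:

#include <stdio.h>

struct proc_entry { char name[30]; unsigned long long value; };

/* sample table, ended by an empty-name sentinel like the arrays above */
static struct proc_entry demo[] = {
    { "null", 0 }, { "getattr", 3 }, { "read", 42 },
    { "", 0 }  /* termination */
};

int main(void) {
    for (size_t i = 0; demo[i].name[0]; i++)  /* stop at the sentinel */
        printf("%zu: %s = %llu\n", i, demo[i].name, demo[i].value);
    return 0;
}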
-
-struct nfs_procs nfs_proc3_values[] = {
- { "null" , 0ULL, 0, NULL}
- , {"getattr" , 0ULL, 0, NULL}
- , {"setattr" , 0ULL, 0, NULL}
- , {"lookup" , 0ULL, 0, NULL}
- , {"access" , 0ULL, 0, NULL}
- , {"readlink" , 0ULL, 0, NULL}
- , {"read" , 0ULL, 0, NULL}
- , {"write" , 0ULL, 0, NULL}
- , {"create" , 0ULL, 0, NULL}
- , {"mkdir" , 0ULL, 0, NULL}
- , {"symlink" , 0ULL, 0, NULL}
- , {"mknod" , 0ULL, 0, NULL}
- , {"remove" , 0ULL, 0, NULL}
- , {"rmdir" , 0ULL, 0, NULL}
- , {"rename" , 0ULL, 0, NULL}
- , {"link" , 0ULL, 0, NULL}
- , {"readdir" , 0ULL, 0, NULL}
- , {"readdirplus", 0ULL, 0, NULL}
- , {"fsstat" , 0ULL, 0, NULL}
- , {"fsinfo" , 0ULL, 0, NULL}
- , {"pathconf" , 0ULL, 0, NULL}
- , {"commit" , 0ULL, 0, NULL}
- ,
-
- /* termination */
- { "" , 0ULL, 0, NULL}
-};
-
-struct nfs_procs nfs_proc4_values[] = {
- { "null" , 0ULL, 0, NULL}
- , {"read" , 0ULL, 0, NULL}
- , {"write" , 0ULL, 0, NULL}
- , {"commit" , 0ULL, 0, NULL}
- , {"open" , 0ULL, 0, NULL}
- , {"open_conf" , 0ULL, 0, NULL}
- , {"open_noat" , 0ULL, 0, NULL}
- , {"open_dgrd" , 0ULL, 0, NULL}
- , {"close" , 0ULL, 0, NULL}
- , {"setattr" , 0ULL, 0, NULL}
- , {"fsinfo" , 0ULL, 0, NULL}
- , {"renew" , 0ULL, 0, NULL}
- , {"setclntid" , 0ULL, 0, NULL}
- , {"confirm" , 0ULL, 0, NULL}
- , {"lock" , 0ULL, 0, NULL}
- , {"lockt" , 0ULL, 0, NULL}
- , {"locku" , 0ULL, 0, NULL}
- , {"access" , 0ULL, 0, NULL}
- , {"getattr" , 0ULL, 0, NULL}
- , {"lookup" , 0ULL, 0, NULL}
- , {"lookup_root" , 0ULL, 0, NULL}
- , {"remove" , 0ULL, 0, NULL}
- , {"rename" , 0ULL, 0, NULL}
- , {"link" , 0ULL, 0, NULL}
- , {"symlink" , 0ULL, 0, NULL}
- , {"create" , 0ULL, 0, NULL}
- , {"pathconf" , 0ULL, 0, NULL}
- , {"statfs" , 0ULL, 0, NULL}
- , {"readlink" , 0ULL, 0, NULL}
- , {"readdir" , 0ULL, 0, NULL}
- , {"server_caps" , 0ULL, 0, NULL}
- , {"delegreturn" , 0ULL, 0, NULL}
- , {"getacl" , 0ULL, 0, NULL}
- , {"setacl" , 0ULL, 0, NULL}
- , {"fs_locations" , 0ULL, 0, NULL}
- , {"rel_lkowner" , 0ULL, 0, NULL}
- , {"secinfo" , 0ULL, 0, NULL}
- , {"fsid_present" , 0ULL, 0, NULL}
- ,
-
- /* nfsv4.1 client ops */
- { "exchange_id" , 0ULL, 0, NULL}
- , {"create_session" , 0ULL, 0, NULL}
- , {"destroy_session" , 0ULL, 0, NULL}
- , {"sequence" , 0ULL, 0, NULL}
- , {"get_lease_time" , 0ULL, 0, NULL}
- , {"reclaim_comp" , 0ULL, 0, NULL}
- , {"layoutget" , 0ULL, 0, NULL}
- , {"getdevinfo" , 0ULL, 0, NULL}
- , {"layoutcommit" , 0ULL, 0, NULL}
- , {"layoutreturn" , 0ULL, 0, NULL}
- , {"secinfo_no" , 0ULL, 0, NULL}
- , {"test_stateid" , 0ULL, 0, NULL}
- , {"free_stateid" , 0ULL, 0, NULL}
- , {"getdevicelist" , 0ULL, 0, NULL}
- , {"bind_conn_to_ses", 0ULL, 0, NULL}
- , {"destroy_clientid", 0ULL, 0, NULL}
- ,
-
- /* nfsv4.2 client ops */
- { "seek" , 0ULL, 0, NULL}
- , {"allocate" , 0ULL, 0, NULL}
- , {"deallocate" , 0ULL, 0, NULL}
- , {"layoutstats" , 0ULL, 0, NULL}
- , {"clone" , 0ULL, 0, NULL}
- ,
-
- /* termination */
- { "" , 0ULL, 0, NULL}
-};
-
-int do_proc_net_rpc_nfs(int update_every, usec_t dt) {
- (void)dt;
-
- static procfile *ff = NULL;
- static int do_net = -1, do_rpc = -1, do_proc2 = -1, do_proc3 = -1, do_proc4 = -1;
- static int proc2_warning = 0, proc3_warning = 0, proc4_warning = 0;
-
- if(!ff) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/rpc/nfs");
- ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_NFS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
- }
- if(!ff) return 1;
-
- ff = procfile_readall(ff);
-    if(!ff) return 0; // we return 0, so that we will retry reading it next time
-
- if(do_net == -1) do_net = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "network", 1);
- if(do_rpc == -1) do_rpc = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "rpc", 1);
- if(do_proc2 == -1) do_proc2 = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "NFS v2 procedures", 1);
- if(do_proc3 == -1) do_proc3 = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "NFS v3 procedures", 1);
- if(do_proc4 == -1) do_proc4 = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_NFS, "NFS v4 procedures", 1);
-
-    // if they are enabled, reset them to 1
-    // once a line type is matched it becomes 2, so strcmp() is skipped for it on the remaining lines
- if(do_net) do_net = 1;
- if(do_rpc) do_rpc = 1;
- if(do_proc2) do_proc2 = 1;
- if(do_proc3) do_proc3 = 1;
- if(do_proc4) do_proc4 = 1;
-
- size_t lines = procfile_lines(ff), l;
-
- char *type;
- unsigned long long net_count = 0, net_udp_count = 0, net_tcp_count = 0, net_tcp_connections = 0;
- unsigned long long rpc_calls = 0, rpc_retransmits = 0, rpc_auth_refresh = 0;
-
- for(l = 0; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
- if(!words) continue;
-
- type = procfile_lineword(ff, l, 0);
-
- if(do_net == 1 && strcmp(type, "net") == 0) {
- if(words < 5) {
- collector_error("%s line of /proc/net/rpc/nfs has %zu words, expected %d", type, words, 5);
- continue;
- }
-
- net_count = str2ull(procfile_lineword(ff, l, 1), NULL);
- net_udp_count = str2ull(procfile_lineword(ff, l, 2), NULL);
- net_tcp_count = str2ull(procfile_lineword(ff, l, 3), NULL);
- net_tcp_connections = str2ull(procfile_lineword(ff, l, 4), NULL);
-
- unsigned long long sum = net_count + net_udp_count + net_tcp_count + net_tcp_connections;
- if(sum == 0ULL) do_net = -1;
- else do_net = 2;
- }
- else if(do_rpc == 1 && strcmp(type, "rpc") == 0) {
- if(words < 4) {
- collector_error("%s line of /proc/net/rpc/nfs has %zu words, expected %d", type, words, 6);
- continue;
- }
-
- rpc_calls = str2ull(procfile_lineword(ff, l, 1), NULL);
- rpc_retransmits = str2ull(procfile_lineword(ff, l, 2), NULL);
- rpc_auth_refresh = str2ull(procfile_lineword(ff, l, 3), NULL);
-
- unsigned long long sum = rpc_calls + rpc_retransmits + rpc_auth_refresh;
- if(sum == 0ULL) do_rpc = -1;
- else do_rpc = 2;
- }
- else if(do_proc2 == 1 && strcmp(type, "proc2") == 0) {
- // the first number is the count of numbers present
-            // so we start from word 2
-
- unsigned long long sum = 0;
- unsigned int i, j;
- for(i = 0, j = 2; j < words && nfs_proc2_values[i].name[0] ; i++, j++) {
- nfs_proc2_values[i].value = str2ull(procfile_lineword(ff, l, j), NULL);
- nfs_proc2_values[i].present = 1;
- sum += nfs_proc2_values[i].value;
- }
-
- if(sum == 0ULL) {
- if(!proc2_warning) {
- collector_error("Disabling /proc/net/rpc/nfs v2 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
- proc2_warning = 1;
- }
- do_proc2 = 0;
- }
- else do_proc2 = 2;
- }
- else if(do_proc3 == 1 && strcmp(type, "proc3") == 0) {
- // the first number is the count of numbers present
-            // so we start from word 2
-
- unsigned long long sum = 0;
- unsigned int i, j;
- for(i = 0, j = 2; j < words && nfs_proc3_values[i].name[0] ; i++, j++) {
- nfs_proc3_values[i].value = str2ull(procfile_lineword(ff, l, j), NULL);
- nfs_proc3_values[i].present = 1;
- sum += nfs_proc3_values[i].value;
- }
-
- if(sum == 0ULL) {
- if(!proc3_warning) {
- collector_info("Disabling /proc/net/rpc/nfs v3 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
- proc3_warning = 1;
- }
- do_proc3 = 0;
- }
- else do_proc3 = 2;
- }
- else if(do_proc4 == 1 && strcmp(type, "proc4") == 0) {
- // the first number is the count of numbers present
-            // so we start from word 2
-
- unsigned long long sum = 0;
- unsigned int i, j;
- for(i = 0, j = 2; j < words && nfs_proc4_values[i].name[0] ; i++, j++) {
- nfs_proc4_values[i].value = str2ull(procfile_lineword(ff, l, j), NULL);
- nfs_proc4_values[i].present = 1;
- sum += nfs_proc4_values[i].value;
- }
-
- if(sum == 0ULL) {
- if(!proc4_warning) {
- collector_info("Disabling /proc/net/rpc/nfs v4 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
- proc4_warning = 1;
- }
- do_proc4 = 0;
- }
- else do_proc4 = 2;
- }
- }
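On the procN lines the kernel prints the number of counters first, which is why the loops above start reading values at word 2 (word 0 is the line tag, word 1 the declared count). The same parse in isolation, with strtok()/strtoull() standing in for the procfile word accessors and an invented sample line:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
    char line[] = "proc3 5 7 311 12 4890 42";  /* tag, count, then <count> counters */
    strtok(line, " ");                         /* skip the "proc3" tag */
    unsigned long n = strtoul(strtok(NULL, " "), NULL, 10);
    char *tok;
    for (unsigned long i = 0; i < n && (tok = strtok(NULL, " ")) != NULL; i++)
        printf("procedure %lu = %llu calls\n", i, strtoull(tok, NULL, 10));
    return 0;
}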
-
- if(do_net == 2) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_udp = NULL,
- *rd_tcp = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfs"
- , "net"
- , NULL
- , "network"
- , NULL
- , "NFS Client Network"
- , "operations/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFS_NAME
- , NETDATA_CHART_PRIO_NFS_NET
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- // ignore net_count, net_tcp_connections
- (void)net_count;
- (void)net_tcp_connections;
-
- rrddim_set_by_pointer(st, rd_udp, net_udp_count);
- rrddim_set_by_pointer(st, rd_tcp, net_tcp_count);
- rrdset_done(st);
- }
-
- if(do_rpc == 2) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_calls = NULL,
- *rd_retransmits = NULL,
- *rd_auth_refresh = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfs"
- , "rpc"
- , NULL
- , "rpc"
- , NULL
- , "NFS Client Remote Procedure Calls Statistics"
- , "calls/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFS_NAME
- , NETDATA_CHART_PRIO_NFS_RPC
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_retransmits = rrddim_add(st, "retransmits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_auth_refresh = rrddim_add(st, "auth_refresh", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_calls, rpc_calls);
- rrddim_set_by_pointer(st, rd_retransmits, rpc_retransmits);
- rrddim_set_by_pointer(st, rd_auth_refresh, rpc_auth_refresh);
- rrdset_done(st);
- }
-
- if(do_proc2 == 2) {
- static RRDSET *st = NULL;
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfs"
- , "proc2"
- , NULL
- , "nfsv2rpc"
- , NULL
- , "NFS v2 Client Remote Procedure Calls"
- , "calls/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFS_NAME
- , NETDATA_CHART_PRIO_NFS_PROC2
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- size_t i;
- for(i = 0; nfs_proc2_values[i].present ; i++) {
- if(unlikely(!nfs_proc2_values[i].rd))
- nfs_proc2_values[i].rd = rrddim_add(st, nfs_proc2_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st, nfs_proc2_values[i].rd, nfs_proc2_values[i].value);
- }
-
- rrdset_done(st);
- }
-
- if(do_proc3 == 2) {
- static RRDSET *st = NULL;
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfs"
- , "proc3"
- , NULL
- , "nfsv3rpc"
- , NULL
- , "NFS v3 Client Remote Procedure Calls"
- , "calls/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFS_NAME
- , NETDATA_CHART_PRIO_NFS_PROC3
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- size_t i;
- for(i = 0; nfs_proc3_values[i].present ; i++) {
- if(unlikely(!nfs_proc3_values[i].rd))
- nfs_proc3_values[i].rd = rrddim_add(st, nfs_proc3_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st, nfs_proc3_values[i].rd, nfs_proc3_values[i].value);
- }
-
- rrdset_done(st);
- }
-
- if(do_proc4 == 2) {
- static RRDSET *st = NULL;
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfs"
- , "proc4"
- , NULL
- , "nfsv4rpc"
- , NULL
- , "NFS v4 Client Remote Procedure Calls"
- , "calls/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFS_NAME
- , NETDATA_CHART_PRIO_NFS_PROC4
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- size_t i;
- for(i = 0; nfs_proc4_values[i].present ; i++) {
- if(unlikely(!nfs_proc4_values[i].rd))
- nfs_proc4_values[i].rd = rrddim_add(st, nfs_proc4_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st, nfs_proc4_values[i].rd, nfs_proc4_values[i].value);
- }
-
- rrdset_done(st);
- }
-
- return 0;
-}
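Note how the procedure charts above create their dimensions lazily: the parser fills the tables in word order, so present entries always form a contiguous prefix, and the render loops can stop at the first entry never seen, adding a dimension only on first appearance. A sketch of the same prefix walk, with a hypothetical register_dim() standing in for rrddim_add():

#include <stdio.h>

struct entry { const char *name; unsigned long long value; int present; int registered; };

static void register_dim(struct entry *e) {  /* hypothetical stand-in for rrddim_add() */
    e->registered = 1;
    printf("registered dimension '%s'\n", e->name);
}

int main(void) {
    /* 'present' entries form a prefix; the absent one terminates the walk */
    struct entry tbl[] = { {"read", 5, 1, 0}, {"write", 9, 1, 0}, {"commit", 0, 0, 0} };
    for (int i = 0; tbl[i].present; i++) {   /* stops at the first absent entry */
        if (!tbl[i].registered) register_dim(&tbl[i]);
        printf("%s = %llu\n", tbl[i].name, tbl[i].value);
    }
    return 0;
}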
diff --git a/collectors/proc.plugin/proc_net_rpc_nfsd.c b/collectors/proc.plugin/proc_net_rpc_nfsd.c
deleted file mode 100644
index 1d9127a03..000000000
--- a/collectors/proc.plugin/proc_net_rpc_nfsd.c
+++ /dev/null
@@ -1,763 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_NFSD_NAME "/proc/net/rpc/nfsd"
-
-struct nfsd_procs {
- char name[30];
- unsigned long long value;
- int present;
- RRDDIM *rd;
-};
-
-struct nfsd_procs nfsd_proc2_values[] = {
- { "null" , 0ULL, 0, NULL}
- , {"getattr" , 0ULL, 0, NULL}
- , {"setattr" , 0ULL, 0, NULL}
- , {"root" , 0ULL, 0, NULL}
- , {"lookup" , 0ULL, 0, NULL}
- , {"readlink", 0ULL, 0, NULL}
- , {"read" , 0ULL, 0, NULL}
- , {"wrcache" , 0ULL, 0, NULL}
- , {"write" , 0ULL, 0, NULL}
- , {"create" , 0ULL, 0, NULL}
- , {"remove" , 0ULL, 0, NULL}
- , {"rename" , 0ULL, 0, NULL}
- , {"link" , 0ULL, 0, NULL}
- , {"symlink" , 0ULL, 0, NULL}
- , {"mkdir" , 0ULL, 0, NULL}
- , {"rmdir" , 0ULL, 0, NULL}
- , {"readdir" , 0ULL, 0, NULL}
- , {"fsstat" , 0ULL, 0, NULL}
- ,
-
- /* termination */
- { "" , 0ULL, 0, NULL}
-};
-
-struct nfsd_procs nfsd_proc3_values[] = {
- { "null" , 0ULL, 0, NULL}
- , {"getattr" , 0ULL, 0, NULL}
- , {"setattr" , 0ULL, 0, NULL}
- , {"lookup" , 0ULL, 0, NULL}
- , {"access" , 0ULL, 0, NULL}
- , {"readlink" , 0ULL, 0, NULL}
- , {"read" , 0ULL, 0, NULL}
- , {"write" , 0ULL, 0, NULL}
- , {"create" , 0ULL, 0, NULL}
- , {"mkdir" , 0ULL, 0, NULL}
- , {"symlink" , 0ULL, 0, NULL}
- , {"mknod" , 0ULL, 0, NULL}
- , {"remove" , 0ULL, 0, NULL}
- , {"rmdir" , 0ULL, 0, NULL}
- , {"rename" , 0ULL, 0, NULL}
- , {"link" , 0ULL, 0, NULL}
- , {"readdir" , 0ULL, 0, NULL}
- , {"readdirplus", 0ULL, 0, NULL}
- , {"fsstat" , 0ULL, 0, NULL}
- , {"fsinfo" , 0ULL, 0, NULL}
- , {"pathconf" , 0ULL, 0, NULL}
- , {"commit" , 0ULL, 0, NULL}
- ,
-
- /* termination */
- { "" , 0ULL, 0, NULL}
-};
-
-struct nfsd_procs nfsd_proc4_values[] = {
- { "null" , 0ULL, 0, NULL}
- , {"read" , 0ULL, 0, NULL}
- , {"write" , 0ULL, 0, NULL}
- , {"commit" , 0ULL, 0, NULL}
- , {"open" , 0ULL, 0, NULL}
- , {"open_conf" , 0ULL, 0, NULL}
- , {"open_noat" , 0ULL, 0, NULL}
- , {"open_dgrd" , 0ULL, 0, NULL}
- , {"close" , 0ULL, 0, NULL}
- , {"setattr" , 0ULL, 0, NULL}
- , {"fsinfo" , 0ULL, 0, NULL}
- , {"renew" , 0ULL, 0, NULL}
- , {"setclntid" , 0ULL, 0, NULL}
- , {"confirm" , 0ULL, 0, NULL}
- , {"lock" , 0ULL, 0, NULL}
- , {"lockt" , 0ULL, 0, NULL}
- , {"locku" , 0ULL, 0, NULL}
- , {"access" , 0ULL, 0, NULL}
- , {"getattr" , 0ULL, 0, NULL}
- , {"lookup" , 0ULL, 0, NULL}
- , {"lookup_root" , 0ULL, 0, NULL}
- , {"remove" , 0ULL, 0, NULL}
- , {"rename" , 0ULL, 0, NULL}
- , {"link" , 0ULL, 0, NULL}
- , {"symlink" , 0ULL, 0, NULL}
- , {"create" , 0ULL, 0, NULL}
- , {"pathconf" , 0ULL, 0, NULL}
- , {"statfs" , 0ULL, 0, NULL}
- , {"readlink" , 0ULL, 0, NULL}
- , {"readdir" , 0ULL, 0, NULL}
- , {"server_caps" , 0ULL, 0, NULL}
- , {"delegreturn" , 0ULL, 0, NULL}
- , {"getacl" , 0ULL, 0, NULL}
- , {"setacl" , 0ULL, 0, NULL}
- , {"fs_locations" , 0ULL, 0, NULL}
- , {"rel_lkowner" , 0ULL, 0, NULL}
- , {"secinfo" , 0ULL, 0, NULL}
- , {"fsid_present" , 0ULL, 0, NULL}
- ,
-
- /* nfsv4.1 client ops */
- { "exchange_id" , 0ULL, 0, NULL}
- , {"create_session" , 0ULL, 0, NULL}
- , {"destroy_session" , 0ULL, 0, NULL}
- , {"sequence" , 0ULL, 0, NULL}
- , {"get_lease_time" , 0ULL, 0, NULL}
- , {"reclaim_comp" , 0ULL, 0, NULL}
- , {"layoutget" , 0ULL, 0, NULL}
- , {"getdevinfo" , 0ULL, 0, NULL}
- , {"layoutcommit" , 0ULL, 0, NULL}
- , {"layoutreturn" , 0ULL, 0, NULL}
- , {"secinfo_no" , 0ULL, 0, NULL}
- , {"test_stateid" , 0ULL, 0, NULL}
- , {"free_stateid" , 0ULL, 0, NULL}
- , {"getdevicelist" , 0ULL, 0, NULL}
- , {"bind_conn_to_ses", 0ULL, 0, NULL}
- , {"destroy_clientid", 0ULL, 0, NULL}
- ,
-
- /* nfsv4.2 client ops */
- { "seek" , 0ULL, 0, NULL}
- , {"allocate" , 0ULL, 0, NULL}
- , {"deallocate" , 0ULL, 0, NULL}
- , {"layoutstats" , 0ULL, 0, NULL}
- , {"clone" , 0ULL, 0, NULL}
- ,
-
- /* termination */
- { "" , 0ULL, 0, NULL}
-};
-
-struct nfsd_procs nfsd4_ops_values[] = {
- { "unused_op0" , 0ULL, 0, NULL}
- , {"unused_op1" , 0ULL, 0, NULL}
- , {"future_op2" , 0ULL, 0, NULL}
- , {"access" , 0ULL, 0, NULL}
- , {"close" , 0ULL, 0, NULL}
- , {"commit" , 0ULL, 0, NULL}
- , {"create" , 0ULL, 0, NULL}
- , {"delegpurge" , 0ULL, 0, NULL}
- , {"delegreturn" , 0ULL, 0, NULL}
- , {"getattr" , 0ULL, 0, NULL}
- , {"getfh" , 0ULL, 0, NULL}
- , {"link" , 0ULL, 0, NULL}
- , {"lock" , 0ULL, 0, NULL}
- , {"lockt" , 0ULL, 0, NULL}
- , {"locku" , 0ULL, 0, NULL}
- , {"lookup" , 0ULL, 0, NULL}
- , {"lookup_root" , 0ULL, 0, NULL}
- , {"nverify" , 0ULL, 0, NULL}
- , {"open" , 0ULL, 0, NULL}
- , {"openattr" , 0ULL, 0, NULL}
- , {"open_confirm" , 0ULL, 0, NULL}
- , {"open_downgrade" , 0ULL, 0, NULL}
- , {"putfh" , 0ULL, 0, NULL}
- , {"putpubfh" , 0ULL, 0, NULL}
- , {"putrootfh" , 0ULL, 0, NULL}
- , {"read" , 0ULL, 0, NULL}
- , {"readdir" , 0ULL, 0, NULL}
- , {"readlink" , 0ULL, 0, NULL}
- , {"remove" , 0ULL, 0, NULL}
- , {"rename" , 0ULL, 0, NULL}
- , {"renew" , 0ULL, 0, NULL}
- , {"restorefh" , 0ULL, 0, NULL}
- , {"savefh" , 0ULL, 0, NULL}
- , {"secinfo" , 0ULL, 0, NULL}
- , {"setattr" , 0ULL, 0, NULL}
- , {"setclientid" , 0ULL, 0, NULL}
- , {"setclientid_confirm" , 0ULL, 0, NULL}
- , {"verify" , 0ULL, 0, NULL}
- , {"write" , 0ULL, 0, NULL}
- , {"release_lockowner" , 0ULL, 0, NULL}
- ,
-
- /* nfs41 */
- { "backchannel_ctl" , 0ULL, 0, NULL}
- , {"bind_conn_to_session", 0ULL, 0, NULL}
- , {"exchange_id" , 0ULL, 0, NULL}
- , {"create_session" , 0ULL, 0, NULL}
- , {"destroy_session" , 0ULL, 0, NULL}
- , {"free_stateid" , 0ULL, 0, NULL}
- , {"get_dir_delegation" , 0ULL, 0, NULL}
- , {"getdeviceinfo" , 0ULL, 0, NULL}
- , {"getdevicelist" , 0ULL, 0, NULL}
- , {"layoutcommit" , 0ULL, 0, NULL}
- , {"layoutget" , 0ULL, 0, NULL}
- , {"layoutreturn" , 0ULL, 0, NULL}
- , {"secinfo_no_name" , 0ULL, 0, NULL}
- , {"sequence" , 0ULL, 0, NULL}
- , {"set_ssv" , 0ULL, 0, NULL}
- , {"test_stateid" , 0ULL, 0, NULL}
- , {"want_delegation" , 0ULL, 0, NULL}
- , {"destroy_clientid" , 0ULL, 0, NULL}
- , {"reclaim_complete" , 0ULL, 0, NULL}
- ,
-
- /* nfs42 */
- { "allocate" , 0ULL, 0, NULL}
- , {"copy" , 0ULL, 0, NULL}
- , {"copy_notify" , 0ULL, 0, NULL}
- , {"deallocate" , 0ULL, 0, NULL}
- , {"ioadvise" , 0ULL, 0, NULL}
- , {"layouterror" , 0ULL, 0, NULL}
- , {"layoutstats" , 0ULL, 0, NULL}
- , {"offload_cancel" , 0ULL, 0, NULL}
- , {"offload_status" , 0ULL, 0, NULL}
- , {"read_plus" , 0ULL, 0, NULL}
- , {"seek" , 0ULL, 0, NULL}
- , {"write_same" , 0ULL, 0, NULL}
- ,
-
- /* termination */
- { "" , 0ULL, 0, NULL}
-};
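Unlike the name-driven procN tables, nfsd4_ops_values is positional: the kernel emits one counter per NFSv4 operation number, which is why the first three slots are placeholders for op numbers 0-2 that the protocol never assigns. Mapping an operation number to its label is then plain array indexing, as in this illustrative lookup:

#include <stdio.h>

static const char *nfs4_ops[] = {
    "unused_op0", "unused_op1", "future_op2",
    "access", "close", "commit", "create"  /* ...continues as in the table above */
};

int main(void) {
    unsigned op = 4;  /* OP_CLOSE in this positional layout */
    if (op < sizeof(nfs4_ops) / sizeof(nfs4_ops[0]))
        printf("op %u -> %s\n", op, nfs4_ops[op]);
    return 0;
}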
-
-
-int do_proc_net_rpc_nfsd(int update_every, usec_t dt) {
- (void)dt;
- static procfile *ff = NULL;
- static int do_rc = -1, do_fh = -1, do_io = -1, do_th = -1, do_net = -1, do_rpc = -1, do_proc2 = -1, do_proc3 = -1, do_proc4 = -1, do_proc4ops = -1;
- static int proc2_warning = 0, proc3_warning = 0, proc4_warning = 0, proc4ops_warning = 0;
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/rpc/nfsd");
- ff = procfile_open(config_get("plugin:proc:/proc/net/rpc/nfsd", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
-    if(unlikely(!ff)) return 0; // we return 0, so that we will retry reading it next time
-
- if(unlikely(do_rc == -1)) {
- do_rc = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "read cache", 1);
- do_fh = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "file handles", 1);
- do_io = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "I/O", 1);
- do_th = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "threads", 1);
- do_net = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "network", 1);
- do_rpc = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "rpc", 1);
- do_proc2 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v2 procedures", 1);
- do_proc3 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v3 procedures", 1);
- do_proc4 = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v4 procedures", 1);
- do_proc4ops = config_get_boolean("plugin:proc:/proc/net/rpc/nfsd", "NFS v4 operations", 1);
- }
-
-    // if they are enabled, reset them to 1
-    // once a line type is matched it becomes 2, so strcmp() is skipped for it on the remaining lines
- if(do_rc) do_rc = 1;
- if(do_fh) do_fh = 1;
- if(do_io) do_io = 1;
- if(do_th) do_th = 1;
- if(do_net) do_net = 1;
- if(do_rpc) do_rpc = 1;
- if(do_proc2) do_proc2 = 1;
- if(do_proc3) do_proc3 = 1;
- if(do_proc4) do_proc4 = 1;
- if(do_proc4ops) do_proc4ops = 1;
-
- size_t lines = procfile_lines(ff), l;
-
- char *type;
- unsigned long long rc_hits = 0, rc_misses = 0, rc_nocache = 0;
- unsigned long long fh_stale = 0;
- unsigned long long io_read = 0, io_write = 0;
- unsigned long long th_threads = 0;
- unsigned long long net_count = 0, net_udp_count = 0, net_tcp_count = 0, net_tcp_connections = 0;
- unsigned long long rpc_calls = 0, rpc_bad_format = 0, rpc_bad_auth = 0, rpc_bad_client = 0;
-
- for(l = 0; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
- if(unlikely(!words)) continue;
-
- type = procfile_lineword(ff, l, 0);
-
- if(do_rc == 1 && strcmp(type, "rc") == 0) {
- if(unlikely(words < 4)) {
- collector_error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 4);
- continue;
- }
-
- rc_hits = str2ull(procfile_lineword(ff, l, 1), NULL);
- rc_misses = str2ull(procfile_lineword(ff, l, 2), NULL);
- rc_nocache = str2ull(procfile_lineword(ff, l, 3), NULL);
-
- unsigned long long sum = rc_hits + rc_misses + rc_nocache;
- if(sum == 0ULL) do_rc = -1;
- else do_rc = 2;
- }
- else if(do_fh == 1 && strcmp(type, "fh") == 0) {
- if(unlikely(words < 6)) {
- collector_error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 6);
- continue;
- }
-
- fh_stale = str2ull(procfile_lineword(ff, l, 1), NULL);
-
-            // other file handle metrics were never used and are always zero
-
- if(fh_stale == 0ULL) do_fh = -1;
- else do_fh = 2;
- }
- else if(do_io == 1 && strcmp(type, "io") == 0) {
- if(unlikely(words < 3)) {
- collector_error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 3);
- continue;
- }
-
- io_read = str2ull(procfile_lineword(ff, l, 1), NULL);
- io_write = str2ull(procfile_lineword(ff, l, 2), NULL);
-
- unsigned long long sum = io_read + io_write;
- if(sum == 0ULL) do_io = -1;
- else do_io = 2;
- }
- else if(do_th == 1 && strcmp(type, "th") == 0) {
- if(unlikely(words < 13)) {
- collector_error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 13);
- continue;
- }
-
- th_threads = str2ull(procfile_lineword(ff, l, 1), NULL);
-
- // thread histogram has been disabled since 2009 (kernel 2.6.30)
- // https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=8bbfa9f3889b643fc7de82c0c761ef17097f8faf
-
- do_th = 2;
- }
- else if(do_net == 1 && strcmp(type, "net") == 0) {
- if(unlikely(words < 5)) {
- collector_error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 5);
- continue;
- }
-
- net_count = str2ull(procfile_lineword(ff, l, 1), NULL);
- net_udp_count = str2ull(procfile_lineword(ff, l, 2), NULL);
- net_tcp_count = str2ull(procfile_lineword(ff, l, 3), NULL);
- net_tcp_connections = str2ull(procfile_lineword(ff, l, 4), NULL);
-
- unsigned long long sum = net_count + net_udp_count + net_tcp_count + net_tcp_connections;
- if(sum == 0ULL) do_net = -1;
- else do_net = 2;
- }
- else if(do_rpc == 1 && strcmp(type, "rpc") == 0) {
- if(unlikely(words < 6)) {
- collector_error("%s line of /proc/net/rpc/nfsd has %zu words, expected %d", type, words, 6);
- continue;
- }
-
- rpc_calls = str2ull(procfile_lineword(ff, l, 1), NULL);
- rpc_bad_format = str2ull(procfile_lineword(ff, l, 3), NULL);
- rpc_bad_auth = str2ull(procfile_lineword(ff, l, 4), NULL);
- rpc_bad_client = str2ull(procfile_lineword(ff, l, 5), NULL);
-
- unsigned long long sum = rpc_calls + rpc_bad_format + rpc_bad_auth + rpc_bad_client;
- if(sum == 0ULL) do_rpc = -1;
- else do_rpc = 2;
- }
- else if(do_proc2 == 1 && strcmp(type, "proc2") == 0) {
- // the first number is the count of numbers present
-            // so we start from word 2
-
- unsigned long long sum = 0;
- unsigned int i, j;
- for(i = 0, j = 2; j < words && nfsd_proc2_values[i].name[0] ; i++, j++) {
- nfsd_proc2_values[i].value = str2ull(procfile_lineword(ff, l, j), NULL);
- nfsd_proc2_values[i].present = 1;
- sum += nfsd_proc2_values[i].value;
- }
-
- if(sum == 0ULL) {
- if(!proc2_warning) {
- collector_error("Disabling /proc/net/rpc/nfsd v2 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
- proc2_warning = 1;
- }
- do_proc2 = 0;
- }
- else do_proc2 = 2;
- }
- else if(do_proc3 == 1 && strcmp(type, "proc3") == 0) {
- // the first number is the count of numbers present
-            // so we start from word 2
-
- unsigned long long sum = 0;
- unsigned int i, j;
- for(i = 0, j = 2; j < words && nfsd_proc3_values[i].name[0] ; i++, j++) {
- nfsd_proc3_values[i].value = str2ull(procfile_lineword(ff, l, j), NULL);
- nfsd_proc3_values[i].present = 1;
- sum += nfsd_proc3_values[i].value;
- }
-
- if(sum == 0ULL) {
- if(!proc3_warning) {
- collector_info("Disabling /proc/net/rpc/nfsd v3 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
- proc3_warning = 1;
- }
- do_proc3 = 0;
- }
- else do_proc3 = 2;
- }
- else if(do_proc4 == 1 && strcmp(type, "proc4") == 0) {
- // the first number is the count of numbers present
-            // so we start from word 2
-
- unsigned long long sum = 0;
- unsigned int i, j;
- for(i = 0, j = 2; j < words && nfsd_proc4_values[i].name[0] ; i++, j++) {
- nfsd_proc4_values[i].value = str2ull(procfile_lineword(ff, l, j), NULL);
- nfsd_proc4_values[i].present = 1;
- sum += nfsd_proc4_values[i].value;
- }
-
- if(sum == 0ULL) {
- if(!proc4_warning) {
- collector_info("Disabling /proc/net/rpc/nfsd v4 procedure calls chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
- proc4_warning = 1;
- }
- do_proc4 = 0;
- }
- else do_proc4 = 2;
- }
- else if(do_proc4ops == 1 && strcmp(type, "proc4ops") == 0) {
- // the first number is the count of numbers present
-            // so we start from word 2
-
- unsigned long long sum = 0;
- unsigned int i, j;
- for(i = 0, j = 2; j < words && nfsd4_ops_values[i].name[0] ; i++, j++) {
- nfsd4_ops_values[i].value = str2ull(procfile_lineword(ff, l, j), NULL);
- nfsd4_ops_values[i].present = 1;
- sum += nfsd4_ops_values[i].value;
- }
-
- if(sum == 0ULL) {
- if(!proc4ops_warning) {
- collector_info("Disabling /proc/net/rpc/nfsd v4 operations chart. It seems unused on this machine. It will be enabled automatically when found with data in it.");
- proc4ops_warning = 1;
- }
- do_proc4ops = 0;
- }
- else do_proc4ops = 2;
- }
- }
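Each tagged line is validated against a minimum word count before any field is read; the read-cache line, for instance, must carry the tag plus three counters (hits, misses, nocache). The equivalent check in isolation, using sscanf() and invented sample values:

#include <stdio.h>

int main(void) {
    const char *line = "rc 1458 291641 3";  /* hits, misses, nocache */
    unsigned long long hits, misses, nocache;
    if (sscanf(line, "rc %llu %llu %llu", &hits, &misses, &nocache) != 3) {
        fprintf(stderr, "rc line has fewer fields than expected\n");
        return 1;
    }
    printf("hits=%llu misses=%llu nocache=%llu\n", hits, misses, nocache);
    return 0;
}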
-
- if(do_rc == 2) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_hits = NULL,
- *rd_misses = NULL,
- *rd_nocache = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfsd"
- , "readcache"
- , NULL
- , "cache"
- , NULL
- , "NFS Server Read Cache"
- , "reads/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFSD_NAME
- , NETDATA_CHART_PRIO_NFSD_READCACHE
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_hits = rrddim_add(st, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_misses = rrddim_add(st, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_nocache = rrddim_add(st, "nocache", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_hits, rc_hits);
- rrddim_set_by_pointer(st, rd_misses, rc_misses);
- rrddim_set_by_pointer(st, rd_nocache, rc_nocache);
- rrdset_done(st);
- }
-
- if(do_fh == 2) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_stale = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfsd"
- , "filehandles"
- , NULL
- , "filehandles"
- , NULL
- , "NFS Server File Handles"
- , "handles/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFSD_NAME
- , NETDATA_CHART_PRIO_NFSD_FILEHANDLES
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_stale = rrddim_add(st, "stale", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_stale, fh_stale);
- rrdset_done(st);
- }
-
- if(do_io == 2) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_read = NULL,
- *rd_write = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfsd"
- , "io"
- , NULL
- , "io"
- , NULL
- , "NFS Server I/O"
- , "kilobytes/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFSD_NAME
- , NETDATA_CHART_PRIO_NFSD_IO
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_read = rrddim_add(st, "read", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
- rd_write = rrddim_add(st, "write", NULL, -1, 1000, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_read, io_read);
- rrddim_set_by_pointer(st, rd_write, io_write);
- rrdset_done(st);
- }
-
- if(do_th == 2) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_threads = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfsd"
- , "threads"
- , NULL
- , "threads"
- , NULL
- , "NFS Server Threads"
- , "threads"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFSD_NAME
- , NETDATA_CHART_PRIO_NFSD_THREADS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_threads = rrddim_add(st, "threads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_threads, th_threads);
- rrdset_done(st);
- }
-
- if(do_net == 2) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_udp = NULL,
- *rd_tcp = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfsd"
- , "net"
- , NULL
- , "network"
- , NULL
- , "NFS Server Network Statistics"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFSD_NAME
- , NETDATA_CHART_PRIO_NFSD_NET
- , update_every
- , RRDSET_TYPE_STACKED
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- // ignore net_count, net_tcp_connections
- (void)net_count;
- (void)net_tcp_connections;
-
- rrddim_set_by_pointer(st, rd_udp, net_udp_count);
- rrddim_set_by_pointer(st, rd_tcp, net_tcp_count);
- rrdset_done(st);
- }
-
- if(do_rpc == 2) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_calls = NULL,
- *rd_bad_format = NULL,
- *rd_bad_auth = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfsd"
- , "rpc"
- , NULL
- , "rpc"
- , NULL
- , "NFS Server Remote Procedure Calls Statistics"
- , "calls/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFSD_NAME
- , NETDATA_CHART_PRIO_NFSD_RPC
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_bad_format = rrddim_add(st, "bad_format", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_bad_auth = rrddim_add(st, "bad_auth", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- // ignore rpc_bad_client
- (void)rpc_bad_client;
-
- rrddim_set_by_pointer(st, rd_calls, rpc_calls);
- rrddim_set_by_pointer(st, rd_bad_format, rpc_bad_format);
- rrddim_set_by_pointer(st, rd_bad_auth, rpc_bad_auth);
- rrdset_done(st);
- }
-
- if(do_proc2 == 2) {
- static RRDSET *st = NULL;
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfsd"
- , "proc2"
- , NULL
- , "nfsv2rpc"
- , NULL
- , "NFS v2 Server Remote Procedure Calls"
- , "calls/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFSD_NAME
- , NETDATA_CHART_PRIO_NFSD_PROC2
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- size_t i;
- for(i = 0; nfsd_proc2_values[i].present ; i++) {
- if(unlikely(!nfsd_proc2_values[i].rd))
- nfsd_proc2_values[i].rd = rrddim_add(st, nfsd_proc2_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st, nfsd_proc2_values[i].rd, nfsd_proc2_values[i].value);
- }
-
- rrdset_done(st);
- }
-
- if(do_proc3 == 2) {
- static RRDSET *st = NULL;
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfsd"
- , "proc3"
- , NULL
- , "nfsv3rpc"
- , NULL
- , "NFS v3 Server Remote Procedure Calls"
- , "calls/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFSD_NAME
- , NETDATA_CHART_PRIO_NFSD_PROC3
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- size_t i;
- for(i = 0; nfsd_proc3_values[i].present ; i++) {
- if(unlikely(!nfsd_proc3_values[i].rd))
- nfsd_proc3_values[i].rd = rrddim_add(st, nfsd_proc3_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st, nfsd_proc3_values[i].rd, nfsd_proc3_values[i].value);
- }
-
- rrdset_done(st);
- }
-
- if(do_proc4 == 2) {
- static RRDSET *st = NULL;
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfsd"
- , "proc4"
- , NULL
- , "nfsv4rpc"
- , NULL
- , "NFS v4 Server Remote Procedure Calls"
- , "calls/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFSD_NAME
- , NETDATA_CHART_PRIO_NFSD_PROC4
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- size_t i;
- for(i = 0; nfsd_proc4_values[i].present ; i++) {
- if(unlikely(!nfsd_proc4_values[i].rd))
- nfsd_proc4_values[i].rd = rrddim_add(st, nfsd_proc4_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st, nfsd_proc4_values[i].rd, nfsd_proc4_values[i].value);
- }
-
- rrdset_done(st);
- }
-
- if(do_proc4ops == 2) {
- static RRDSET *st = NULL;
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "nfsd"
- , "proc4ops"
- , NULL
- , "nfsv4ops"
- , NULL
- , "NFS v4 Server Operations"
- , "operations/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NFSD_NAME
- , NETDATA_CHART_PRIO_NFSD_PROC4OPS
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- size_t i;
- for(i = 0; nfsd4_ops_values[i].present ; i++) {
- if(unlikely(!nfsd4_ops_values[i].rd))
- nfsd4_ops_values[i].rd = rrddim_add(st, nfsd4_ops_values[i].name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(st, nfsd4_ops_values[i].rd, nfsd4_ops_values[i].value);
- }
-
- rrdset_done(st);
- }
-
- return 0;
-}
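Nearly every dimension above is RRD_ALGORITHM_INCREMENTAL: the collected values are monotonically increasing kernel counters, and the charting engine turns consecutive samples into per-second rates (the thread count is the notable RRD_ALGORITHM_ABSOLUTE gauge). The conversion amounts to the following sketch, though the real engine also copes with counter wraps and irregular intervals:

#include <stdio.h>

int main(void) {
    unsigned long long prev = 1042, curr = 1187;  /* two samples of a counter */
    double interval_s = 1.0;                      /* update_every in seconds */
    double rate = (double)(curr - prev) / interval_s;
    printf("%.0f calls/s\n", rate);               /* -> 145 calls/s */
    return 0;
}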
diff --git a/collectors/proc.plugin/proc_net_sctp_snmp.c b/collectors/proc.plugin/proc_net_sctp_snmp.c
deleted file mode 100644
index e67143e69..000000000
--- a/collectors/proc.plugin/proc_net_sctp_snmp.c
+++ /dev/null
@@ -1,367 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-#define PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME "/proc/net/sctp/snmp"
-
-int do_proc_net_sctp_snmp(int update_every, usec_t dt) {
- (void)dt;
-
- static procfile *ff = NULL;
-
- static int
- do_associations = -1,
- do_transitions = -1,
- do_packet_errors = -1,
- do_packets = -1,
- do_fragmentation = -1,
- do_chunk_types = -1;
-
- static ARL_BASE *arl_base = NULL;
-
- static unsigned long long SctpCurrEstab = 0ULL;
- static unsigned long long SctpActiveEstabs = 0ULL;
- static unsigned long long SctpPassiveEstabs = 0ULL;
- static unsigned long long SctpAborteds = 0ULL;
- static unsigned long long SctpShutdowns = 0ULL;
- static unsigned long long SctpOutOfBlues = 0ULL;
- static unsigned long long SctpChecksumErrors = 0ULL;
- static unsigned long long SctpOutCtrlChunks = 0ULL;
- static unsigned long long SctpOutOrderChunks = 0ULL;
- static unsigned long long SctpOutUnorderChunks = 0ULL;
- static unsigned long long SctpInCtrlChunks = 0ULL;
- static unsigned long long SctpInOrderChunks = 0ULL;
- static unsigned long long SctpInUnorderChunks = 0ULL;
- static unsigned long long SctpFragUsrMsgs = 0ULL;
- static unsigned long long SctpReasmUsrMsgs = 0ULL;
- static unsigned long long SctpOutSCTPPacks = 0ULL;
- static unsigned long long SctpInSCTPPacks = 0ULL;
- static unsigned long long SctpT1InitExpireds = 0ULL;
- static unsigned long long SctpT1CookieExpireds = 0ULL;
- static unsigned long long SctpT2ShutdownExpireds = 0ULL;
- static unsigned long long SctpT3RtxExpireds = 0ULL;
- static unsigned long long SctpT4RtoExpireds = 0ULL;
- static unsigned long long SctpT5ShutdownGuardExpireds = 0ULL;
- static unsigned long long SctpDelaySackExpireds = 0ULL;
- static unsigned long long SctpAutocloseExpireds = 0ULL;
- static unsigned long long SctpT3Retransmits = 0ULL;
- static unsigned long long SctpPmtudRetransmits = 0ULL;
- static unsigned long long SctpFastRetransmits = 0ULL;
- static unsigned long long SctpInPktSoftirq = 0ULL;
- static unsigned long long SctpInPktBacklog = 0ULL;
- static unsigned long long SctpInPktDiscards = 0ULL;
- static unsigned long long SctpInDataChunkDiscards = 0ULL;
-
- if(unlikely(!arl_base)) {
- do_associations = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "established associations", CONFIG_BOOLEAN_AUTO);
- do_transitions = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "association transitions", CONFIG_BOOLEAN_AUTO);
- do_fragmentation = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "fragmentation", CONFIG_BOOLEAN_AUTO);
- do_packets = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "packets", CONFIG_BOOLEAN_AUTO);
- do_packet_errors = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "packet errors", CONFIG_BOOLEAN_AUTO);
- do_chunk_types = config_get_boolean_ondemand("plugin:proc:/proc/net/sctp/snmp", "chunk types", CONFIG_BOOLEAN_AUTO);
-
- arl_base = arl_create("sctp", NULL, 60);
- arl_expect(arl_base, "SctpCurrEstab", &SctpCurrEstab);
- arl_expect(arl_base, "SctpActiveEstabs", &SctpActiveEstabs);
- arl_expect(arl_base, "SctpPassiveEstabs", &SctpPassiveEstabs);
- arl_expect(arl_base, "SctpAborteds", &SctpAborteds);
- arl_expect(arl_base, "SctpShutdowns", &SctpShutdowns);
- arl_expect(arl_base, "SctpOutOfBlues", &SctpOutOfBlues);
- arl_expect(arl_base, "SctpChecksumErrors", &SctpChecksumErrors);
- arl_expect(arl_base, "SctpOutCtrlChunks", &SctpOutCtrlChunks);
- arl_expect(arl_base, "SctpOutOrderChunks", &SctpOutOrderChunks);
- arl_expect(arl_base, "SctpOutUnorderChunks", &SctpOutUnorderChunks);
- arl_expect(arl_base, "SctpInCtrlChunks", &SctpInCtrlChunks);
- arl_expect(arl_base, "SctpInOrderChunks", &SctpInOrderChunks);
- arl_expect(arl_base, "SctpInUnorderChunks", &SctpInUnorderChunks);
- arl_expect(arl_base, "SctpFragUsrMsgs", &SctpFragUsrMsgs);
- arl_expect(arl_base, "SctpReasmUsrMsgs", &SctpReasmUsrMsgs);
- arl_expect(arl_base, "SctpOutSCTPPacks", &SctpOutSCTPPacks);
- arl_expect(arl_base, "SctpInSCTPPacks", &SctpInSCTPPacks);
- arl_expect(arl_base, "SctpT1InitExpireds", &SctpT1InitExpireds);
- arl_expect(arl_base, "SctpT1CookieExpireds", &SctpT1CookieExpireds);
- arl_expect(arl_base, "SctpT2ShutdownExpireds", &SctpT2ShutdownExpireds);
- arl_expect(arl_base, "SctpT3RtxExpireds", &SctpT3RtxExpireds);
- arl_expect(arl_base, "SctpT4RtoExpireds", &SctpT4RtoExpireds);
- arl_expect(arl_base, "SctpT5ShutdownGuardExpireds", &SctpT5ShutdownGuardExpireds);
- arl_expect(arl_base, "SctpDelaySackExpireds", &SctpDelaySackExpireds);
- arl_expect(arl_base, "SctpAutocloseExpireds", &SctpAutocloseExpireds);
- arl_expect(arl_base, "SctpT3Retransmits", &SctpT3Retransmits);
- arl_expect(arl_base, "SctpPmtudRetransmits", &SctpPmtudRetransmits);
- arl_expect(arl_base, "SctpFastRetransmits", &SctpFastRetransmits);
- arl_expect(arl_base, "SctpInPktSoftirq", &SctpInPktSoftirq);
- arl_expect(arl_base, "SctpInPktBacklog", &SctpInPktBacklog);
- arl_expect(arl_base, "SctpInPktDiscards", &SctpInPktDiscards);
- arl_expect(arl_base, "SctpInDataChunkDiscards", &SctpInDataChunkDiscards);
- }
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sctp/snmp");
- ff = procfile_open(config_get("plugin:proc:/proc/net/sctp/snmp", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff))
- return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
-        return 0; // we return 0, so that we will retry reading it next time
-
- size_t lines = procfile_lines(ff), l;
-
- arl_begin(arl_base);
-
- for(l = 0; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
- if(unlikely(words < 2)) {
- if(unlikely(words)) collector_error("Cannot read /proc/net/sctp/snmp line %zu. Expected 2 params, read %zu.", l, words);
- continue;
- }
-
- if(unlikely(arl_check(arl_base,
- procfile_lineword(ff, l, 0),
- procfile_lineword(ff, l, 1)))) break;
- }
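arl_* is netdata's adaptive resortable list: each expected metric name is bound to the address of a local variable, and the loop above only hands it (name, value) word pairs. A reduced standalone equivalent with a plain linear lookup (the sample pair is invented; only the idea is mirrored here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct expect { const char *name; unsigned long long *target; };

int main(void) {
    unsigned long long SctpCurrEstab = 0, SctpInSCTPPacks = 0;
    struct expect table[] = {
        { "SctpCurrEstab",   &SctpCurrEstab },
        { "SctpInSCTPPacks", &SctpInSCTPPacks },
        { NULL, NULL }  /* terminator */
    };
    /* each /proc/net/sctp/snmp line splits into a name and a value */
    const char *name = "SctpCurrEstab", *value = "12";
    for (int i = 0; table[i].name; i++)
        if (strcmp(name, table[i].name) == 0) {
            *table[i].target = strtoull(value, NULL, 10);
            break;
        }
    printf("SctpCurrEstab=%llu SctpInSCTPPacks=%llu\n", SctpCurrEstab, SctpInSCTPPacks);
    return 0;
}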
-
- // --------------------------------------------------------------------
-
- if(do_associations == CONFIG_BOOLEAN_YES || (do_associations == CONFIG_BOOLEAN_AUTO &&
- (SctpCurrEstab || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_associations = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_established = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "sctp"
- , "established"
- , NULL
- , "associations"
- , NULL
- , "SCTP current total number of established associations"
- , "associations"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
- , NETDATA_CHART_PRIO_SCTP
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_established = rrddim_add(st, "SctpCurrEstab", "established", 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_established, SctpCurrEstab);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_transitions == CONFIG_BOOLEAN_YES || (do_transitions == CONFIG_BOOLEAN_AUTO &&
- (SctpActiveEstabs ||
- SctpPassiveEstabs ||
- SctpAborteds ||
- SctpShutdowns ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_transitions = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_active = NULL,
- *rd_passive = NULL,
- *rd_aborted = NULL,
- *rd_shutdown = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "sctp"
- , "transitions"
- , NULL
- , "transitions"
- , NULL
- , "SCTP Association Transitions"
- , "transitions/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
- , NETDATA_CHART_PRIO_SCTP + 10
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_active = rrddim_add(st, "SctpActiveEstabs", "active", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_passive = rrddim_add(st, "SctpPassiveEstabs", "passive", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_aborted = rrddim_add(st, "SctpAborteds", "aborted", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_shutdown = rrddim_add(st, "SctpShutdowns", "shutdown", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_active, SctpActiveEstabs);
- rrddim_set_by_pointer(st, rd_passive, SctpPassiveEstabs);
- rrddim_set_by_pointer(st, rd_aborted, SctpAborteds);
- rrddim_set_by_pointer(st, rd_shutdown, SctpShutdowns);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_packets == CONFIG_BOOLEAN_YES || (do_packets == CONFIG_BOOLEAN_AUTO &&
- (SctpInSCTPPacks ||
- SctpOutSCTPPacks ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_packets = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_received = NULL,
- *rd_sent = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "sctp"
- , "packets"
- , NULL
- , "packets"
- , NULL
- , "SCTP Packets"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
- , NETDATA_CHART_PRIO_SCTP + 20
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_received = rrddim_add(st, "SctpInSCTPPacks", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_sent = rrddim_add(st, "SctpOutSCTPPacks", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_received, SctpInSCTPPacks);
- rrddim_set_by_pointer(st, rd_sent, SctpOutSCTPPacks);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_packet_errors == CONFIG_BOOLEAN_YES || (do_packet_errors == CONFIG_BOOLEAN_AUTO &&
- (SctpOutOfBlues ||
- SctpChecksumErrors ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_packet_errors = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM *rd_invalid = NULL,
- *rd_csum = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "sctp"
- , "packet_errors"
- , NULL
- , "packets"
- , NULL
- , "SCTP Packet Errors"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
- , NETDATA_CHART_PRIO_SCTP + 30
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_invalid = rrddim_add(st, "SctpOutOfBlues", "invalid", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_csum = rrddim_add(st, "SctpChecksumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_invalid, SctpOutOfBlues);
- rrddim_set_by_pointer(st, rd_csum, SctpChecksumErrors);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_fragmentation == CONFIG_BOOLEAN_YES || (do_fragmentation == CONFIG_BOOLEAN_AUTO &&
- (SctpFragUsrMsgs ||
- SctpReasmUsrMsgs ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_fragmentation = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_fragmented = NULL,
- *rd_reassembled = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "sctp"
- , "fragmentation"
- , NULL
- , "fragmentation"
- , NULL
- , "SCTP Fragmentation"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
- , NETDATA_CHART_PRIO_SCTP + 40
- , update_every
- , RRDSET_TYPE_LINE);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_reassembled = rrddim_add(st, "SctpReasmUsrMsgs", "reassembled", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_fragmented = rrddim_add(st, "SctpFragUsrMsgs", "fragmented", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_reassembled, SctpReasmUsrMsgs);
- rrddim_set_by_pointer(st, rd_fragmented, SctpFragUsrMsgs);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_chunk_types == CONFIG_BOOLEAN_YES || (do_chunk_types == CONFIG_BOOLEAN_AUTO &&
- (SctpInCtrlChunks ||
- SctpInOrderChunks ||
- SctpInUnorderChunks ||
- SctpOutCtrlChunks ||
- SctpOutOrderChunks ||
- SctpOutUnorderChunks ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_chunk_types = CONFIG_BOOLEAN_YES;
- static RRDSET *st = NULL;
- static RRDDIM
- *rd_InCtrl = NULL,
- *rd_InOrder = NULL,
- *rd_InUnorder = NULL,
- *rd_OutCtrl = NULL,
- *rd_OutOrder = NULL,
- *rd_OutUnorder = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "sctp"
- , "chunks"
- , NULL
- , "chunks"
- , NULL
- , "SCTP Chunk Types"
- , "chunks/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SCTP_SNMP_NAME
- , NETDATA_CHART_PRIO_SCTP + 50
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_InCtrl = rrddim_add(st, "SctpInCtrlChunks", "InCtrl", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InOrder = rrddim_add(st, "SctpInOrderChunks", "InOrder", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_InUnorder = rrddim_add(st, "SctpInUnorderChunks", "InUnorder", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutCtrl = rrddim_add(st, "SctpOutCtrlChunks", "OutCtrl", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutOrder = rrddim_add(st, "SctpOutOrderChunks", "OutOrder", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_OutUnorder = rrddim_add(st, "SctpOutUnorderChunks", "OutUnorder", -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_InCtrl, SctpInCtrlChunks);
- rrddim_set_by_pointer(st, rd_InOrder, SctpInOrderChunks);
- rrddim_set_by_pointer(st, rd_InUnorder, SctpInUnorderChunks);
- rrddim_set_by_pointer(st, rd_OutCtrl, SctpOutCtrlChunks);
- rrddim_set_by_pointer(st, rd_OutOrder, SctpOutOrderChunks);
- rrddim_set_by_pointer(st, rd_OutUnorder, SctpOutUnorderChunks);
- rrdset_done(st);
- }
-
- return 0;
-}
-
diff --git a/collectors/proc.plugin/proc_net_sockstat.c b/collectors/proc.plugin/proc_net_sockstat.c
deleted file mode 100644
index b0feab5fa..000000000
--- a/collectors/proc.plugin/proc_net_sockstat.c
+++ /dev/null
@@ -1,529 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME "/proc/net/sockstat"
-
-static struct proc_net_sockstat {
- kernel_uint_t sockets_used;
-
- kernel_uint_t tcp_inuse;
- kernel_uint_t tcp_orphan;
- kernel_uint_t tcp_tw;
- kernel_uint_t tcp_alloc;
- kernel_uint_t tcp_mem;
-
- kernel_uint_t udp_inuse;
- kernel_uint_t udp_mem;
-
- kernel_uint_t udplite_inuse;
-
- kernel_uint_t raw_inuse;
-
- kernel_uint_t frag_inuse;
- kernel_uint_t frag_memory;
-} sockstat_root = { 0 };
-
-
-static int read_tcp_mem(void) {
- static char *filename = NULL;
- static const RRDVAR_ACQUIRED *tcp_mem_low_threshold = NULL,
- *tcp_mem_pressure_threshold = NULL,
- *tcp_mem_high_threshold = NULL;
-
- if(unlikely(!tcp_mem_low_threshold)) {
- tcp_mem_low_threshold = rrdvar_custom_host_variable_add_and_acquire(localhost, "tcp_mem_low");
- tcp_mem_pressure_threshold = rrdvar_custom_host_variable_add_and_acquire(localhost, "tcp_mem_pressure");
- tcp_mem_high_threshold = rrdvar_custom_host_variable_add_and_acquire(localhost, "tcp_mem_high");
- }
-
- if(unlikely(!filename)) {
- char buffer[FILENAME_MAX + 1];
- snprintfz(buffer, FILENAME_MAX, "%s/proc/sys/net/ipv4/tcp_mem", netdata_configured_host_prefix);
- filename = strdupz(buffer);
- }
-
- char buffer[200 + 1], *start, *end;
- if(read_file(filename, buffer, 200) != 0) return 1;
- buffer[200] = '\0';
-
- unsigned long long low = 0, pressure = 0, high = 0;
-
- start = buffer;
- low = strtoull(start, &end, 10);
-
- start = end;
- pressure = strtoull(start, &end, 10);
-
- start = end;
- high = strtoull(start, &end, 10);
-
- // fprintf(stderr, "TCP MEM low = %llu, pressure = %llu, high = %llu\n", low, pressure, high);
-
- rrdvar_custom_host_variable_set(localhost, tcp_mem_low_threshold, low * sysconf(_SC_PAGESIZE) / 1024.0);
- rrdvar_custom_host_variable_set(localhost, tcp_mem_pressure_threshold, pressure * sysconf(_SC_PAGESIZE) / 1024.0);
- rrdvar_custom_host_variable_set(localhost, tcp_mem_high_threshold, high * sysconf(_SC_PAGESIZE) / 1024.0);
-
- return 0;
-}
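/proc/sys/net/ipv4/tcp_mem is denominated in pages while the host variables are published in KiB, hence the * sysconf(_SC_PAGESIZE) / 1024.0 above; with 4 KiB pages, a low of 188385 pages works out to 188385 * 4 = 753540 KiB. The same three-number parse in isolation, with invented file contents:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void) {
    char buffer[] = "188385\t251181\t376770\n";  /* sample tcp_mem contents */
    char *end = buffer;
    unsigned long long low      = strtoull(end, &end, 10);
    unsigned long long pressure = strtoull(end, &end, 10);
    unsigned long long high     = strtoull(end, &end, 10);
    double page_kib = sysconf(_SC_PAGESIZE) / 1024.0;  /* pages -> KiB */
    printf("low=%.0f KiB pressure=%.0f KiB high=%.0f KiB\n",
           low * page_kib, pressure * page_kib, high * page_kib);
    return 0;
}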
-
-static kernel_uint_t read_tcp_max_orphans(void) {
- static char *filename = NULL;
- static const RRDVAR_ACQUIRED *tcp_max_orphans_var = NULL;
-
- if(unlikely(!filename)) {
- char buffer[FILENAME_MAX + 1];
- snprintfz(buffer, FILENAME_MAX, "%s/proc/sys/net/ipv4/tcp_max_orphans", netdata_configured_host_prefix);
- filename = strdupz(buffer);
- }
-
- unsigned long long tcp_max_orphans = 0;
- if(read_single_number_file(filename, &tcp_max_orphans) == 0) {
-
- if(unlikely(!tcp_max_orphans_var))
- tcp_max_orphans_var = rrdvar_custom_host_variable_add_and_acquire(localhost, "tcp_max_orphans");
-
- rrdvar_custom_host_variable_set(localhost, tcp_max_orphans_var, tcp_max_orphans);
- return tcp_max_orphans;
- }
-
- return 0;
-}
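read_single_number_file() above is a small netdata helper; a plain-stdio equivalent for a sysctl-style file holding one integer would look like this sketch (real path, minimal error handling, no host prefix):

#include <stdio.h>

int main(void) {
    unsigned long long tcp_max_orphans = 0;
    FILE *fp = fopen("/proc/sys/net/ipv4/tcp_max_orphans", "r");
    if (!fp) return 1;
    if (fscanf(fp, "%llu", &tcp_max_orphans) == 1)
        printf("tcp_max_orphans=%llu\n", tcp_max_orphans);
    fclose(fp);
    return 0;
}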
-
-int do_proc_net_sockstat(int update_every, usec_t dt) {
- (void)dt;
-
- static procfile *ff = NULL;
-
- static uint32_t hash_sockets = 0,
- hash_raw = 0,
- hash_frag = 0,
- hash_tcp = 0,
- hash_udp = 0,
- hash_udplite = 0;
-
- static long long update_constants_every = 60, update_constants_count = 0;
-
- static ARL_BASE *arl_sockets = NULL;
- static ARL_BASE *arl_tcp = NULL;
- static ARL_BASE *arl_udp = NULL;
- static ARL_BASE *arl_udplite = NULL;
- static ARL_BASE *arl_raw = NULL;
- static ARL_BASE *arl_frag = NULL;
-
- static int do_sockets = -1, do_tcp_sockets = -1, do_tcp_mem = -1, do_udp_sockets = -1, do_udp_mem = -1, do_udplite_sockets = -1, do_raw_sockets = -1, do_frag_sockets = -1, do_frag_mem = -1;
-
- static char *keys[7] = { NULL };
- static uint32_t hashes[7] = { 0 };
- static ARL_BASE *bases[7] = { NULL };
-
- if(unlikely(!arl_sockets)) {
- do_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 sockets", CONFIG_BOOLEAN_AUTO);
- do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 TCP sockets", CONFIG_BOOLEAN_AUTO);
- do_tcp_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 TCP memory", CONFIG_BOOLEAN_AUTO);
- do_udp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDP sockets", CONFIG_BOOLEAN_AUTO);
- do_udp_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDP memory", CONFIG_BOOLEAN_AUTO);
- do_udplite_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 UDPLITE sockets", CONFIG_BOOLEAN_AUTO);
- do_raw_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 RAW sockets", CONFIG_BOOLEAN_AUTO);
- do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG sockets", CONFIG_BOOLEAN_AUTO);
- do_frag_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG memory", CONFIG_BOOLEAN_AUTO);
-
- update_constants_every = config_get_number("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every);
- update_constants_count = update_constants_every;
-
- arl_sockets = arl_create("sockstat/sockets", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_sockets, "used", &sockstat_root.sockets_used);
-
- arl_tcp = arl_create("sockstat/TCP", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_tcp, "inuse", &sockstat_root.tcp_inuse);
- arl_expect(arl_tcp, "orphan", &sockstat_root.tcp_orphan);
- arl_expect(arl_tcp, "tw", &sockstat_root.tcp_tw);
- arl_expect(arl_tcp, "alloc", &sockstat_root.tcp_alloc);
- arl_expect(arl_tcp, "mem", &sockstat_root.tcp_mem);
-
- arl_udp = arl_create("sockstat/UDP", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_udp, "inuse", &sockstat_root.udp_inuse);
- arl_expect(arl_udp, "mem", &sockstat_root.udp_mem);
-
- arl_udplite = arl_create("sockstat/UDPLITE", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_udplite, "inuse", &sockstat_root.udplite_inuse);
-
- arl_raw = arl_create("sockstat/RAW", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_raw, "inuse", &sockstat_root.raw_inuse);
-
- arl_frag = arl_create("sockstat/FRAG", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_frag, "inuse", &sockstat_root.frag_inuse);
- arl_expect(arl_frag, "memory", &sockstat_root.frag_memory);
-
- hash_sockets = simple_hash("sockets");
- hash_tcp = simple_hash("TCP");
- hash_udp = simple_hash("UDP");
- hash_udplite = simple_hash("UDPLITE");
- hash_raw = simple_hash("RAW");
- hash_frag = simple_hash("FRAG");
-
- keys[0] = "sockets"; hashes[0] = hash_sockets; bases[0] = arl_sockets;
- keys[1] = "TCP"; hashes[1] = hash_tcp; bases[1] = arl_tcp;
- keys[2] = "UDP"; hashes[2] = hash_udp; bases[2] = arl_udp;
- keys[3] = "UDPLITE"; hashes[3] = hash_udplite; bases[3] = arl_udplite;
- keys[4] = "RAW"; hashes[4] = hash_raw; bases[4] = arl_raw;
- keys[5] = "FRAG"; hashes[5] = hash_frag; bases[5] = arl_frag;
- keys[6] = NULL; // terminator
- }
-
- update_constants_count += update_every;
- if(unlikely(update_constants_count > update_constants_every)) {
- read_tcp_max_orphans();
- read_tcp_mem();
- update_constants_count = 0;
- }
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sockstat");
- ff = procfile_open(config_get("plugin:proc:/proc/net/sockstat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
-    if(unlikely(!ff)) return 0; // we return 0, so that we will retry reading it next time
-
- size_t lines = procfile_lines(ff), l;
-
- for(l = 0; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
- char *key = procfile_lineword(ff, l, 0);
- uint32_t hash = simple_hash(key);
-
- int k;
- for(k = 0; keys[k] ; k++) {
- if(unlikely(hash == hashes[k] && strcmp(key, keys[k]) == 0)) {
- // fprintf(stderr, "KEY: '%s', l=%zu, w=1, words=%zu\n", key, l, words);
- ARL_BASE *arl = bases[k];
- arl_begin(arl);
- size_t w = 1;
-
- while(w + 1 < words) {
- char *name = procfile_lineword(ff, l, w); w++;
- char *value = procfile_lineword(ff, l, w); w++;
- // fprintf(stderr, " > NAME '%s', VALUE '%s', l=%zu, w=%zu, words=%zu\n", name, value, l, w, words);
- if(unlikely(arl_check(arl, name, value) != 0))
- break;
- }
-
- break;
- }
- }
- }
-
- // ------------------------------------------------------------------------
-
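- // charts gated by CONFIG_BOOLEAN_AUTO are created once a non-zero value is
- // seen (or when zero metrics are explicitly enabled), then latch to YES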
- if(do_sockets == CONFIG_BOOLEAN_YES || (do_sockets == CONFIG_BOOLEAN_AUTO &&
- (sockstat_root.sockets_used ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_sockets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_used = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ip"
- , "sockstat_sockets"
- , NULL
- , "sockets"
- , NULL
- , "Sockets used for all address families"
- , "sockets"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IP_SOCKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_used = rrddim_add(st, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_used, (collected_number)sockstat_root.sockets_used);
- rrdset_done(st);
- }
-
- // ------------------------------------------------------------------------
-
- if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO &&
- (sockstat_root.tcp_inuse ||
- sockstat_root.tcp_orphan ||
- sockstat_root.tcp_tw ||
- sockstat_root.tcp_alloc ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcp_sockets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_inuse = NULL,
- *rd_orphan = NULL,
- *rd_timewait = NULL,
- *rd_alloc = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "sockstat_tcp_sockets"
- , NULL
- , "tcp"
- , NULL
- , "TCP Sockets"
- , "sockets"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_alloc = rrddim_add(st, "alloc", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_orphan = rrddim_add(st, "orphan", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_timewait = rrddim_add(st, "timewait", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.tcp_inuse);
- rrddim_set_by_pointer(st, rd_orphan, (collected_number)sockstat_root.tcp_orphan);
- rrddim_set_by_pointer(st, rd_timewait, (collected_number)sockstat_root.tcp_tw);
- rrddim_set_by_pointer(st, rd_alloc, (collected_number)sockstat_root.tcp_alloc);
- rrdset_done(st);
- }
-
- // ------------------------------------------------------------------------
-
- if(do_tcp_mem == CONFIG_BOOLEAN_YES || (do_tcp_mem == CONFIG_BOOLEAN_AUTO &&
- (sockstat_root.tcp_mem || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcp_mem = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_mem = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "sockstat_tcp_mem"
- , NULL
- , "tcp"
- , NULL
- , "TCP Sockets Memory"
- , "KiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_TCP_SOCKETS_MEM
- , update_every
- , RRDSET_TYPE_AREA
- );
-
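- // tcp_mem is reported in pages: multiplier = page size in bytes,
- // divider = 1024, so the dimension renders in KiB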
- rd_mem = rrddim_add(st, "mem", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.tcp_mem);
- rrdset_done(st);
- }
-
- // ------------------------------------------------------------------------
-
- if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO &&
- (sockstat_root.udp_inuse ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_udp_sockets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_inuse = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "sockstat_udp_sockets"
- , NULL
- , "udp"
- , NULL
- , "IPv4 UDP Sockets"
- , "sockets"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDP_SOCKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.udp_inuse);
- rrdset_done(st);
- }
-
- // ------------------------------------------------------------------------
-
- if(do_udp_mem == CONFIG_BOOLEAN_YES || (do_udp_mem == CONFIG_BOOLEAN_AUTO &&
- (sockstat_root.udp_mem ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_udp_mem = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_mem = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "sockstat_udp_mem"
- , NULL
- , "udp"
- , NULL
- , "IPv4 UDP Sockets Memory"
- , "KiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDP_SOCKETS_MEM
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_mem = rrddim_add(st, "mem", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.udp_mem);
- rrdset_done(st);
- }
-
- // ------------------------------------------------------------------------
-
- if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO &&
- (sockstat_root.udplite_inuse ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_udplite_sockets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_inuse = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "sockstat_udplite_sockets"
- , NULL
- , "udplite"
- , NULL
- , "IPv4 UDPLITE Sockets"
- , "sockets"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_UDPLITE_SOCKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.udplite_inuse);
- rrdset_done(st);
- }
-
- // ------------------------------------------------------------------------
-
- if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO &&
- (sockstat_root.raw_inuse ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_raw_sockets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_inuse = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "sockstat_raw_sockets"
- , NULL
- , "raw"
- , NULL
- , "IPv4 RAW Sockets"
- , "sockets"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_RAW
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.raw_inuse);
- rrdset_done(st);
- }
-
- // ------------------------------------------------------------------------
-
- if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO &&
- (sockstat_root.frag_inuse ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_frag_sockets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_inuse = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "sockstat_frag_sockets"
- , NULL
- , "fragments"
- , NULL
- , "IPv4 FRAG Sockets"
- , "fragments"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_SOCKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat_root.frag_inuse);
- rrdset_done(st);
- }
-
- // ------------------------------------------------------------------------
-
- if(do_frag_mem == CONFIG_BOOLEAN_YES || (do_frag_mem == CONFIG_BOOLEAN_AUTO &&
- (sockstat_root.frag_memory ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_frag_mem = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_mem = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv4"
- , "sockstat_frag_mem"
- , NULL
- , "fragments"
- , NULL
- , "IPv4 FRAG Sockets Memory"
- , "KiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT_NAME
- , NETDATA_CHART_PRIO_IPV4_FRAGMENTS_SOCKETS_MEM
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_mem = rrddim_add(st, "mem", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_mem, (collected_number)sockstat_root.frag_memory);
- rrdset_done(st);
- }
-
- return 0;
-}
-
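For reference, /proc/net/sockstat groups its counters into sections such as "sockets: used 290" and "TCP: inuse 14 orphan 0 tw 2 alloc 17 mem 4". The sketch below is a minimal, self-contained illustration of the name/value parsing the deleted collector performed with its procfile/ARL helpers; it uses only the C standard library and assumes the classic two-header-token layout shown above, so treat it as illustrative rather than Netdata code.

// build: cc -o sockstat sockstat.c
#include <stdio.h>
#include <string.h>

int main(void) {
    FILE *fp = fopen("/proc/net/sockstat", "r");
    if (!fp) return 1;

    char line[512];
    while (fgets(line, sizeof(line), fp)) {
        // each line is "SECTION: name value name value ..."
        char *sep = strchr(line, ':');
        if (!sep) continue;
        *sep = '\0';

        printf("section %s:", line);
        char *save = NULL, *name = NULL;
        for (char *tok = strtok_r(sep + 1, " \t\n", &save); tok;
             tok = strtok_r(NULL, " \t\n", &save)) {
            if (!name) { name = tok; continue; } // first token of a pair is the name
            printf(" %s=%s", name, tok);         // second token is its value
            name = NULL;
        }
        printf("\n");
    }
    fclose(fp);
    return 0;
}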
diff --git a/collectors/proc.plugin/proc_net_sockstat6.c b/collectors/proc.plugin/proc_net_sockstat6.c
deleted file mode 100644
index 16e0248af..000000000
--- a/collectors/proc.plugin/proc_net_sockstat6.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME "/proc/net/sockstat6"
-
-static struct proc_net_sockstat6 {
- kernel_uint_t tcp6_inuse;
- kernel_uint_t udp6_inuse;
- kernel_uint_t udplite6_inuse;
- kernel_uint_t raw6_inuse;
- kernel_uint_t frag6_inuse;
-} sockstat6_root = { 0 };
-
-int do_proc_net_sockstat6(int update_every, usec_t dt) {
- (void)dt;
-
- static procfile *ff = NULL;
-
- static uint32_t hash_raw = 0,
- hash_frag = 0,
- hash_tcp = 0,
- hash_udp = 0,
- hash_udplite = 0;
-
- static ARL_BASE *arl_tcp = NULL;
- static ARL_BASE *arl_udp = NULL;
- static ARL_BASE *arl_udplite = NULL;
- static ARL_BASE *arl_raw = NULL;
- static ARL_BASE *arl_frag = NULL;
-
- static int do_tcp_sockets = -1, do_udp_sockets = -1, do_udplite_sockets = -1, do_raw_sockets = -1, do_frag_sockets = -1;
-
- static char *keys[6] = { NULL };
- static uint32_t hashes[6] = { 0 };
- static ARL_BASE *bases[6] = { NULL };
-
- if(unlikely(!arl_tcp)) {
- do_tcp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 TCP sockets", CONFIG_BOOLEAN_AUTO);
- do_udp_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 UDP sockets", CONFIG_BOOLEAN_AUTO);
- do_udplite_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 UDPLITE sockets", CONFIG_BOOLEAN_AUTO);
- do_raw_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 RAW sockets", CONFIG_BOOLEAN_AUTO);
- do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat6", "ipv6 FRAG sockets", CONFIG_BOOLEAN_AUTO);
-
- arl_tcp = arl_create("sockstat6/TCP6", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_tcp, "inuse", &sockstat6_root.tcp6_inuse);
-
- arl_udp = arl_create("sockstat6/UDP6", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_udp, "inuse", &sockstat6_root.udp6_inuse);
-
- arl_udplite = arl_create("sockstat6/UDPLITE6", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_udplite, "inuse", &sockstat6_root.udplite6_inuse);
-
- arl_raw = arl_create("sockstat6/RAW6", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_raw, "inuse", &sockstat6_root.raw6_inuse);
-
- arl_frag = arl_create("sockstat6/FRAG6", arl_callback_str2kernel_uint_t, 60);
- arl_expect(arl_frag, "inuse", &sockstat6_root.frag6_inuse);
-
- hash_tcp = simple_hash("TCP6");
- hash_udp = simple_hash("UDP6");
- hash_udplite = simple_hash("UDPLITE6");
- hash_raw = simple_hash("RAW6");
- hash_frag = simple_hash("FRAG6");
-
- keys[0] = "TCP6"; hashes[0] = hash_tcp; bases[0] = arl_tcp;
- keys[1] = "UDP6"; hashes[1] = hash_udp; bases[1] = arl_udp;
- keys[2] = "UDPLITE6"; hashes[2] = hash_udplite; bases[2] = arl_udplite;
- keys[3] = "RAW6"; hashes[3] = hash_raw; bases[3] = arl_raw;
- keys[4] = "FRAG6"; hashes[4] = hash_frag; bases[4] = arl_frag;
- keys[5] = NULL; // terminator
- }
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/sockstat6");
- ff = procfile_open(config_get("plugin:proc:/proc/net/sockstat6", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 0; // return 0 so that we retry opening it next time
-
- size_t lines = procfile_lines(ff), l;
-
- for(l = 0; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
- char *key = procfile_lineword(ff, l, 0);
- uint32_t hash = simple_hash(key);
-
- int k;
- for(k = 0; keys[k] ; k++) {
- if(unlikely(hash == hashes[k] && strcmp(key, keys[k]) == 0)) {
- // fprintf(stderr, "KEY: '%s', l=%zu, w=1, words=%zu\n", key, l, words);
- ARL_BASE *arl = bases[k];
- arl_begin(arl);
- size_t w = 1;
-
- while(w + 1 < words) {
- char *name = procfile_lineword(ff, l, w); w++;
- char *value = procfile_lineword(ff, l, w); w++;
- // fprintf(stderr, " > NAME '%s', VALUE '%s', l=%zu, w=%zu, words=%zu\n", name, value, l, w, words);
- if(unlikely(arl_check(arl, name, value) != 0))
- break;
- }
-
- break;
- }
- }
- }
-
- // ------------------------------------------------------------------------
-
- if(do_tcp_sockets == CONFIG_BOOLEAN_YES || (do_tcp_sockets == CONFIG_BOOLEAN_AUTO &&
- (sockstat6_root.tcp6_inuse ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_tcp_sockets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_inuse = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "sockstat6_tcp_sockets"
- , NULL
- , "tcp6"
- , NULL
- , "IPv6 TCP Sockets"
- , "sockets"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
- , NETDATA_CHART_PRIO_IPV6_TCP_SOCKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.tcp6_inuse);
- rrdset_done(st);
- }
-
- // ------------------------------------------------------------------------
-
- if(do_udp_sockets == CONFIG_BOOLEAN_YES || (do_udp_sockets == CONFIG_BOOLEAN_AUTO &&
- (sockstat6_root.udp6_inuse ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_udp_sockets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_inuse = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "sockstat6_udp_sockets"
- , NULL
- , "udp6"
- , NULL
- , "IPv6 UDP Sockets"
- , "sockets"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
- , NETDATA_CHART_PRIO_IPV6_UDP_SOCKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.udp6_inuse);
- rrdset_done(st);
- }
-
- // ------------------------------------------------------------------------
-
- if(do_udplite_sockets == CONFIG_BOOLEAN_YES || (do_udplite_sockets == CONFIG_BOOLEAN_AUTO &&
- (sockstat6_root.udplite6_inuse ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_udplite_sockets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_inuse = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "sockstat6_udplite_sockets"
- , NULL
- , "udplite6"
- , NULL
- , "IPv6 UDPLITE Sockets"
- , "sockets"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
- , NETDATA_CHART_PRIO_IPV6_UDPLITE_SOCKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.udplite6_inuse);
- rrdset_done(st);
- }
-
- // ------------------------------------------------------------------------
-
- if(do_raw_sockets == CONFIG_BOOLEAN_YES || (do_raw_sockets == CONFIG_BOOLEAN_AUTO &&
- (sockstat6_root.raw6_inuse ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_raw_sockets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_inuse = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "sockstat6_raw_sockets"
- , NULL
- , "raw6"
- , NULL
- , "IPv6 RAW Sockets"
- , "sockets"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
- , NETDATA_CHART_PRIO_IPV6_RAW_SOCKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.raw6_inuse);
- rrdset_done(st);
- }
-
- // ------------------------------------------------------------------------
-
- if(do_frag_sockets == CONFIG_BOOLEAN_YES || (do_frag_sockets == CONFIG_BOOLEAN_AUTO &&
- (sockstat6_root.frag6_inuse ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_frag_sockets = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_inuse = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "ipv6"
- , "sockstat6_frag_sockets"
- , NULL
- , "fragments6"
- , NULL
- , "IPv6 FRAG Sockets"
- , "fragments"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOCKSTAT6_NAME
- , NETDATA_CHART_PRIO_IPV6_FRAGMENTS_SOCKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_inuse = rrddim_add(st, "inuse", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_inuse, (collected_number)sockstat6_root.frag6_inuse);
- rrdset_done(st);
- }
-
- return 0;
-}
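Each documented line of /proc/net/sockstat6 carries a single protocol and its "inuse" count (FRAG6 also appends a memory counter). A hedged, standalone sketch of that extraction, assuming the "PROTO6: inuse N" layout the collector above expects:

#include <stdio.h>

int main(void) {
    FILE *fp = fopen("/proc/net/sockstat6", "r");
    if (!fp) return 1;

    char line[256], proto[32];
    unsigned long long inuse;
    while (fgets(line, sizeof(line), fp)) {
        // lines look like "TCP6: inuse 4"; extra pairs after inuse are ignored
        if (sscanf(line, "%31[^:]: inuse %llu", proto, &inuse) == 2)
            printf("%s sockets in use: %llu\n", proto, inuse);
    }
    fclose(fp);
    return 0;
}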
diff --git a/collectors/proc.plugin/proc_net_softnet_stat.c b/collectors/proc.plugin/proc_net_softnet_stat.c
deleted file mode 100644
index 2f01b8859..000000000
--- a/collectors/proc.plugin/proc_net_softnet_stat.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_NET_SOFTNET_NAME "/proc/net/softnet_stat"
-
-static inline char *softnet_column_name(size_t column) {
- switch(column) {
- // https://github.com/torvalds/linux/blob/a7fd20d1c476af4563e66865213474a2f9f473a4/net/core/net-procfs.c#L161-L166
- case 0: return "processed";
- case 1: return "dropped";
- case 2: return "squeezed";
- case 9: return "received_rps";
- case 10: return "flow_limit_count";
- default: return NULL;
- }
-}
-
-int do_proc_net_softnet_stat(int update_every, usec_t dt) {
- (void)dt;
-
- static procfile *ff = NULL;
- static int do_per_core = -1;
- static size_t allocated_lines = 0, allocated_columns = 0;
- static uint32_t *data = NULL;
-
- if(unlikely(do_per_core == -1)) do_per_core = config_get_boolean("plugin:proc:/proc/net/softnet_stat", "softnet_stat per core", 1);
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/softnet_stat");
- ff = procfile_open(config_get("plugin:proc:/proc/net/softnet_stat", "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 0; // return 0 so that we retry opening it next time
-
- size_t lines = procfile_lines(ff), l;
- size_t words = procfile_linewords(ff, 0), w;
-
- if(unlikely(!lines || !words)) {
- collector_error("Cannot read /proc/net/softnet_stat, %zu lines and %zu columns reported.", lines, words);
- return 1;
- }
-
- if(unlikely(lines > 200)) lines = 200;
- if(unlikely(words > 50)) words = 50;
-
- if(unlikely(!data || lines > allocated_lines || words > allocated_columns)) {
- freez(data);
- allocated_lines = lines;
- allocated_columns = words;
- data = mallocz((allocated_lines + 1) * allocated_columns * sizeof(uint32_t));
- }
-
- // initialize to zero
- memset(data, 0, (allocated_lines + 1) * allocated_columns * sizeof(uint32_t));
-
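-    // data layout: row 0 accumulates totals across all CPUs;
-    // rows 1..lines hold the per-CPU values (one row per softnet_stat line)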
- // parse the values
- for(l = 0; l < lines ;l++) {
- words = procfile_linewords(ff, l);
- if(unlikely(!words)) continue;
-
- if(unlikely(words > allocated_columns))
- words = allocated_columns;
-
- for(w = 0; w < words ; w++) {
- if(unlikely(softnet_column_name(w))) {
- uint32_t t = (uint32_t)strtoul(procfile_lineword(ff, l, w), NULL, 16);
- data[w] += t;
- data[((l + 1) * allocated_columns) + w] = t;
- }
- }
- }
-
- if(unlikely(data[(lines * allocated_columns)] == 0))
- lines--;
-
- RRDSET *st;
-
- // --------------------------------------------------------------------
-
- st = rrdset_find_active_bytype_localhost("system", "softnet_stat");
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "system"
- , "softnet_stat"
- , NULL
- , "softnet_stat"
- , "system.softnet_stat"
- , "System softnet_stat"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOFTNET_NAME
- , NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT
- , update_every
- , RRDSET_TYPE_LINE
- );
- for(w = 0; w < allocated_columns ;w++)
- if(unlikely(softnet_column_name(w)))
- rrddim_add(st, softnet_column_name(w), NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- for(w = 0; w < allocated_columns ;w++)
- if(unlikely(softnet_column_name(w)))
- rrddim_set(st, softnet_column_name(w), data[w]);
-
- rrdset_done(st);
-
- if(do_per_core) {
- for(l = 0; l < lines ;l++) {
- char id[50+1];
- snprintfz(id, sizeof(id) - 1, "cpu%zu_softnet_stat", l);
-
- st = rrdset_find_active_bytype_localhost("cpu", id);
- if(unlikely(!st)) {
- char title[100+1];
- snprintfz(title, sizeof(title) - 1, "CPU softnet_stat");
-
- st = rrdset_create_localhost(
- "cpu"
- , id
- , NULL
- , "softnet_stat"
- , "cpu.softnet_stat"
- , title
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_NET_SOFTNET_NAME
- , NETDATA_CHART_PRIO_SOFTNET_PER_CORE + l
- , update_every
- , RRDSET_TYPE_LINE
- );
- for(w = 0; w < allocated_columns ;w++)
- if(unlikely(softnet_column_name(w)))
- rrddim_add(st, softnet_column_name(w), NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- for(w = 0; w < allocated_columns ;w++)
- if(unlikely(softnet_column_name(w)))
- rrddim_set(st, softnet_column_name(w), data[((l + 1) * allocated_columns) + w]);
-
- rrdset_done(st);
- }
- }
-
- return 0;
-}
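The per-CPU rows of /proc/net/softnet_stat are whitespace-separated hexadecimal counters. A minimal standalone sketch of the summing done above, assuming the column order from the kernel source linked in the deleted file (0 = processed, 1 = dropped, 2 = squeezed):

#include <stdio.h>

int main(void) {
    FILE *fp = fopen("/proc/net/softnet_stat", "r");
    if (!fp) return 1;

    unsigned long long processed = 0, dropped = 0, squeezed = 0;
    unsigned long p, d, s;
    char line[1024];
    while (fgets(line, sizeof(line), fp)) {
        // one row per CPU; counters are printed in hex without a 0x prefix
        if (sscanf(line, "%lx %lx %lx", &p, &d, &s) == 3) {
            processed += p;
            dropped   += d;
            squeezed  += s;
        }
    }
    printf("processed=%llu dropped=%llu squeezed=%llu\n",
           processed, dropped, squeezed);
    fclose(fp);
    return 0;
}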
diff --git a/collectors/proc.plugin/proc_net_stat_conntrack.c b/collectors/proc.plugin/proc_net_stat_conntrack.c
deleted file mode 100644
index e8fbdbb66..000000000
--- a/collectors/proc.plugin/proc_net_stat_conntrack.c
+++ /dev/null
@@ -1,345 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define RRD_TYPE_NET_STAT_NETFILTER "netfilter"
-#define RRD_TYPE_NET_STAT_CONNTRACK "conntrack"
-#define PLUGIN_PROC_MODULE_CONNTRACK_NAME "/proc/net/stat/nf_conntrack"
-
-int do_proc_net_stat_conntrack(int update_every, usec_t dt) {
- static procfile *ff = NULL;
- static int do_sockets = -1, do_new = -1, do_changes = -1, do_expect = -1, do_search = -1, do_errors = -1;
- static usec_t get_max_every = 10 * USEC_PER_SEC, usec_since_last_max = 0;
- static int read_full = 1;
- static char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename;
- static const RRDVAR_ACQUIRED *rrdvar_max = NULL;
-
- unsigned long long aentries = 0, asearched = 0, afound = 0, anew = 0, ainvalid = 0, aignore = 0, adelete = 0, adelete_list = 0,
- ainsert = 0, ainsert_failed = 0, adrop = 0, aearly_drop = 0, aicmp_error = 0, aexpect_new = 0, aexpect_create = 0, aexpect_delete = 0, asearch_restart = 0;
-
- if(unlikely(do_sockets == -1)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/stat/nf_conntrack");
- nf_conntrack_filename = config_get("plugin:proc:/proc/net/stat/nf_conntrack", "filename to monitor", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/net/netfilter/nf_conntrack_max");
- nf_conntrack_max_filename = config_get("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max", "filename to monitor", filename);
- usec_since_last_max = get_max_every = config_get_number("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max", "read every seconds", 10) * USEC_PER_SEC;
-
- read_full = 1;
- ff = procfile_open(nf_conntrack_filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if(!ff) read_full = 0;
-
- do_new = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter new connections", read_full);
- do_changes = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection changes", read_full);
- do_expect = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection expectations", read_full);
- do_search = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connection searches", read_full);
- do_errors = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter errors", read_full);
-
- do_sockets = 1;
- if(!read_full) {
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/net/netfilter/nf_conntrack_count");
- nf_conntrack_count_filename = config_get("plugin:proc:/proc/sys/net/netfilter/nf_conntrack_count", "filename to monitor", filename);
-
- if(read_single_number_file(nf_conntrack_count_filename, &aentries))
- do_sockets = 0;
- }
-
- do_sockets = config_get_boolean("plugin:proc:/proc/net/stat/nf_conntrack", "netfilter connections", do_sockets);
-
- if(!do_sockets && !read_full)
- return 1;
-
- rrdvar_max = rrdvar_custom_host_variable_add_and_acquire(localhost, "netfilter_conntrack_max");
- }
-
- if(likely(read_full)) {
- if(unlikely(!ff)) {
- ff = procfile_open(nf_conntrack_filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff))
- return 0; // return 0 so that we retry opening it next time
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
- return 0; // return 0 so that we retry opening it next time
-
- size_t lines = procfile_lines(ff), l;
-
- for(l = 1; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
- if(unlikely(words < 17)) {
- if(unlikely(words)) collector_error("Cannot read /proc/net/stat/nf_conntrack line. Expected 17 params, read %zu.", words);
- continue;
- }
-
- unsigned long long tentries = 0, tsearched = 0, tfound = 0, tnew = 0, tinvalid = 0, tignore = 0,
- tdelete = 0, tdelete_list = 0, tinsert = 0, tinsert_failed = 0, tdrop = 0, tearly_drop = 0,
- ticmp_error = 0, texpect_new = 0, texpect_create = 0, texpect_delete = 0, tsearch_restart = 0;
-
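-            // all counters in /proc/net/stat/nf_conntrack are hexadecimal,
-            // one line per CPU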
- tentries = strtoull(procfile_lineword(ff, l, 0), NULL, 16);
- tsearched = strtoull(procfile_lineword(ff, l, 1), NULL, 16);
- tfound = strtoull(procfile_lineword(ff, l, 2), NULL, 16);
- tnew = strtoull(procfile_lineword(ff, l, 3), NULL, 16);
- tinvalid = strtoull(procfile_lineword(ff, l, 4), NULL, 16);
- tignore = strtoull(procfile_lineword(ff, l, 5), NULL, 16);
- tdelete = strtoull(procfile_lineword(ff, l, 6), NULL, 16);
- tdelete_list = strtoull(procfile_lineword(ff, l, 7), NULL, 16);
- tinsert = strtoull(procfile_lineword(ff, l, 8), NULL, 16);
- tinsert_failed = strtoull(procfile_lineword(ff, l, 9), NULL, 16);
- tdrop = strtoull(procfile_lineword(ff, l, 10), NULL, 16);
- tearly_drop = strtoull(procfile_lineword(ff, l, 11), NULL, 16);
- ticmp_error = strtoull(procfile_lineword(ff, l, 12), NULL, 16);
- texpect_new = strtoull(procfile_lineword(ff, l, 13), NULL, 16);
- texpect_create = strtoull(procfile_lineword(ff, l, 14), NULL, 16);
- texpect_delete = strtoull(procfile_lineword(ff, l, 15), NULL, 16);
- tsearch_restart = strtoull(procfile_lineword(ff, l, 16), NULL, 16);
-
- if(unlikely(!aentries)) aentries = tentries;
-
- // sum all the cpus together
- asearched += tsearched; // conntrack.search
- afound += tfound; // conntrack.search
- anew += tnew; // conntrack.new
- ainvalid += tinvalid; // conntrack.new
- aignore += tignore; // conntrack.new
- adelete += tdelete; // conntrack.changes
- adelete_list += tdelete_list; // conntrack.changes
- ainsert += tinsert; // conntrack.changes
- ainsert_failed += tinsert_failed; // conntrack.errors
- adrop += tdrop; // conntrack.errors
- aearly_drop += tearly_drop; // conntrack.errors
- aicmp_error += ticmp_error; // conntrack.errors
- aexpect_new += texpect_new; // conntrack.expect
- aexpect_create += texpect_create; // conntrack.expect
- aexpect_delete += texpect_delete; // conntrack.expect
- asearch_restart += tsearch_restart; // conntrack.search
- }
- }
- else {
- if(unlikely(read_single_number_file(nf_conntrack_count_filename, &aentries)))
- return 0; // return 0 so that we retry reading it next time
- }
-
- usec_since_last_max += dt;
- if(unlikely(rrdvar_max && usec_since_last_max >= get_max_every)) {
- usec_since_last_max = 0;
-
- unsigned long long max;
- if(likely(!read_single_number_file(nf_conntrack_max_filename, &max)))
- rrdvar_custom_host_variable_set(localhost, rrdvar_max, max);
- }
-
- // --------------------------------------------------------------------
-
- if(do_sockets) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_connections = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_sockets"
- , NULL
- , RRD_TYPE_NET_STAT_CONNTRACK
- , NULL
- , "Connection Tracker Connections"
- , "active connections"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_CONNTRACK_NAME
- , NETDATA_CHART_PRIO_NETFILTER_SOCKETS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_connections = rrddim_add(st, "connections", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_connections, aentries);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_new) {
- static RRDSET *st = NULL;
- static RRDDIM
- *rd_new = NULL,
- *rd_ignore = NULL,
- *rd_invalid = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_new"
- , NULL
- , RRD_TYPE_NET_STAT_CONNTRACK
- , NULL
- , "Connection Tracker New Connections"
- , "connections/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_CONNTRACK_NAME
- , NETDATA_CHART_PRIO_NETFILTER_NEW
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_new = rrddim_add(st, "new", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_ignore = rrddim_add(st, "ignore", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_invalid = rrddim_add(st, "invalid", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_new, anew);
- rrddim_set_by_pointer(st, rd_ignore, aignore);
- rrddim_set_by_pointer(st, rd_invalid, ainvalid);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_changes) {
- static RRDSET *st = NULL;
- static RRDDIM
- *rd_inserted = NULL,
- *rd_deleted = NULL,
- *rd_delete_list = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_changes"
- , NULL
- , RRD_TYPE_NET_STAT_CONNTRACK
- , NULL
- , "Connection Tracker Changes"
- , "changes/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_CONNTRACK_NAME
- , NETDATA_CHART_PRIO_NETFILTER_CHANGES
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_inserted = rrddim_add(st, "inserted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_delete_list = rrddim_add(st, "delete_list", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_inserted, ainsert);
- rrddim_set_by_pointer(st, rd_deleted, adelete);
- rrddim_set_by_pointer(st, rd_delete_list, adelete_list);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_expect) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_created = NULL,
- *rd_deleted = NULL,
- *rd_new = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_expect"
- , NULL
- , RRD_TYPE_NET_STAT_CONNTRACK
- , NULL
- , "Connection Tracker Expectations"
- , "expectations/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_CONNTRACK_NAME
- , NETDATA_CHART_PRIO_NETFILTER_EXPECT
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_created = rrddim_add(st, "created", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_new = rrddim_add(st, "new", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_created, aexpect_create);
- rrddim_set_by_pointer(st, rd_deleted, aexpect_delete);
- rrddim_set_by_pointer(st, rd_new, aexpect_new);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_search) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_searched = NULL,
- *rd_restarted = NULL,
- *rd_found = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_search"
- , NULL
- , RRD_TYPE_NET_STAT_CONNTRACK
- , NULL
- , "Connection Tracker Searches"
- , "searches/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_CONNTRACK_NAME
- , NETDATA_CHART_PRIO_NETFILTER_SEARCH
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_searched = rrddim_add(st, "searched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_restarted = rrddim_add(st, "restarted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_found = rrddim_add(st, "found", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_searched, asearched);
- rrddim_set_by_pointer(st, rd_restarted, asearch_restart);
- rrddim_set_by_pointer(st, rd_found, afound);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_errors) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_icmp_error = NULL,
- *rd_insert_failed = NULL,
- *rd_drop = NULL,
- *rd_early_drop = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_CONNTRACK "_errors"
- , NULL
- , RRD_TYPE_NET_STAT_CONNTRACK
- , NULL
- , "Connection Tracker Errors"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_CONNTRACK_NAME
- , NETDATA_CHART_PRIO_NETFILTER_ERRORS
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
- rd_icmp_error = rrddim_add(st, "icmp_error", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_insert_failed = rrddim_add(st, "insert_failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_drop = rrddim_add(st, "drop", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_early_drop = rrddim_add(st, "early_drop", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_icmp_error, aicmp_error);
- rrddim_set_by_pointer(st, rd_insert_failed, ainsert_failed);
- rrddim_set_by_pointer(st, rd_drop, adrop);
- rrddim_set_by_pointer(st, rd_early_drop, aearly_drop);
- rrdset_done(st);
- }
-
- return 0;
-}
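When /proc/net/stat/nf_conntrack is unreadable, the collector above falls back to the nf_conntrack_count and nf_conntrack_max sysctl files. A hedged, standalone sketch of that fallback path (standard C only, real sysctl paths taken from the code above):

#include <stdio.h>

static int read_ull(const char *path, unsigned long long *v) {
    FILE *fp = fopen(path, "r");
    if (!fp) return -1;
    int ok = (fscanf(fp, "%llu", v) == 1); // each file holds a single number
    fclose(fp);
    return ok ? 0 : -1;
}

int main(void) {
    unsigned long long count, max;
    if (read_ull("/proc/sys/net/netfilter/nf_conntrack_count", &count) ||
        read_ull("/proc/sys/net/netfilter/nf_conntrack_max", &max) || !max)
        return 1;

    printf("conntrack: %llu / %llu entries (%.1f%%)\n",
           count, max, 100.0 * (double)count / (double)max);
    return 0;
}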
diff --git a/collectors/proc.plugin/proc_net_stat_synproxy.c b/collectors/proc.plugin/proc_net_stat_synproxy.c
deleted file mode 100644
index e23a0ab7b..000000000
--- a/collectors/proc.plugin/proc_net_stat_synproxy.c
+++ /dev/null
@@ -1,153 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_SYNPROXY_NAME "/proc/net/stat/synproxy"
-
-#define RRD_TYPE_NET_STAT_NETFILTER "netfilter"
-#define RRD_TYPE_NET_STAT_SYNPROXY "synproxy"
-
-int do_proc_net_stat_synproxy(int update_every, usec_t dt) {
- (void)dt;
-
- static int do_cookies = -1, do_syns = -1, do_reopened = -1;
- static procfile *ff = NULL;
-
- if(unlikely(do_cookies == -1)) {
- do_cookies = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY cookies", CONFIG_BOOLEAN_AUTO);
- do_syns = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY SYN received", CONFIG_BOOLEAN_AUTO);
- do_reopened = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY connections reopened", CONFIG_BOOLEAN_AUTO);
- }
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/stat/synproxy");
- ff = procfile_open(config_get("plugin:proc:/proc/net/stat/synproxy", "filename to monitor", filename), " \t,:|", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff))
- return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
- return 0; // return 0 so that we retry opening it next time
-
- // make sure we have at least 2 lines (the header plus at least one CPU row)
- size_t lines = procfile_lines(ff), l;
- if(unlikely(lines < 2)) {
- collector_error("/proc/net/stat/synproxy has %zu lines, expected no less than 2. Disabling it.", lines);
- return 1;
- }
-
- unsigned long long syn_received = 0, cookie_invalid = 0, cookie_valid = 0, cookie_retrans = 0, conn_reopened = 0;
-
- // synproxy gives its values per CPU
- for(l = 1; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
- if(unlikely(words < 6))
- continue;
-
- syn_received += strtoull(procfile_lineword(ff, l, 1), NULL, 16);
- cookie_invalid += strtoull(procfile_lineword(ff, l, 2), NULL, 16);
- cookie_valid += strtoull(procfile_lineword(ff, l, 3), NULL, 16);
- cookie_retrans += strtoull(procfile_lineword(ff, l, 4), NULL, 16);
- conn_reopened += strtoull(procfile_lineword(ff, l, 5), NULL, 16);
- }
-
- unsigned long long events = syn_received + cookie_invalid + cookie_valid + cookie_retrans + conn_reopened;
-
- // --------------------------------------------------------------------
-
- if(do_syns == CONFIG_BOOLEAN_YES || (do_syns == CONFIG_BOOLEAN_AUTO &&
- (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_syns = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_SYNPROXY "_syn_received"
- , NULL
- , RRD_TYPE_NET_STAT_SYNPROXY
- , NULL
- , "SYNPROXY SYN Packets received"
- , "packets/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_SYNPROXY_NAME
- , NETDATA_CHART_PRIO_SYNPROXY_SYN_RECEIVED
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "received", syn_received);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_reopened == CONFIG_BOOLEAN_YES || (do_reopened == CONFIG_BOOLEAN_AUTO &&
- (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_reopened = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_SYNPROXY "_conn_reopened"
- , NULL
- , RRD_TYPE_NET_STAT_SYNPROXY
- , NULL
- , "SYNPROXY Connections Reopened"
- , "connections/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_SYNPROXY_NAME
- , NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "reopened", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "reopened", conn_reopened);
- rrdset_done(st);
- }
-
- // --------------------------------------------------------------------
-
- if(do_cookies == CONFIG_BOOLEAN_YES || (do_cookies == CONFIG_BOOLEAN_AUTO &&
- (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_cookies = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st = NULL;
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- RRD_TYPE_NET_STAT_NETFILTER
- , RRD_TYPE_NET_STAT_SYNPROXY "_cookies"
- , NULL
- , RRD_TYPE_NET_STAT_SYNPROXY
- , NULL
- , "SYNPROXY TCP Cookies"
- , "cookies/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_SYNPROXY_NAME
- , NETDATA_CHART_PRIO_SYNPROXY_COOKIES
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrddim_add(st, "valid", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "invalid", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(st, "retransmits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set(st, "valid", cookie_valid);
- rrddim_set(st, "invalid", cookie_invalid);
- rrddim_set(st, "retransmits", cookie_retrans);
- rrdset_done(st);
- }
-
- return 0;
-}
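Like nf_conntrack, /proc/net/stat/synproxy is one header line followed by one hex row per CPU. A minimal standalone sketch of the summing above, keeping the collector's column order (word 0 = entries, then syn_received, cookie_invalid, cookie_valid, cookie_retrans, conn_reopened):

#include <stdio.h>

int main(void) {
    FILE *fp = fopen("/proc/net/stat/synproxy", "r");
    if (!fp) return 1;

    char line[512];
    if (!fgets(line, sizeof(line), fp)) { fclose(fp); return 1; } // skip header

    unsigned long long syn = 0, inv = 0, val = 0, rtx = 0, reo = 0;
    unsigned long e, s, i, v, r, c;
    while (fgets(line, sizeof(line), fp))
        if (sscanf(line, "%lx %lx %lx %lx %lx %lx", &e, &s, &i, &v, &r, &c) == 6) {
            (void)e; // the entries column is not summed here
            syn += s; inv += i; val += v; rtx += r; reo += c;
        }

    printf("syn_received=%llu cookie_invalid=%llu cookie_valid=%llu "
           "cookie_retrans=%llu conn_reopened=%llu\n", syn, inv, val, rtx, reo);
    fclose(fp);
    return 0;
}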
diff --git a/collectors/proc.plugin/proc_net_wireless.c b/collectors/proc.plugin/proc_net_wireless.c
deleted file mode 100644
index c7efa3335..000000000
--- a/collectors/proc.plugin/proc_net_wireless.c
+++ /dev/null
@@ -1,433 +0,0 @@
-#include <stdbool.h>
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_NETWIRELESS_NAME "/proc/net/wireless"
-
-#define CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_NETWIRELESS_NAME
-
-
-static struct netwireless {
- char *name;
- uint32_t hash;
-
- //flags
- bool configured;
- struct timeval updated;
-
- int do_status;
- int do_quality;
- int do_discarded_packets;
- int do_missed_beacon;
-
- // Data collected
- // status
- kernel_uint_t status;
-
- // Quality
- NETDATA_DOUBLE link;
- NETDATA_DOUBLE level;
- NETDATA_DOUBLE noise;
-
- // Discarded packets
- kernel_uint_t nwid;
- kernel_uint_t crypt;
- kernel_uint_t frag;
- kernel_uint_t retry;
- kernel_uint_t misc;
-
- // missed beacon
- kernel_uint_t missed_beacon;
-
- const char *chart_id_net_status;
- const char *chart_id_net_link;
- const char *chart_id_net_level;
- const char *chart_id_net_noise;
- const char *chart_id_net_discarded_packets;
- const char *chart_id_net_missed_beacon;
-
- const char *chart_family;
-
- // charts
- // status
- RRDSET *st_status;
-
- // Quality
- RRDSET *st_link;
- RRDSET *st_level;
- RRDSET *st_noise;
-
- // Discarded Packets
- RRDSET *st_discarded_packets;
- // Missed beacon
- RRDSET *st_missed_beacon;
-
- // Dimensions
- // status
- RRDDIM *rd_status;
-
- // Quality
- RRDDIM *rd_link;
- RRDDIM *rd_level;
- RRDDIM *rd_noise;
-
- // Discarded packets
- RRDDIM *rd_nwid;
- RRDDIM *rd_crypt;
- RRDDIM *rd_frag;
- RRDDIM *rd_retry;
- RRDDIM *rd_misc;
-
- // missed beacon
- RRDDIM *rd_missed_beacon;
-
- struct netwireless *next;
-} *netwireless_root = NULL;
-
-static void netwireless_free_st(struct netwireless *wireless_dev)
-{
- if (wireless_dev->st_status) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_status);
- if (wireless_dev->st_link) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_link);
- if (wireless_dev->st_level) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_level);
- if (wireless_dev->st_noise) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_noise);
- if (wireless_dev->st_discarded_packets)
- rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_discarded_packets);
- if (wireless_dev->st_missed_beacon) rrdset_is_obsolete___safe_from_collector_thread(wireless_dev->st_missed_beacon);
-
- wireless_dev->st_status = NULL;
- wireless_dev->st_link = NULL;
- wireless_dev->st_level = NULL;
- wireless_dev->st_noise = NULL;
- wireless_dev->st_discarded_packets = NULL;
- wireless_dev->st_missed_beacon = NULL;
-}
-
-static void netwireless_free(struct netwireless *wireless_dev)
-{
- wireless_dev->next = NULL;
- freez((void *)wireless_dev->name);
- netwireless_free_st(wireless_dev);
- freez((void *)wireless_dev->chart_id_net_status);
- freez((void *)wireless_dev->chart_id_net_link);
- freez((void *)wireless_dev->chart_id_net_level);
- freez((void *)wireless_dev->chart_id_net_noise);
- freez((void *)wireless_dev->chart_id_net_discarded_packets);
- freez((void *)wireless_dev->chart_id_net_missed_beacon);
-
- freez((void *)wireless_dev);
-}
-
-static void netwireless_cleanup(struct timeval *timestamp)
-{
- struct netwireless *previous = NULL;
- struct netwireless *current;
- // walk the list from beginning to end, freeing stale entries
- for (current = netwireless_root; current;) {
-
- if (timercmp(&current->updated, timestamp, <)) {
- struct netwireless *to_free = current;
- current = current->next;
- netwireless_free(to_free);
-
- if (previous) {
- previous->next = current;
- } else {
- netwireless_root = current;
- }
- } else {
- previous = current;
- current = current->next;
- }
- }
-}
-
-// finds an existing interface or creates a new entry
-static struct netwireless *find_or_create_wireless(const char *name)
-{
- struct netwireless *wireless;
- uint32_t hash = simple_hash(name);
-
- // search the list from beginning to end
- for (wireless = netwireless_root ; wireless ; wireless = wireless->next) {
- if (unlikely(hash == wireless->hash && !strcmp(name, wireless->name))) {
- return wireless;
- }
- }
-
- // create a new one
- wireless = callocz(1, sizeof(struct netwireless));
- wireless->name = strdupz(name);
- wireless->hash = hash;
-
- // link it to the end
- if (netwireless_root) {
- struct netwireless *last_node;
- for (last_node = netwireless_root; last_node->next ; last_node = last_node->next);
-
- last_node->next = wireless;
- } else
- netwireless_root = wireless;
-
- return wireless;
-}
-
-static void configure_device(int do_status, int do_quality, int do_discarded_packets, int do_missed,
- struct netwireless *wireless_dev) {
- wireless_dev->do_status = do_status;
- wireless_dev->do_quality = do_quality;
- wireless_dev->do_discarded_packets = do_discarded_packets;
- wireless_dev->do_missed_beacon = do_missed;
- wireless_dev->configured = true;
-
- char buffer[RRD_ID_LENGTH_MAX + 1];
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_status", wireless_dev->name);
- wireless_dev->chart_id_net_status = strdupz(buffer);
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_link_quality", wireless_dev->name);
- wireless_dev->chart_id_net_link = strdupz(buffer);
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_signal_level", wireless_dev->name);
- wireless_dev->chart_id_net_level = strdupz(buffer);
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_noise_level", wireless_dev->name);
- wireless_dev->chart_id_net_noise = strdupz(buffer);
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_discarded_packets", wireless_dev->name);
- wireless_dev->chart_id_net_discarded_packets = strdupz(buffer);
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "%s_missed_beacon", wireless_dev->name);
- wireless_dev->chart_id_net_missed_beacon = strdupz(buffer);
-}
-
-static void add_labels_to_wireless(struct netwireless *w, RRDSET *st) {
- rrdlabels_add(st->rrdlabels, "device", w->name, RRDLABEL_SRC_AUTO);
-}
-
-int do_proc_net_wireless(int update_every, usec_t dt)
-{
- UNUSED(dt);
- static procfile *ff = NULL;
- static int do_status, do_quality = -1, do_discarded_packets, do_beacon;
- static char *proc_net_wireless_filename = NULL;
-
- if (unlikely(do_quality == -1)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/net/wireless");
-
- proc_net_wireless_filename = config_get(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS, "filename to monitor", filename);
- do_status = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS, "status for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_quality = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS, "quality for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_discarded_packets = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS, "discarded packets for all interfaces", CONFIG_BOOLEAN_AUTO);
- do_beacon = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_NETWIRELESS, "missed beacon for all interface", CONFIG_BOOLEAN_AUTO);
- }
-
- if (unlikely(!ff)) {
- ff = procfile_open(proc_net_wireless_filename, " \t,|", PROCFILE_FLAG_DEFAULT);
- if (unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
- if (unlikely(!ff)) return 1;
-
- size_t lines = procfile_lines(ff);
- struct timeval timestamp;
- size_t l;
- gettimeofday(&timestamp, NULL);
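-    // the first two lines of /proc/net/wireless are headers, so parsing starts at line 2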
- for (l = 2; l < lines; l++) {
- if (unlikely(procfile_linewords(ff, l) < 11)) continue;
-
- char *name = procfile_lineword(ff, l, 0);
- size_t len = strlen(name);
- if (name[len - 1] == ':') name[len - 1] = '\0';
-
- struct netwireless *wireless_dev = find_or_create_wireless(name);
-
- if (unlikely(!wireless_dev->configured)) {
- configure_device(do_status, do_quality, do_discarded_packets, do_beacon, wireless_dev);
- }
-
- if (likely(do_status != CONFIG_BOOLEAN_NO)) {
- wireless_dev->status = str2kernel_uint_t(procfile_lineword(ff, l, 1));
-
- if (unlikely(!wireless_dev->st_status)) {
- wireless_dev->st_status = rrdset_create_localhost(
- "wireless",
- wireless_dev->chart_id_net_status,
- NULL,
- wireless_dev->name,
- "wireless.status",
- "Internal status reported by the interface.",
- "status",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
- NETDATA_CHART_PRIO_WIRELESS_IFACE,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_flag_set(wireless_dev->st_status, RRDSET_FLAG_DETAIL);
-
- wireless_dev->rd_status = rrddim_add(wireless_dev->st_status, "status", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_wireless(wireless_dev, wireless_dev->st_status);
- }
-
- rrddim_set_by_pointer(wireless_dev->st_status, wireless_dev->rd_status,
- (collected_number)wireless_dev->status);
- rrdset_done(wireless_dev->st_status);
- }
-
- if (likely(do_quality != CONFIG_BOOLEAN_NO)) {
- wireless_dev->link = str2ndd(procfile_lineword(ff, l, 2), NULL);
- wireless_dev->level = str2ndd(procfile_lineword(ff, l, 3), NULL);
- wireless_dev->noise = str2ndd(procfile_lineword(ff, l, 4), NULL);
-
- if (unlikely(!wireless_dev->st_link)) {
- wireless_dev->st_link = rrdset_create_localhost(
- "wireless",
- wireless_dev->chart_id_net_link,
- NULL,
- wireless_dev->name,
- "wireless.link_quality",
- "Overall quality of the link. This is an aggregate value, and depends on the driver and hardware.",
- "value",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
- NETDATA_CHART_PRIO_WIRELESS_IFACE + 1,
- update_every,
- RRDSET_TYPE_LINE);
- rrdset_flag_set(wireless_dev->st_link, RRDSET_FLAG_DETAIL);
-
- wireless_dev->rd_link = rrddim_add(wireless_dev->st_link, "link_quality", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_wireless(wireless_dev, wireless_dev->st_link);
- }
-
- if (unlikely(!wireless_dev->st_level)) {
- wireless_dev->st_level = rrdset_create_localhost(
- "wireless",
- wireless_dev->chart_id_net_level,
- NULL,
- wireless_dev->name,
- "wireless.signal_level",
- "The signal level is the wireless signal power level received by the wireless client. The closer the value is to 0, the stronger the signal.",
- "dBm",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
- NETDATA_CHART_PRIO_WIRELESS_IFACE + 2,
- update_every,
- RRDSET_TYPE_LINE);
- rrdset_flag_set(wireless_dev->st_level, RRDSET_FLAG_DETAIL);
-
- wireless_dev->rd_level = rrddim_add(wireless_dev->st_level, "signal_level", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_wireless(wireless_dev, wireless_dev->st_level);
- }
-
- if (unlikely(!wireless_dev->st_noise)) {
- wireless_dev->st_noise = rrdset_create_localhost(
- "wireless",
- wireless_dev->chart_id_net_noise,
- NULL,
- wireless_dev->name,
- "wireless.noise_level",
- "The noise level indicates the amount of background noise in your environment. The closer the value is to 0, the greater the noise level.",
- "dBm",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
- NETDATA_CHART_PRIO_WIRELESS_IFACE + 3,
- update_every,
- RRDSET_TYPE_LINE);
- rrdset_flag_set(wireless_dev->st_noise, RRDSET_FLAG_DETAIL);
-
- wireless_dev->rd_noise = rrddim_add(wireless_dev->st_noise, "noise_level", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_wireless(wireless_dev, wireless_dev->st_noise);
- }
-
- rrddim_set_by_pointer(wireless_dev->st_link, wireless_dev->rd_link, (collected_number)wireless_dev->link);
- rrdset_done(wireless_dev->st_link);
-
- rrddim_set_by_pointer(wireless_dev->st_level, wireless_dev->rd_level, (collected_number)wireless_dev->level);
- rrdset_done(wireless_dev->st_level);
-
- rrddim_set_by_pointer(wireless_dev->st_noise, wireless_dev->rd_noise, (collected_number)wireless_dev->noise);
- rrdset_done(wireless_dev->st_noise);
- }
-
- if (likely(do_discarded_packets)) {
- wireless_dev->nwid = str2kernel_uint_t(procfile_lineword(ff, l, 5));
- wireless_dev->crypt = str2kernel_uint_t(procfile_lineword(ff, l, 6));
- wireless_dev->frag = str2kernel_uint_t(procfile_lineword(ff, l, 7));
- wireless_dev->retry = str2kernel_uint_t(procfile_lineword(ff, l, 8));
- wireless_dev->misc = str2kernel_uint_t(procfile_lineword(ff, l, 9));
-
- if (unlikely(!wireless_dev->st_discarded_packets)) {
- wireless_dev->st_discarded_packets = rrdset_create_localhost(
- "wireless",
- wireless_dev->chart_id_net_discarded_packets,
- NULL,
- wireless_dev->name,
- "wireless.discarded_packets",
- "Packets discarded by the wireless adapter due to wireless-specific problems.",
- "packets/s",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
- NETDATA_CHART_PRIO_WIRELESS_IFACE + 4,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_flag_set(wireless_dev->st_discarded_packets, RRDSET_FLAG_DETAIL);
-
- wireless_dev->rd_nwid = rrddim_add(wireless_dev->st_discarded_packets, "nwid", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- wireless_dev->rd_crypt = rrddim_add(wireless_dev->st_discarded_packets, "crypt", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- wireless_dev->rd_frag = rrddim_add(wireless_dev->st_discarded_packets, "frag", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- wireless_dev->rd_retry = rrddim_add(wireless_dev->st_discarded_packets, "retry", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- wireless_dev->rd_misc = rrddim_add(wireless_dev->st_discarded_packets, "misc", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_wireless(wireless_dev, wireless_dev->st_discarded_packets);
- }
-
- rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_nwid, (collected_number)wireless_dev->nwid);
- rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_crypt, (collected_number)wireless_dev->crypt);
- rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_frag, (collected_number)wireless_dev->frag);
- rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_retry, (collected_number)wireless_dev->retry);
- rrddim_set_by_pointer(wireless_dev->st_discarded_packets, wireless_dev->rd_misc, (collected_number)wireless_dev->misc);
-
- rrdset_done(wireless_dev->st_discarded_packets);
- }
-
- if (likely(do_beacon)) {
- wireless_dev->missed_beacon = str2kernel_uint_t(procfile_lineword(ff, l, 10));
-
- if (unlikely(!wireless_dev->st_missed_beacon)) {
- wireless_dev->st_missed_beacon = rrdset_create_localhost(
- "wireless",
- wireless_dev->chart_id_net_missed_beacon,
- NULL,
- wireless_dev->name,
- "wireless.missed_beacons",
- "Number of missed beacons.",
- "frames/s",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_NETWIRELESS_NAME,
- NETDATA_CHART_PRIO_WIRELESS_IFACE + 5,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_flag_set(wireless_dev->st_missed_beacon, RRDSET_FLAG_DETAIL);
-
- wireless_dev->rd_missed_beacon = rrddim_add(wireless_dev->st_missed_beacon, "missed_beacons", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_wireless(wireless_dev, wireless_dev->st_missed_beacon);
- }
-
- rrddim_set_by_pointer(wireless_dev->st_missed_beacon, wireless_dev->rd_missed_beacon, (collected_number)wireless_dev->missed_beacon);
- rrdset_done(wireless_dev->st_missed_beacon);
- }
-
- wireless_dev->updated = timestamp;
- }
-
- netwireless_cleanup(&timestamp);
- return 0;
-}
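A hedged, standalone sketch of the /proc/net/wireless parsing above: two header lines, then one row per interface in the form "wlan0: status link level noise ..." (the quality fields may carry a trailing dot, which %lf consumes):

#include <stdio.h>

int main(void) {
    FILE *fp = fopen("/proc/net/wireless", "r");
    if (!fp) return 1;

    char line[512];
    // skip the two header lines
    if (!fgets(line, sizeof(line), fp) || !fgets(line, sizeof(line), fp)) {
        fclose(fp);
        return 1;
    }

    char iface[32];
    unsigned int status;
    double link, level, noise;
    while (fgets(line, sizeof(line), fp)) {
        if (sscanf(line, " %31[^:]: %x %lf %lf %lf",
                   iface, &status, &link, &level, &noise) == 5)
            printf("%s: status=0x%x link=%.0f level=%.0f dBm noise=%.0f dBm\n",
                   iface, status, link, level, noise);
    }
    fclose(fp);
    return 0;
}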
diff --git a/collectors/proc.plugin/proc_pagetypeinfo.c b/collectors/proc.plugin/proc_pagetypeinfo.c
deleted file mode 100644
index fc5496c63..000000000
--- a/collectors/proc.plugin/proc_pagetypeinfo.c
+++ /dev/null
@@ -1,336 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-// For ULONG_MAX
-#include <limits.h>
-
-#define PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME "/proc/pagetypeinfo"
-#define CONFIG_SECTION_PLUGIN_PROC_PAGETYPEINFO "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME
-
-// Zone struct is pglist_data, in include/linux/mmzone.h
-// MAX_NR_ZONES is from __MAX_NR_ZONE, which is the last value of the enum.
-#define MAX_PAGETYPE_ORDER 11
-
-// Names are in mm/page_alloc.c :: migratetype_names. Max size = 10.
-#define MAX_ZONETYPE_NAME 16
-#define MAX_PAGETYPE_NAME 16
-
-// Defined in include/linux/mmzone.h as __MAX_NR_ZONE (last enum of zone_type)
-#define MAX_ZONETYPE 6
-// Defined in include/linux/mmzone.h as MIGRATE_TYPES (last enum of migratetype)
-#define MAX_PAGETYPE 7
-
-
-//
-// /proc/pagetypeinfo is declared in mm/vmstat.c :: init_mm_internals
-//
-
-// One line of /proc/pagetypeinfo
-struct pageline {
- int node;
- char *zone;
- char *type;
- int line;
- uint64_t free_pages_size[MAX_PAGETYPE_ORDER];
- RRDDIM *rd[MAX_PAGETYPE_ORDER];
-};
-
-// Sum of all orders
-struct systemorder {
- uint64_t size;
- RRDDIM *rd;
-};
-
-
-static inline uint64_t pageline_total_count(struct pageline *p) {
- uint64_t sum = 0, o;
- for (o=0; o<MAX_PAGETYPE_ORDER; o++)
- sum += p->free_pages_size[o];
- return sum;
-}
-
-// Check if a line of /proc/pagetypeinfo is valid to use
-// Free block lines starts by "Node" && 4th col is "type"
-#define pagetypeinfo_line_valid(ff, l) (strncmp(procfile_lineword(ff, l, 0), "Node", 4) == 0 && strncmp(procfile_lineword(ff, l, 4), "type", 4) == 0)
-
-// Dimension name from the order
-#define dim_name(s, o, pagesize) (snprintfz(s, 16,"%ldKB (%lu)", (1 << o) * pagesize / 1024, o))
-
-int do_proc_pagetypeinfo(int update_every, usec_t dt) {
- (void)dt;
-
- // Config
- static int do_global, do_detail;
- static SIMPLE_PATTERN *filter_types = NULL;
-
-    // Counters from parsing the file; these do not change after boot
- static struct systemorder systemorders[MAX_PAGETYPE_ORDER] = {};
- static struct pageline* pagelines = NULL;
- static long pagesize = 0;
- static size_t pageorders_cnt = 0, pagelines_cnt = 0, ff_lines = 0;
-
- // Handle
- static procfile *ff = NULL;
- static char ff_path[FILENAME_MAX + 1];
-
- // RRD Sets
- static RRDSET *st_order = NULL;
- static RRDSET **st_nodezonetype = NULL;
-
- // Local temp variables
- long unsigned int l, o, p;
- struct pageline *pgl = NULL;
-
- // --------------------------------------------------------------------
- // Startup: Init arch and open /proc/pagetypeinfo
- if (unlikely(!pagesize)) {
- pagesize = sysconf(_SC_PAGESIZE);
- }
-
- if(unlikely(!ff)) {
- snprintfz(ff_path, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME);
- ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_PAGETYPEINFO, "filename to monitor", ff_path), " \t:", PROCFILE_FLAG_DEFAULT);
-
- if(unlikely(!ff)) {
- strncpyz(ff_path, PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME, FILENAME_MAX);
- ff = procfile_open(PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME, " \t,", PROCFILE_FLAG_DEFAULT);
- }
- }
- if(unlikely(!ff))
- return 1;
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
-        return 0; // so that we will retry opening it next time
-
- // --------------------------------------------------------------------
- // Init: find how many Nodes, Zones and Types
- if(unlikely(pagelines_cnt == 0)) {
- size_t nodenumlast = -1;
- char *zonenamelast = NULL;
-
- ff_lines = procfile_lines(ff);
- if(unlikely(!ff_lines)) {
- collector_error("PLUGIN: PROC_PAGETYPEINFO: Cannot read %s, zero lines reported.", ff_path);
- return 1;
- }
-
- // Configuration
- do_global = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_PAGETYPEINFO, "enable system summary", CONFIG_BOOLEAN_YES);
- do_detail = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_PAGETYPEINFO, "enable detail per-type", CONFIG_BOOLEAN_AUTO);
- filter_types = simple_pattern_create(
- config_get(CONFIG_SECTION_PLUGIN_PROC_PAGETYPEINFO, "hide charts id matching", ""), NULL,
- SIMPLE_PATTERN_SUFFIX, true);
-
- pagelines_cnt = 0;
-
- // Pass 1: how many lines would be valid
- for (l = 4; l < ff_lines; l++) {
- if (!pagetypeinfo_line_valid(ff, l))
- continue;
-
- pagelines_cnt++;
- }
- if (pagelines_cnt == 0) {
- collector_error("PLUGIN: PROC_PAGETYPEINFO: Unable to parse any valid line in %s", ff_path);
- return 1;
- }
-
-        // The 4th line is the "Free pages count per migrate type at order" header; strip its 9 leading words to get the order count.
- pageorders_cnt = procfile_linewords(ff, 3);
- if (pageorders_cnt < 9) {
- collector_error("PLUGIN: PROC_PAGETYPEINFO: Unable to parse Line 4 of %s", ff_path);
- return 1;
- }
-
- pageorders_cnt -= 9;
-
- if (pageorders_cnt > MAX_PAGETYPE_ORDER) {
- collector_error("PLUGIN: PROC_PAGETYPEINFO: pageorder found (%lu) is higher than max %d",
- (long unsigned int) pageorders_cnt, MAX_PAGETYPE_ORDER);
- return 1;
- }
-
- // Init pagelines from scanned lines
- if (!pagelines) {
- pagelines = callocz(pagelines_cnt, sizeof(struct pageline));
- if (!pagelines) {
- collector_error("PLUGIN: PROC_PAGETYPEINFO: Cannot allocate %lu pagelines of %lu B",
- (long unsigned int) pagelines_cnt, (long unsigned int) sizeof(struct pageline));
- return 1;
- }
- }
-
- // Pass 2: Scan the file again, with details
- p = 0;
- for (l=4; l < ff_lines; l++) {
-
- if (!pagetypeinfo_line_valid(ff, l))
- continue;
-
- size_t nodenum = strtoul(procfile_lineword(ff, l, 1), NULL, 10);
- char *zonename = procfile_lineword(ff, l, 3);
- char *typename = procfile_lineword(ff, l, 5);
-
- // We changed node or zone
- if (nodenum != nodenumlast || !zonenamelast || strncmp(zonename, zonenamelast, 6) != 0) {
- zonenamelast = zonename;
- }
-
- // Populate the line
- pgl = &pagelines[p];
-
- pgl->line = l;
- pgl->node = nodenum;
- pgl->type = typename;
- pgl->zone = zonename;
- for (o = 0; o < pageorders_cnt; o++)
- pgl->free_pages_size[o] = str2uint64_t(procfile_lineword(ff, l, o + 6), NULL) * 1 << o;
-
- p++;
- }
-
- // Init the RRD graphs
-
- // Per-Order: sum of all node, zone, type Grouped by order
- if (do_global != CONFIG_BOOLEAN_NO) {
- st_order = rrdset_create_localhost(
- "mem"
- , "pagetype_global"
- , NULL
- , "pagetype"
- , NULL
- , "System orders available"
- , "B"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME
- , NETDATA_CHART_PRIO_MEM_PAGEFRAG
- , update_every
- , RRDSET_TYPE_STACKED
- );
- for (o = 0; o < pageorders_cnt; o++) {
- char id[3+1];
- snprintfz(id, sizeof(id) - 1, "%lu", o);
-
- char name[20+1];
- dim_name(name, o, pagesize);
-
- systemorders[o].rd = rrddim_add(st_order, id, name, pagesize, 1, RRD_ALGORITHM_ABSOLUTE);
- }
- }
-
-
-        // Per NUMA node, zone and type (full detail). Only charted if sum(line) > 0
- st_nodezonetype = callocz(pagelines_cnt, sizeof(RRDSET *));
- for (p = 0; p < pagelines_cnt; p++) {
- pgl = &pagelines[p];
-
- // Skip invalid, refused or empty pagelines if not explicitly requested
- if (!pgl
- || do_detail == CONFIG_BOOLEAN_NO
- || (do_detail == CONFIG_BOOLEAN_AUTO && pageline_total_count(pgl) == 0 && netdata_zero_metrics_enabled != CONFIG_BOOLEAN_YES))
- continue;
-
- // "pagetype Node" + NUMA-NodeId + ZoneName + TypeName
- char setid[13+1+2+1+MAX_ZONETYPE_NAME+1+MAX_PAGETYPE_NAME+1];
- snprintfz(setid, sizeof(setid) - 1, "pagetype_Node%d_%s_%s", pgl->node, pgl->zone, pgl->type);
-
- // Skip explicitly refused charts
- if (simple_pattern_matches(filter_types, setid))
- continue;
-
- // "Node" + NUMA-NodeID + ZoneName + TypeName
- char setname[4+1+MAX_ZONETYPE_NAME+1+MAX_PAGETYPE_NAME +1];
- snprintfz(setname, MAX_ZONETYPE_NAME + MAX_PAGETYPE_NAME, "Node %d %s %s", pgl->node, pgl->zone, pgl->type);
-
- st_nodezonetype[p] = rrdset_create_localhost(
- "mem"
- , setid
- , NULL
- , "pagetype"
- , "mem.pagetype"
- , setname
- , "B"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_PAGETYPEINFO_NAME
- , NETDATA_CHART_PRIO_MEM_PAGEFRAG + 1 + p
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- char node[50+1];
- snprintfz(node, sizeof(node) - 1, "node%d", pgl->node);
- rrdlabels_add(st_nodezonetype[p]->rrdlabels, "node_id", node, RRDLABEL_SRC_AUTO);
- rrdlabels_add(st_nodezonetype[p]->rrdlabels, "node_zone", pgl->zone, RRDLABEL_SRC_AUTO);
- rrdlabels_add(st_nodezonetype[p]->rrdlabels, "node_type", pgl->type, RRDLABEL_SRC_AUTO);
-
- for (o = 0; o < pageorders_cnt; o++) {
- char dimid[3+1];
- snprintfz(dimid, sizeof(dimid) - 1, "%lu", o);
- char dimname[20+1];
- dim_name(dimname, o, pagesize);
-
- pgl->rd[o] = rrddim_add(st_nodezonetype[p], dimid, dimname, pagesize, 1, RRD_ALGORITHM_ABSOLUTE);
- }
- }
- }
-
- // --------------------------------------------------------------------
- // Update pagelines
-
- // Process each line
- p = 0;
- for (l=4; l<ff_lines; l++) {
-
- if (!pagetypeinfo_line_valid(ff, l))
- continue;
-
- size_t words = procfile_linewords(ff, l);
-
- if (words != 7+pageorders_cnt) {
- collector_error("PLUGIN: PROC_PAGETYPEINFO: Unable to read line %lu, %lu words found instead of %lu",
- l+1, (long unsigned int) words, (long unsigned int) 7+pageorders_cnt);
- break;
- }
-
- for (o = 0; o < pageorders_cnt; o++) {
- // Reset counter
- if (p == 0)
- systemorders[o].size = 0;
-
- // Update orders of the current line
- pagelines[p].free_pages_size[o] = str2uint64_t(procfile_lineword(ff, l, o + 6), NULL) * 1 << o;
-
- // Update sum by order
- systemorders[o].size += pagelines[p].free_pages_size[o];
- }
-
- p++;
- }
-
- // --------------------------------------------------------------------
- // update RRD values
-
- // Global system per order
- if (st_order) {
- for (o = 0; o < pageorders_cnt; o++)
- rrddim_set_by_pointer(st_order, systemorders[o].rd, systemorders[o].size);
- rrdset_done(st_order);
- }
-
- // Per Node-Zone-Type
- if (do_detail) {
- for (p = 0; p < pagelines_cnt; p++) {
- // Skip empty graphs
- if (!st_nodezonetype[p])
- continue;
-
- for (o = 0; o < pageorders_cnt; o++)
- rrddim_set_by_pointer(st_nodezonetype[p], pagelines[p].rd[o], pagelines[p].free_pages_size[o]);
- rrdset_done(st_nodezonetype[p]);
- }
- }
-
- return 0;
-}
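
The order arithmetic above is easy to misread: a count in column o of /proc/pagetypeinfo is a number of free blocks of 2^o contiguous pages each, which the code folds in via `count << o` and then scales by the page size through the rrddim_add() multiplier. A standalone sketch of the same arithmetic; the 4 KiB page size and the sample counts are assumptions for illustration (the collector asks sysconf(_SC_PAGESIZE)):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        long pagesize = 4096;                 /* assumed for the example */
        uint64_t counts[11] = {5, 3, 0, 2, 1, 0, 0, 0, 0, 0, 1};  /* one sample line */
        uint64_t total_pages = 0;

        for (unsigned o = 0; o < 11; o++) {
            uint64_t pages = counts[o] << o;  /* each block at order o is 2^o pages */
            total_pages += pages;
            printf("order %2u (%5luKB blocks): %2llu blocks = %4llu pages\n",
                   o, (unsigned long)((1UL << o) * (unsigned long)pagesize / 1024),
                   (unsigned long long)counts[o], (unsigned long long)pages);
        }
        printf("total free: %llu bytes\n",
               (unsigned long long)(total_pages * (uint64_t)pagesize));
        return 0;
    }
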
diff --git a/collectors/proc.plugin/proc_pressure.c b/collectors/proc.plugin/proc_pressure.c
deleted file mode 100644
index 4037e60ac..000000000
--- a/collectors/proc.plugin/proc_pressure.c
+++ /dev/null
@@ -1,257 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_PRESSURE_NAME "/proc/pressure"
-#define CONFIG_SECTION_PLUGIN_PROC_PRESSURE "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_PRESSURE_NAME
-
-// Linux updates these averages every 2 seconds; see PSI_FREQ in kernel/sched/psi.c
-#define MIN_PRESSURE_UPDATE_EVERY 2
-
-static int pressure_update_every = 0;
-
-static struct pressure resources[PRESSURE_NUM_RESOURCES] = {
- {
- .some = {
- .available = true,
- .share_time = {.id = "cpu_some_pressure", .title = "CPU some pressure"},
- .total_time = {.id = "cpu_some_pressure_stall_time", .title = "CPU some pressure stall time"}
- },
- .full = {
- // Disable CPU full pressure.
- // See https://github.com/torvalds/linux/commit/890d550d7dbac7a31ecaa78732aa22be282bb6b8
- .available = false,
- .share_time = {.id = "cpu_full_pressure", .title = "CPU full pressure"},
- .total_time = {.id = "cpu_full_pressure_stall_time", .title = "CPU full pressure stall time"}
- },
- },
- {
- .some = {
- .available = true,
- .share_time = {.id = "memory_some_pressure", .title = "Memory some pressure"},
- .total_time = {.id = "memory_some_pressure_stall_time", .title = "Memory some pressure stall time"}
- },
- .full = {
- .available = true,
- .share_time = {.id = "memory_full_pressure", .title = "Memory full pressure"},
- .total_time = {.id = "memory_full_pressure_stall_time", .title = "Memory full pressure stall time"}
- },
- },
- {
- .some = {
- .available = true,
- .share_time = {.id = "io_some_pressure", .title = "I/O some pressure"},
- .total_time = {.id = "io_some_pressure_stall_time", .title = "I/O some pressure stall time"}
- },
- .full = {
- .available = true,
- .share_time = {.id = "io_full_pressure", .title = "I/O full pressure"},
- .total_time = {.id = "io_full_pressure_stall_time", .title = "I/O full pressure stall time"}
- },
- },
- {
- .some = {
-            // the kernel exposes only the "full" line for IRQ pressure
- .available = false,
- .share_time = {.id = "irq_some_pressure", .title = "IRQ some pressure"},
- .total_time = {.id = "irq_some_pressure_stall_time", .title = "IRQ some pressure stall time"}
- },
- .full = {
- .available = true,
- .share_time = {.id = "irq_full_pressure", .title = "IRQ full pressure"},
- .total_time = {.id = "irq_full_pressure_stall_time", .title = "IRQ full pressure stall time"}
- },
- },
-};
-
-static struct resource_info {
- procfile *pf;
- const char *name; // metric file name
- const char *family; // webui section name
- int section_priority;
-} resource_info[PRESSURE_NUM_RESOURCES] = {
- { .name = "cpu", .family = "cpu", .section_priority = NETDATA_CHART_PRIO_SYSTEM_CPU },
- { .name = "memory", .family = "ram", .section_priority = NETDATA_CHART_PRIO_SYSTEM_RAM },
- { .name = "io", .family = "disk", .section_priority = NETDATA_CHART_PRIO_SYSTEM_IO },
- { .name = "irq", .family = "interrupts", .section_priority = NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS },
-};
-
-void update_pressure_charts(struct pressure_charts *pcs) {
- if (pcs->share_time.st) {
- rrddim_set_by_pointer(
- pcs->share_time.st, pcs->share_time.rd10, (collected_number)(pcs->share_time.value10 * 100));
- rrddim_set_by_pointer(
- pcs->share_time.st, pcs->share_time.rd60, (collected_number)(pcs->share_time.value60 * 100));
- rrddim_set_by_pointer(
- pcs->share_time.st, pcs->share_time.rd300, (collected_number)(pcs->share_time.value300 * 100));
- rrdset_done(pcs->share_time.st);
- }
- if (pcs->total_time.st) {
- rrddim_set_by_pointer(
- pcs->total_time.st, pcs->total_time.rdtotal, (collected_number)(pcs->total_time.value_total));
- rrdset_done(pcs->total_time.st);
- }
-}
-
-static void proc_pressure_do_resource(procfile *ff, int res_idx, size_t line, bool some) {
- struct pressure_charts *pcs;
- struct resource_info ri;
- pcs = some ? &resources[res_idx].some : &resources[res_idx].full;
- ri = resource_info[res_idx];
-
- if (unlikely(!pcs->share_time.st)) {
- pcs->share_time.st = rrdset_create_localhost(
- "system",
- pcs->share_time.id,
- NULL,
- ri.family,
- NULL,
- pcs->share_time.title,
- "percentage",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_PRESSURE_NAME,
- ri.section_priority + (some ? 40 : 50),
- pressure_update_every,
- RRDSET_TYPE_LINE);
- pcs->share_time.rd10 =
- rrddim_add(pcs->share_time.st, some ? "some 10" : "full 10", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd60 =
- rrddim_add(pcs->share_time.st, some ? "some 60" : "full 60", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- pcs->share_time.rd300 =
- rrddim_add(pcs->share_time.st, some ? "some 300" : "full 300", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- }
-
- pcs->share_time.value10 = strtod(procfile_lineword(ff, line, 2), NULL);
- pcs->share_time.value60 = strtod(procfile_lineword(ff, line, 4), NULL);
- pcs->share_time.value300 = strtod(procfile_lineword(ff, line, 6), NULL);
-
- if (unlikely(!pcs->total_time.st)) {
- pcs->total_time.st = rrdset_create_localhost(
- "system",
- pcs->total_time.id,
- NULL,
- ri.family,
- NULL,
- pcs->total_time.title,
- "ms",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_PRESSURE_NAME,
- ri.section_priority + (some ? 45 : 55),
- pressure_update_every,
- RRDSET_TYPE_LINE);
- pcs->total_time.rdtotal = rrddim_add(pcs->total_time.st, "time", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- pcs->total_time.value_total = str2ull(procfile_lineword(ff, line, 8), NULL) / 1000;
-}
-
-static void proc_pressure_do_resource_some(procfile *ff, int res_idx, size_t line) {
- proc_pressure_do_resource(ff, res_idx, line, true);
-}
-
-static void proc_pressure_do_resource_full(procfile *ff, int res_idx, size_t line) {
- proc_pressure_do_resource(ff, res_idx, line, false);
-}
-
-int do_proc_pressure(int update_every, usec_t dt) {
- int ok_count = 0;
- int i;
-
- static usec_t next_pressure_dt = 0;
- static char *base_path = NULL;
-
- update_every = (update_every < MIN_PRESSURE_UPDATE_EVERY) ? MIN_PRESSURE_UPDATE_EVERY : update_every;
- pressure_update_every = update_every;
-
- if (next_pressure_dt <= dt) {
- next_pressure_dt = update_every * USEC_PER_SEC;
- } else {
- next_pressure_dt -= dt;
- return 0;
- }
-
- if (unlikely(!base_path)) {
- base_path = config_get(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, "base path of pressure metrics", "/proc/pressure");
- }
-
- for (i = 0; i < PRESSURE_NUM_RESOURCES; i++) {
- procfile *ff = resource_info[i].pf;
- int do_some = resources[i].some.enabled, do_full = resources[i].full.enabled;
-
- if (!resources[i].some.available && !resources[i].full.available)
- continue;
-
- if (unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- char config_key[CONFIG_MAX_NAME + 1];
-
- snprintfz(filename
- , FILENAME_MAX
- , "%s%s/%s"
- , netdata_configured_host_prefix
- , base_path
- , resource_info[i].name);
-
- do_some = resources[i].some.available ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_NO;
- do_full = resources[i].full.available ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_NO;
-
- snprintfz(config_key, CONFIG_MAX_NAME, "enable %s some pressure", resource_info[i].name);
- do_some = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, config_key, do_some);
- resources[i].some.enabled = do_some;
-
- snprintfz(config_key, CONFIG_MAX_NAME, "enable %s full pressure", resource_info[i].name);
- do_full = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, config_key, do_full);
- resources[i].full.enabled = do_full;
-
- if (!do_full && !do_some) {
- resources[i].some.available = false;
- resources[i].full.available = false;
- continue;
- }
-
- ff = procfile_open(filename, " =", PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if (unlikely(!ff)) {
- // PSI IRQ was added recently (https://github.com/torvalds/linux/commit/52b1364ba0b105122d6de0e719b36db705011ac1)
- if (strcmp(resource_info[i].name, "irq") != 0)
- collector_error("Cannot read pressure information from %s.", filename);
- resources[i].some.available = false;
- resources[i].full.available = false;
- continue;
- }
- }
-
- ff = procfile_readall(ff);
- resource_info[i].pf = ff;
- if (unlikely(!ff))
- continue;
-
- size_t lines = procfile_lines(ff);
- if (unlikely(lines < 1)) {
- collector_error("%s has no lines.", procfile_filename(ff));
- continue;
- }
-
- for(size_t l = 0; l < lines ;l++) {
- const char *key = procfile_lineword(ff, l, 0);
- if(strcmp(key, "some") == 0) {
- if(do_some) {
- proc_pressure_do_resource_some(ff, i, l);
- update_pressure_charts(&resources[i].some);
- ok_count++;
- }
- }
- else if(strcmp(key, "full") == 0) {
- if(do_full) {
- proc_pressure_do_resource_full(ff, i, l);
- update_pressure_charts(&resources[i].full);
- ok_count++;
- }
- }
- }
- }
-
- if(!ok_count)
- return 1;
-
- return 0;
-}
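
For context on the word indices used above: with " =" as the procfile separators, a PSI line splits so that words 2, 4 and 6 are the avg10/avg60/avg300 shares and word 8 is the cumulative stall time in microseconds. A self-contained sketch of decoding one such line; the sample values are made up:

    #include <stdio.h>

    int main(void) {
        /* sample only -- real lines come from /proc/pressure/{cpu,memory,io,irq} */
        const char *line = "some avg10=1.23 avg60=0.80 avg300=0.34 total=123456";
        char kind[8];
        double avg10, avg60, avg300;
        unsigned long long total_us;

        if (sscanf(line, "%7s avg10=%lf avg60=%lf avg300=%lf total=%llu",
                   kind, &avg10, &avg60, &avg300, &total_us) == 5)
            printf("%s: %.2f%% / %.2f%% / %.2f%%, %llu ms stalled\n",
                   kind, avg10, avg60, avg300, total_us / 1000);
        return 0;
    }
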
diff --git a/collectors/proc.plugin/proc_pressure.h b/collectors/proc.plugin/proc_pressure.h
deleted file mode 100644
index 2e5cab2cc..000000000
--- a/collectors/proc.plugin/proc_pressure.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PROC_PRESSURE_H
-#define NETDATA_PROC_PRESSURE_H
-
-#define PRESSURE_NUM_RESOURCES 4
-
-struct pressure {
- int updated;
- char *filename;
-
- struct pressure_charts {
- bool available;
- int enabled;
-
- struct pressure_share_time_chart {
- const char *id;
- const char *title;
-
- double value10;
- double value60;
- double value300;
-
- RRDSET *st;
- RRDDIM *rd10;
- RRDDIM *rd60;
- RRDDIM *rd300;
- } share_time;
-
- struct pressure_total_time_chart {
- const char *id;
- const char *title;
-
- unsigned long long value_total;
-
- RRDSET *st;
- RRDDIM *rdtotal;
- } total_time;
- } some, full;
-};
-
-void update_pressure_charts(struct pressure_charts *charts);
-
-#endif //NETDATA_PROC_PRESSURE_H
diff --git a/collectors/proc.plugin/proc_self_mountinfo.c b/collectors/proc.plugin/proc_self_mountinfo.c
deleted file mode 100644
index 194791603..000000000
--- a/collectors/proc.plugin/proc_self_mountinfo.c
+++ /dev/null
@@ -1,471 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-// ----------------------------------------------------------------------------
-// taken from gnulib/mountlist.c
-
-#ifndef ME_REMOTE
-/* A file system is "remote" if its Fs_name contains a ':'
- or if (it is of type (smbfs or cifs) and its Fs_name starts with '//')
- or Fs_name is equal to "-hosts" (used by autofs to mount remote fs). */
-# define ME_REMOTE(Fs_name, Fs_type) \
- (strchr (Fs_name, ':') != NULL \
- || ((Fs_name)[0] == '/' \
- && (Fs_name)[1] == '/' \
- && (strcmp (Fs_type, "smbfs") == 0 \
- || strcmp (Fs_type, "cifs") == 0)) \
- || (strcmp("-hosts", Fs_name) == 0))
-#endif
-
-#define ME_DUMMY_0(Fs_name, Fs_type) \
- (strcmp (Fs_type, "autofs") == 0 \
- || strcmp (Fs_type, "proc") == 0 \
- || strcmp (Fs_type, "subfs") == 0 \
- /* for Linux 2.6/3.x */ \
- || strcmp (Fs_type, "debugfs") == 0 \
- || strcmp (Fs_type, "devpts") == 0 \
- || strcmp (Fs_type, "fusectl") == 0 \
- || strcmp (Fs_type, "mqueue") == 0 \
- || strcmp (Fs_type, "rpc_pipefs") == 0 \
- || strcmp (Fs_type, "sysfs") == 0 \
- /* FreeBSD, Linux 2.4 */ \
- || strcmp (Fs_type, "devfs") == 0 \
- /* for NetBSD 3.0 */ \
- || strcmp (Fs_type, "kernfs") == 0 \
- /* for Irix 6.5 */ \
- || strcmp (Fs_type, "ignore") == 0)
-
-/* Historically, we have marked as "dummy" any file system of type "none",
- but now that programs like du need to know about bind-mounted directories,
- we grant an exception to any with "bind" in its list of mount options.
- I.e., those are *not* dummy entries. */
-# define ME_DUMMY(Fs_name, Fs_type) \
- (ME_DUMMY_0 (Fs_name, Fs_type) || strcmp (Fs_type, "none") == 0)
-
-// ----------------------------------------------------------------------------
-
-// find the mount info with the given major:minor
-// in the supplied linked list of mountinfo structures
-struct mountinfo *mountinfo_find(struct mountinfo *root, unsigned long major, unsigned long minor, char *device) {
- struct mountinfo *mi;
-
- uint32_t hash = simple_hash(device);
-
- for(mi = root; mi ; mi = mi->next)
- if (unlikely(
- mi->major == major &&
- mi->minor == minor &&
- mi->mount_source_name_hash == hash &&
- !strcmp(mi->mount_source_name, device)))
- return mi;
-
- return NULL;
-}
-
-// find the mount info with the given filesystem and mount_source
-// in the supplied linked list of mountinfo structures
-struct mountinfo *mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source) {
- struct mountinfo *mi;
- uint32_t filesystem_hash = simple_hash(filesystem), mount_source_hash = simple_hash(mount_source);
-
- for(mi = root; mi ; mi = mi->next)
- if(unlikely(mi->filesystem
- && mi->mount_source
- && mi->filesystem_hash == filesystem_hash
- && mi->mount_source_hash == mount_source_hash
- && !strcmp(mi->filesystem, filesystem)
- && !strcmp(mi->mount_source, mount_source)))
- return mi;
-
- return NULL;
-}
-
-struct mountinfo *mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options) {
- struct mountinfo *mi;
- uint32_t filesystem_hash = simple_hash(filesystem);
-
- size_t solen = strlen(super_options);
-
- for(mi = root; mi ; mi = mi->next)
- if(unlikely(mi->filesystem
- && mi->super_options
- && mi->filesystem_hash == filesystem_hash
- && !strcmp(mi->filesystem, filesystem))) {
-
- // super_options is a comma separated list
- char *s = mi->super_options, *e;
- while(*s) {
- e = s + 1;
- while(*e && *e != ',') e++;
-
- size_t len = e - s;
- if(unlikely(len == solen && !strncmp(s, super_options, len)))
- return mi;
-
- if(*e == ',') s = ++e;
- else s = e;
- }
- }
-
- return NULL;
-}
-
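
mountinfo_find_by_filesystem_super_option() above scans super_options for an exact token without copying or modifying the string. The same pointer walk, reduced to a standalone helper; the option strings in main() are examples only:

    #include <stdio.h>
    #include <string.h>

    static int has_option(const char *options, const char *wanted) {
        size_t wlen = strlen(wanted);
        const char *s = options;
        while (*s) {
            const char *e = s;
            while (*e && *e != ',') e++;           /* end of this token */
            if ((size_t)(e - s) == wlen && !strncmp(s, wanted, wlen))
                return 1;                          /* exact-length match only */
            s = (*e == ',') ? e + 1 : e;
        }
        return 0;
    }

    int main(void) {
        printf("%d %d\n", has_option("rw,relatime,bind", "bind"),
                          has_option("rw,rebind", "bind"));  /* -> 1 0 */
        return 0;
    }
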
-static void mountinfo_free(struct mountinfo *mi) {
- freez(mi->root);
- freez(mi->mount_point);
- freez(mi->mount_options);
- freez(mi->persistent_id);
-/*
- if(mi->optional_fields_count) {
- int i;
- for(i = 0; i < mi->optional_fields_count ; i++)
- free(*mi->optional_fields[i]);
- }
- free(mi->optional_fields);
-*/
- freez(mi->filesystem);
- freez(mi->mount_source);
- freez(mi->mount_source_name);
- freez(mi->super_options);
- freez(mi);
-}
-
-// free a linked list of mountinfo structures
-void mountinfo_free_all(struct mountinfo *mi) {
- while(mi) {
- struct mountinfo *t = mi;
- mi = mi->next;
-
- mountinfo_free(t);
- }
-}
-
-static char *strdupz_decoding_octal(const char *string) {
- char *buffer = strdupz(string);
-
- char *d = buffer;
- const char *s = string;
-
- while(*s) {
- if(unlikely(*s == '\\')) {
- s++;
- if(likely(isdigit(*s) && isdigit(s[1]) && isdigit(s[2]))) {
- char c = *s++ - '0';
- c <<= 3;
- c |= *s++ - '0';
- c <<= 3;
- c |= *s++ - '0';
- *d++ = c;
- }
- else *d++ = '_';
- }
- else *d++ = *s++;
- }
- *d = '\0';
-
- return buffer;
-}
-
-static inline int is_read_only(const char *s) {
- if(!s) return 0;
-
- size_t len = strlen(s);
- if(len < 2) return 0;
- if(len == 2) {
- if(!strcmp(s, "ro")) return 1;
- return 0;
- }
- if(!strncmp(s, "ro,", 3)) return 1;
- if(!strncmp(&s[len - 3], ",ro", 3)) return 1;
- if(strstr(s, ",ro,")) return 1;
- return 0;
-}
-
-// for the full list of protected mount points look at
-// https://github.com/systemd/systemd/blob/1eb3ef78b4df28a9e9f464714208f2682f957e36/src/core/namespace.c#L142-L149
-// https://github.com/systemd/systemd/blob/1eb3ef78b4df28a9e9f464714208f2682f957e36/src/core/namespace.c#L180-L194
-static const char *systemd_protected_mount_points[] = {
- "/home",
- "/root",
- "/usr",
- "/boot",
- "/efi",
- "/etc",
- "/run/user",
- "/lib",
- "/lib64",
- "/bin",
- "/sbin",
- NULL
-};
-
-static inline int mount_point_is_protected(char *mount_point)
-{
- for (size_t i = 0; systemd_protected_mount_points[i] != NULL; i++)
- if (!strcmp(mount_point, systemd_protected_mount_points[i]))
- return 1;
-
- return 0;
-}
-
-// read the whole mountinfo into a linked list
-struct mountinfo *mountinfo_read(int do_statvfs) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/self/mountinfo", netdata_configured_host_prefix);
- procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) {
- snprintfz(filename, FILENAME_MAX, "%s/proc/1/mountinfo", netdata_configured_host_prefix);
- ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return NULL;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
- return NULL;
-
- struct mountinfo *root = NULL, *last = NULL, *mi = NULL;
-
- // create a dictionary to track uniqueness
- DICTIONARY *dict = dictionary_create_advanced(
- DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_NAME_LINK_DONT_CLONE,
- &dictionary_stats_category_collectors, 0);
-
- unsigned long l, lines = procfile_lines(ff);
- for(l = 0; l < lines ;l++) {
- if(unlikely(procfile_linewords(ff, l) < 5))
- continue;
-
- // make sure we don't add the same item twice
- char *v = (char *)dictionary_set(dict, procfile_lineword(ff, l, 4), "N", 2);
- if(v) {
- if(*v == 'O') continue;
- *v = 'O';
- }
-
- mi = mallocz(sizeof(struct mountinfo));
-
- unsigned long w = 0;
- mi->id = str2ul(procfile_lineword(ff, l, w)); w++;
- mi->parentid = str2ul(procfile_lineword(ff, l, w)); w++;
-
- char *major = procfile_lineword(ff, l, w), *minor; w++;
- for(minor = major; *minor && *minor != ':' ;minor++) ;
-
- if(unlikely(!*minor)) {
- collector_error("Cannot parse major:minor on '%s' at line %lu of '%s'", major, l + 1, filename);
- freez(mi);
- continue;
- }
-
- *minor = '\0';
- minor++;
-
- mi->flags = 0;
-
- mi->major = str2ul(major);
- mi->minor = str2ul(minor);
-
- mi->root = strdupz(procfile_lineword(ff, l, w)); w++;
- mi->root_hash = simple_hash(mi->root);
-
- mi->mount_point = strdupz_decoding_octal(procfile_lineword(ff, l, w)); w++;
- mi->mount_point_hash = simple_hash(mi->mount_point);
-
- mi->persistent_id = strdupz(mi->mount_point);
- netdata_fix_chart_id(mi->persistent_id);
- mi->persistent_id_hash = simple_hash(mi->persistent_id);
-
- mi->mount_options = strdupz(procfile_lineword(ff, l, w)); w++;
-
- if(unlikely(is_read_only(mi->mount_options)))
- mi->flags |= MOUNTINFO_READONLY;
-
- if(unlikely(mount_point_is_protected(mi->mount_point)))
- mi->flags |= MOUNTINFO_IS_IN_SYSD_PROTECTED_LIST;
-
- // count the optional fields
-/*
- unsigned long wo = w;
-*/
- mi->optional_fields_count = 0;
- char *s = procfile_lineword(ff, l, w);
- while(*s && *s != '-') {
- w++;
- s = procfile_lineword(ff, l, w);
- mi->optional_fields_count++;
- }
-
-/*
- if(unlikely(mi->optional_fields_count)) {
- // we have some optional fields
- // read them into a new array of pointers;
-
- mi->optional_fields = mallocz(mi->optional_fields_count * sizeof(char *));
-
- int i;
- for(i = 0; i < mi->optional_fields_count ; i++) {
- *mi->optional_fields[wo] = strdupz(procfile_lineword(ff, l, w));
- wo++;
- }
- }
- else
- mi->optional_fields = NULL;
-*/
-
- if(likely(*s == '-')) {
- w++;
-
- mi->filesystem = strdupz(procfile_lineword(ff, l, w)); w++;
- mi->filesystem_hash = simple_hash(mi->filesystem);
-
- mi->mount_source = strdupz_decoding_octal(procfile_lineword(ff, l, w)); w++;
- mi->mount_source_hash = simple_hash(mi->mount_source);
-
- mi->mount_source_name = strdupz(basename(mi->mount_source));
- mi->mount_source_name_hash = simple_hash(mi->mount_source_name);
-
- mi->super_options = strdupz(procfile_lineword(ff, l, w)); w++;
-
- if(unlikely(is_read_only(mi->super_options)))
- mi->flags |= MOUNTINFO_READONLY;
-
- if(unlikely(ME_DUMMY(mi->mount_source, mi->filesystem)))
- mi->flags |= MOUNTINFO_IS_DUMMY;
-
- if(unlikely(ME_REMOTE(mi->mount_source, mi->filesystem)))
- mi->flags |= MOUNTINFO_IS_REMOTE;
-
-                // mark as SAME_DEV any duplicates sharing the same st_dev; the longer mount point gets the flag
- if(do_statvfs) {
- struct stat buf;
- if(unlikely(stat(mi->mount_point, &buf) == -1)) {
- mi->st_dev = 0;
- mi->flags |= MOUNTINFO_NO_STAT;
- }
- else {
- mi->st_dev = buf.st_dev;
-
- struct mountinfo *mt;
- for(mt = root; mt; mt = mt->next) {
- if(unlikely(mt->st_dev == mi->st_dev && !(mt->flags & MOUNTINFO_IS_SAME_DEV))) {
- if(strlen(mi->mount_point) < strlen(mt->mount_point))
- mt->flags |= MOUNTINFO_IS_SAME_DEV;
- else
- mi->flags |= MOUNTINFO_IS_SAME_DEV;
- }
- }
- }
- }
- else {
- mi->st_dev = 0;
- }
-
-            // try to detect devices with the same major:minor numbers; among these,
-            // the one with the longer root path is considered a bind mount.
- struct mountinfo *mt;
- for(mt = root; mt; mt = mt->next) {
- if(unlikely(mt->major == mi->major && mt->minor == mi->minor && !(mi->flags & MOUNTINFO_IS_BIND))) {
- if(strlen(mi->root) < strlen(mt->root))
- mt->flags |= MOUNTINFO_IS_BIND;
- else
- mi->flags |= MOUNTINFO_IS_BIND;
- }
- }
- }
- else {
- mi->filesystem = NULL;
- mi->filesystem_hash = 0;
-
- mi->mount_source = NULL;
- mi->mount_source_hash = 0;
-
- mi->mount_source_name = NULL;
- mi->mount_source_name_hash = 0;
-
- mi->super_options = NULL;
-
- mi->st_dev = 0;
- }
-
- // check if it has size
- if(do_statvfs && !(mi->flags & MOUNTINFO_IS_DUMMY)) {
- struct statvfs buff_statvfs;
- if(unlikely(statvfs(mi->mount_point, &buff_statvfs) < 0)) {
- mi->flags |= MOUNTINFO_NO_STAT;
- }
- else if(unlikely(!buff_statvfs.f_blocks /* || !buff_statvfs.f_files */)) {
- mi->flags |= MOUNTINFO_NO_SIZE;
- }
- }
-
- // link it
- if(unlikely(!root))
- root = mi;
- else
- last->next = mi;
-
- last = mi;
- mi->next = NULL;
-
-/*
-#ifdef NETDATA_INTERNAL_CHECKS
- fprintf(stderr, "MOUNTINFO: %ld %ld %lu:%lu root '%s', persistent id '%s', mount point '%s', mount options '%s', filesystem '%s', mount source '%s', super options '%s'%s%s%s%s%s%s\n",
- mi->id,
- mi->parentid,
- mi->major,
- mi->minor,
- mi->root,
- mi->persistent_id,
- (mi->mount_point)?mi->mount_point:"",
- (mi->mount_options)?mi->mount_options:"",
- (mi->filesystem)?mi->filesystem:"",
- (mi->mount_source)?mi->mount_source:"",
- (mi->super_options)?mi->super_options:"",
- (mi->flags & MOUNTINFO_IS_DUMMY)?" DUMMY":"",
- (mi->flags & MOUNTINFO_IS_BIND)?" BIND":"",
- (mi->flags & MOUNTINFO_IS_REMOTE)?" REMOTE":"",
- (mi->flags & MOUNTINFO_NO_STAT)?" NOSTAT":"",
- (mi->flags & MOUNTINFO_NO_SIZE)?" NOSIZE":"",
- (mi->flags & MOUNTINFO_IS_SAME_DEV)?" SAMEDEV":""
- );
-#endif
-*/
- }
-
-/* find if the mount options have "bind" in them
- {
- FILE *fp = setmntent(MOUNTED, "r");
- if (fp != NULL) {
- struct mntent mntbuf;
- struct mntent *mnt;
- char buf[4096 + 1];
-
- while ((mnt = getmntent_r(fp, &mntbuf, buf, 4096))) {
- char *bind = hasmntopt(mnt, "bind");
- if(unlikely(bind)) {
- struct mountinfo *mi;
- for(mi = root; mi ; mi = mi->next) {
- if(unlikely(strcmp(mnt->mnt_dir, mi->mount_point) == 0)) {
- fprintf(stderr, "Mount point '%s' is BIND\n", mi->mount_point);
- mi->flags |= MOUNTINFO_IS_BIND;
- break;
- }
- }
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(!mi)) {
- collector_error("Mount point '%s' not found in /proc/self/mountinfo", mnt->mnt_dir);
- }
-#endif
- }
- }
- endmntent(fp);
- }
- }
-*/
-
- dictionary_destroy(dict);
- procfile_close(ff);
- return root;
-}
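
A note on strdupz_decoding_octal() above: the kernel escapes special characters in mountinfo paths as three-digit octal sequences (\040 for space, \011 for tab), and the decoder folds each triple back into one byte. A standalone sketch of the same transform:

    #include <ctype.h>
    #include <stdio.h>

    static void decode_octal(char *d, const char *s) {
        while (*s) {
            if (*s == '\\' && isdigit((unsigned char)s[1]) &&
                isdigit((unsigned char)s[2]) && isdigit((unsigned char)s[3])) {
                /* three octal digits: d1*64 + d2*8 + d3 */
                *d++ = (char)(((s[1] - '0') << 6) | ((s[2] - '0') << 3) | (s[3] - '0'));
                s += 4;
            }
            else
                *d++ = *s++;
        }
        *d = '\0';
    }

    int main(void) {
        char out[64];
        decode_octal(out, "/mnt/my\\040disk");
        printf("%s\n", out);  /* -> /mnt/my disk */
        return 0;
    }
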
diff --git a/collectors/proc.plugin/proc_self_mountinfo.h b/collectors/proc.plugin/proc_self_mountinfo.h
deleted file mode 100644
index 4bd24d2d2..000000000
--- a/collectors/proc.plugin/proc_self_mountinfo.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PROC_SELF_MOUNTINFO_H
-#define NETDATA_PROC_SELF_MOUNTINFO_H 1
-
-#define MOUNTINFO_IS_DUMMY 0x00000001
-#define MOUNTINFO_IS_REMOTE 0x00000002
-#define MOUNTINFO_IS_BIND 0x00000004
-#define MOUNTINFO_IS_SAME_DEV 0x00000008
-#define MOUNTINFO_NO_STAT 0x00000010
-#define MOUNTINFO_NO_SIZE 0x00000020
-#define MOUNTINFO_READONLY 0x00000040
-#define MOUNTINFO_IS_IN_SYSD_PROTECTED_LIST 0x00000080
-
-struct mountinfo {
- long id; // mount ID: unique identifier of the mount (may be reused after umount(2)).
- long parentid; // parent ID: ID of parent mount (or of self for the top of the mount tree).
- unsigned long major; // major:minor: value of st_dev for files on filesystem (see stat(2)).
- unsigned long minor;
-
- char *persistent_id; // a calculated persistent id for the mount point
- uint32_t persistent_id_hash;
-
- char *root; // root: root of the mount within the filesystem.
- uint32_t root_hash;
-
- char *mount_point; // mount point: mount point relative to the process's root.
- uint32_t mount_point_hash;
-
- char *mount_options; // mount options: per-mount options.
-
- int optional_fields_count;
-/*
- char ***optional_fields; // optional fields: zero or more fields of the form "tag[:value]".
-*/
- char *filesystem; // filesystem type: name of filesystem in the form "type[.subtype]".
- uint32_t filesystem_hash;
-
- char *mount_source; // mount source: filesystem-specific information or "none".
- uint32_t mount_source_hash;
-
- char *mount_source_name;
- uint32_t mount_source_name_hash;
-
- char *super_options; // super options: per-superblock options.
-
- uint32_t flags;
-
- dev_t st_dev; // id of device as given by stat()
-
- struct mountinfo *next;
-};
-
-struct mountinfo *mountinfo_find(struct mountinfo *root, unsigned long major, unsigned long minor, char *device);
-struct mountinfo *mountinfo_find_by_filesystem_mount_source(struct mountinfo *root, const char *filesystem, const char *mount_source);
-struct mountinfo *mountinfo_find_by_filesystem_super_option(struct mountinfo *root, const char *filesystem, const char *super_options);
-
-void mountinfo_free_all(struct mountinfo *mi);
-struct mountinfo *mountinfo_read(int do_statvfs);
-
-#endif /* NETDATA_PROC_SELF_MOUNTINFO_H */
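
A hypothetical consumer of this header, showing how the flag bits above combine when walking the list returned by mountinfo_read(); the helper name and the filtering policy are illustrative, not taken from the collectors, and the header's declarations are assumed to be in scope:

    #include <stdio.h>

    /* Print only mounts a disk-space collector would typically chart. */
    void print_real_mounts(struct mountinfo *root) {
        for (struct mountinfo *mi = root; mi; mi = mi->next) {
            if (mi->flags & (MOUNTINFO_IS_DUMMY | MOUNTINFO_IS_BIND | MOUNTINFO_NO_SIZE))
                continue;  /* pseudo filesystems, bind duplicates, zero-sized mounts */
            printf("%s on %s type %s%s\n",
                   mi->mount_source ? mi->mount_source : "?",
                   mi->mount_point,
                   mi->filesystem ? mi->filesystem : "?",
                   (mi->flags & MOUNTINFO_READONLY) ? " (ro)" : "");
        }
    }
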
diff --git a/collectors/proc.plugin/proc_softirqs.c b/collectors/proc.plugin/proc_softirqs.c
deleted file mode 100644
index 5f0502f66..000000000
--- a/collectors/proc.plugin/proc_softirqs.c
+++ /dev/null
@@ -1,243 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_SOFTIRQS_NAME "/proc/softirqs"
-
-#define MAX_INTERRUPT_NAME 50
-
-struct cpu_interrupt {
- unsigned long long value;
- RRDDIM *rd;
-};
-
-struct interrupt {
- int used;
- char *id;
- char name[MAX_INTERRUPT_NAME + 1];
- RRDDIM *rd;
- unsigned long long total;
- struct cpu_interrupt cpu[];
-};
-
-// since each interrupt is variable in size
-// we use this to calculate its record size
-#define recordsize(cpus) (sizeof(struct interrupt) + ((cpus) * sizeof(struct cpu_interrupt)))
-
-// given a base, get a pointer to each record
-#define irrindex(base, line, cpus) ((struct interrupt *)&((char *)(base))[(line) * recordsize(cpus)])
-
-static inline struct interrupt *get_interrupts_array(size_t lines, int cpus) {
- static struct interrupt *irrs = NULL;
- static size_t allocated = 0;
-
- if(unlikely(lines != allocated)) {
- uint32_t l;
- int c;
-
- irrs = (struct interrupt *)reallocz(irrs, lines * recordsize(cpus));
-
- // reset all interrupt RRDDIM pointers as any line could have shifted
- for(l = 0; l < lines ;l++) {
- struct interrupt *irr = irrindex(irrs, l, cpus);
- irr->rd = NULL;
- irr->name[0] = '\0';
- for(c = 0; c < cpus ;c++)
- irr->cpu[c].rd = NULL;
- }
-
- allocated = lines;
- }
-
- return irrs;
-}
-
-int do_proc_softirqs(int update_every, usec_t dt) {
- (void)dt;
- static procfile *ff = NULL;
- static int cpus = -1, do_per_core = CONFIG_BOOLEAN_INVALID;
- struct interrupt *irrs = NULL;
-
- if(unlikely(do_per_core == CONFIG_BOOLEAN_INVALID))
- do_per_core = config_get_boolean_ondemand("plugin:proc:/proc/softirqs", "interrupts per core", CONFIG_BOOLEAN_AUTO);
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/softirqs");
- ff = procfile_open(config_get("plugin:proc:/proc/softirqs", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
-    if(unlikely(!ff)) return 0; // so that we will retry opening it next time
-
- size_t lines = procfile_lines(ff), l;
- size_t words = procfile_linewords(ff, 0);
-
- if(unlikely(!lines)) {
- collector_error("Cannot read /proc/softirqs, zero lines reported.");
- return 1;
- }
-
- // find how many CPUs are there
- if(unlikely(cpus == -1)) {
- uint32_t w;
- cpus = 0;
- for(w = 0; w < words ; w++) {
- if(likely(strncmp(procfile_lineword(ff, 0, w), "CPU", 3) == 0))
- cpus++;
- }
- }
-
- if(unlikely(!cpus)) {
- collector_error("PLUGIN: PROC_SOFTIRQS: Cannot find the number of CPUs in /proc/softirqs");
- return 1;
- }
-
-    // allocate the size we need
- irrs = get_interrupts_array(lines, cpus);
- irrs[0].used = 0;
-
- // loop through all lines
- for(l = 1; l < lines ;l++) {
- struct interrupt *irr = irrindex(irrs, l, cpus);
- irr->used = 0;
- irr->total = 0;
-
- words = procfile_linewords(ff, l);
- if(unlikely(!words)) continue;
-
- irr->id = procfile_lineword(ff, l, 0);
- if(unlikely(!irr->id || !irr->id[0])) continue;
-
- int c;
- for(c = 0; c < cpus ;c++) {
- if(likely((c + 1) < (int)words))
- irr->cpu[c].value = str2ull(procfile_lineword(ff, l, (uint32_t) (c + 1)), NULL);
- else
- irr->cpu[c].value = 0;
-
- irr->total += irr->cpu[c].value;
- }
-
- strncpyz(irr->name, irr->id, MAX_INTERRUPT_NAME);
-
- irr->used = 1;
- }
-
- // --------------------------------------------------------------------
-
- static RRDSET *st_system_softirqs = NULL;
- if(unlikely(!st_system_softirqs)) {
- st_system_softirqs = rrdset_create_localhost(
- "system"
- , "softirqs"
- , NULL
- , "softirqs"
- , NULL
- , "System softirqs"
- , "softirqs/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_SOFTIRQS_NAME
- , NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS
- , update_every
- , RRDSET_TYPE_STACKED
- );
- }
-
- for(l = 0; l < lines ;l++) {
- struct interrupt *irr = irrindex(irrs, l, cpus);
-
- if(irr->used && irr->total) {
- // some interrupt may have changed without changing the total number of lines
- // if the same number of interrupts have been added and removed between two
- // calls of this function.
- if(unlikely(!irr->rd || strncmp(irr->name, rrddim_name(irr->rd), MAX_INTERRUPT_NAME) != 0)) {
- irr->rd = rrddim_add(st_system_softirqs, irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_reset_name(st_system_softirqs, irr->rd, irr->name);
-
- // also reset per cpu RRDDIMs to avoid repeating strncmp() in the per core loop
- if(likely(do_per_core != CONFIG_BOOLEAN_NO)) {
- int c;
- for(c = 0; c < cpus; c++) irr->cpu[c].rd = NULL;
- }
- }
-
- rrddim_set_by_pointer(st_system_softirqs, irr->rd, irr->total);
- }
- }
-
- rrdset_done(st_system_softirqs);
-
- // --------------------------------------------------------------------
-
- if(do_per_core != CONFIG_BOOLEAN_NO) {
- static RRDSET **core_st = NULL;
- static int old_cpus = 0;
-
- if(old_cpus < cpus) {
- core_st = reallocz(core_st, sizeof(RRDSET *) * cpus);
- memset(&core_st[old_cpus], 0, sizeof(RRDSET *) * (cpus - old_cpus));
- old_cpus = cpus;
- }
-
- int c;
-
- for(c = 0; c < cpus ; c++) {
- if(unlikely(!core_st[c])) {
- // find if everything is just zero
- unsigned long long core_sum = 0;
-
- for (l = 0; l < lines; l++) {
- struct interrupt *irr = irrindex(irrs, l, cpus);
- if (unlikely(!irr->used)) continue;
- core_sum += irr->cpu[c].value;
- }
-
- if (unlikely(core_sum == 0)) continue; // try next core
-
- char id[50 + 1];
- snprintfz(id, sizeof(id) - 1, "cpu%d_softirqs", c);
-
- char title[100 + 1];
- snprintfz(title, sizeof(title) - 1, "CPU softirqs");
-
- core_st[c] = rrdset_create_localhost(
- "cpu"
- , id
- , NULL
- , "softirqs"
- , "cpu.softirqs"
- , title
- , "softirqs/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_SOFTIRQS_NAME
- , NETDATA_CHART_PRIO_SOFTIRQS_PER_CORE + c
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- char core[50+1];
- snprintfz(core, sizeof(core) - 1, "cpu%d", c);
- rrdlabels_add(core_st[c]->rrdlabels, "cpu", core, RRDLABEL_SRC_AUTO);
- }
-
- for(l = 0; l < lines ;l++) {
- struct interrupt *irr = irrindex(irrs, l, cpus);
-
- if(irr->used && (do_per_core == CONFIG_BOOLEAN_YES || irr->cpu[c].value)) {
- if(unlikely(!irr->cpu[c].rd)) {
- irr->cpu[c].rd = rrddim_add(core_st[c], irr->id, irr->name, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_reset_name(core_st[c], irr->cpu[c].rd, irr->name);
- }
-
- rrddim_set_by_pointer(core_st[c], irr->cpu[c].rd, irr->cpu[c].value);
- }
- }
-
- rrdset_done(core_st[c]);
- }
- }
-
- return 0;
-}
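
The recordsize()/irrindex() pair above implements a variable-size record layout: each row is a struct interrupt followed inline by one struct cpu_interrupt per CPU, so rows are addressed by byte offset rather than plain array indexing. A standalone sketch of that layout with simplified struct names:

    #include <stdio.h>
    #include <stdlib.h>

    struct cpu_val { unsigned long long value; };
    struct row     { unsigned long long total; struct cpu_val cpu[]; };

    /* byte size of one row, and pointer to row i within the flat buffer */
    #define ROWSIZE(cpus)      (sizeof(struct row) + (cpus) * sizeof(struct cpu_val))
    #define ROW(base, i, cpus) ((struct row *)&((char *)(base))[(i) * ROWSIZE(cpus)])

    int main(void) {
        int cpus = 4;
        size_t lines = 3;
        void *base = calloc(lines, ROWSIZE(cpus));

        for (size_t l = 0; l < lines; l++) {
            struct row *r = ROW(base, l, cpus);
            for (int c = 0; c < cpus; c++) {
                r->cpu[c].value = l * 10 + c;
                r->total += r->cpu[c].value;
            }
            printf("row %zu total %llu\n", l, r->total);
        }
        free(base);
        return 0;
    }
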
diff --git a/collectors/proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c
deleted file mode 100644
index 27178b60f..000000000
--- a/collectors/proc.plugin/proc_spl_kstat_zfs.c
+++ /dev/null
@@ -1,435 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-#include "zfs_common.h"
-
-#define ZFS_PROC_ARCSTATS "/proc/spl/kstat/zfs/arcstats"
-#define ZFS_PROC_POOLS "/proc/spl/kstat/zfs"
-
-#define STATE_SIZE 9
-#define MAX_CHART_ID 256
-
-extern struct arcstats arcstats;
-
-unsigned long long zfs_arcstats_shrinkable_cache_size_bytes = 0;
-
-int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) {
- (void)dt;
-
- static int show_zero_charts = 0, do_zfs_stats = 0;
- static procfile *ff = NULL;
- static char *dirname = NULL;
- static ARL_BASE *arl_base = NULL;
-
- arcstats.l2exist = -1;
-
- if(unlikely(!arl_base)) {
- arl_base = arl_create("arcstats", NULL, 60);
-
- arl_expect(arl_base, "hits", &arcstats.hits);
- arl_expect(arl_base, "misses", &arcstats.misses);
- arl_expect(arl_base, "demand_data_hits", &arcstats.demand_data_hits);
- arl_expect(arl_base, "demand_data_misses", &arcstats.demand_data_misses);
- arl_expect(arl_base, "demand_metadata_hits", &arcstats.demand_metadata_hits);
- arl_expect(arl_base, "demand_metadata_misses", &arcstats.demand_metadata_misses);
- arl_expect(arl_base, "prefetch_data_hits", &arcstats.prefetch_data_hits);
- arl_expect(arl_base, "prefetch_data_misses", &arcstats.prefetch_data_misses);
- arl_expect(arl_base, "prefetch_metadata_hits", &arcstats.prefetch_metadata_hits);
- arl_expect(arl_base, "prefetch_metadata_misses", &arcstats.prefetch_metadata_misses);
- arl_expect(arl_base, "mru_hits", &arcstats.mru_hits);
- arl_expect(arl_base, "mru_ghost_hits", &arcstats.mru_ghost_hits);
- arl_expect(arl_base, "mfu_hits", &arcstats.mfu_hits);
- arl_expect(arl_base, "mfu_ghost_hits", &arcstats.mfu_ghost_hits);
- arl_expect(arl_base, "deleted", &arcstats.deleted);
- arl_expect(arl_base, "mutex_miss", &arcstats.mutex_miss);
- arl_expect(arl_base, "evict_skip", &arcstats.evict_skip);
- arl_expect(arl_base, "evict_not_enough", &arcstats.evict_not_enough);
- arl_expect(arl_base, "evict_l2_cached", &arcstats.evict_l2_cached);
- arl_expect(arl_base, "evict_l2_eligible", &arcstats.evict_l2_eligible);
- arl_expect(arl_base, "evict_l2_ineligible", &arcstats.evict_l2_ineligible);
- arl_expect(arl_base, "evict_l2_skip", &arcstats.evict_l2_skip);
- arl_expect(arl_base, "hash_elements", &arcstats.hash_elements);
- arl_expect(arl_base, "hash_elements_max", &arcstats.hash_elements_max);
- arl_expect(arl_base, "hash_collisions", &arcstats.hash_collisions);
- arl_expect(arl_base, "hash_chains", &arcstats.hash_chains);
- arl_expect(arl_base, "hash_chain_max", &arcstats.hash_chain_max);
- arl_expect(arl_base, "p", &arcstats.p);
- arl_expect(arl_base, "c", &arcstats.c);
- arl_expect(arl_base, "c_min", &arcstats.c_min);
- arl_expect(arl_base, "c_max", &arcstats.c_max);
- arl_expect(arl_base, "size", &arcstats.size);
- arl_expect(arl_base, "hdr_size", &arcstats.hdr_size);
- arl_expect(arl_base, "data_size", &arcstats.data_size);
- arl_expect(arl_base, "metadata_size", &arcstats.metadata_size);
- arl_expect(arl_base, "other_size", &arcstats.other_size);
- arl_expect(arl_base, "anon_size", &arcstats.anon_size);
- arl_expect(arl_base, "anon_evictable_data", &arcstats.anon_evictable_data);
- arl_expect(arl_base, "anon_evictable_metadata", &arcstats.anon_evictable_metadata);
- arl_expect(arl_base, "mru_size", &arcstats.mru_size);
- arl_expect(arl_base, "mru_evictable_data", &arcstats.mru_evictable_data);
- arl_expect(arl_base, "mru_evictable_metadata", &arcstats.mru_evictable_metadata);
- arl_expect(arl_base, "mru_ghost_size", &arcstats.mru_ghost_size);
- arl_expect(arl_base, "mru_ghost_evictable_data", &arcstats.mru_ghost_evictable_data);
- arl_expect(arl_base, "mru_ghost_evictable_metadata", &arcstats.mru_ghost_evictable_metadata);
- arl_expect(arl_base, "mfu_size", &arcstats.mfu_size);
- arl_expect(arl_base, "mfu_evictable_data", &arcstats.mfu_evictable_data);
- arl_expect(arl_base, "mfu_evictable_metadata", &arcstats.mfu_evictable_metadata);
- arl_expect(arl_base, "mfu_ghost_size", &arcstats.mfu_ghost_size);
- arl_expect(arl_base, "mfu_ghost_evictable_data", &arcstats.mfu_ghost_evictable_data);
- arl_expect(arl_base, "mfu_ghost_evictable_metadata", &arcstats.mfu_ghost_evictable_metadata);
- arl_expect(arl_base, "l2_hits", &arcstats.l2_hits);
- arl_expect(arl_base, "l2_misses", &arcstats.l2_misses);
- arl_expect(arl_base, "l2_feeds", &arcstats.l2_feeds);
- arl_expect(arl_base, "l2_rw_clash", &arcstats.l2_rw_clash);
- arl_expect(arl_base, "l2_read_bytes", &arcstats.l2_read_bytes);
- arl_expect(arl_base, "l2_write_bytes", &arcstats.l2_write_bytes);
- arl_expect(arl_base, "l2_writes_sent", &arcstats.l2_writes_sent);
- arl_expect(arl_base, "l2_writes_done", &arcstats.l2_writes_done);
- arl_expect(arl_base, "l2_writes_error", &arcstats.l2_writes_error);
- arl_expect(arl_base, "l2_writes_lock_retry", &arcstats.l2_writes_lock_retry);
- arl_expect(arl_base, "l2_evict_lock_retry", &arcstats.l2_evict_lock_retry);
- arl_expect(arl_base, "l2_evict_reading", &arcstats.l2_evict_reading);
- arl_expect(arl_base, "l2_evict_l1cached", &arcstats.l2_evict_l1cached);
- arl_expect(arl_base, "l2_free_on_write", &arcstats.l2_free_on_write);
- arl_expect(arl_base, "l2_cdata_free_on_write", &arcstats.l2_cdata_free_on_write);
- arl_expect(arl_base, "l2_abort_lowmem", &arcstats.l2_abort_lowmem);
- arl_expect(arl_base, "l2_cksum_bad", &arcstats.l2_cksum_bad);
- arl_expect(arl_base, "l2_io_error", &arcstats.l2_io_error);
- arl_expect(arl_base, "l2_size", &arcstats.l2_size);
- arl_expect(arl_base, "l2_asize", &arcstats.l2_asize);
- arl_expect(arl_base, "l2_hdr_size", &arcstats.l2_hdr_size);
- arl_expect(arl_base, "l2_compress_successes", &arcstats.l2_compress_successes);
- arl_expect(arl_base, "l2_compress_zeros", &arcstats.l2_compress_zeros);
- arl_expect(arl_base, "l2_compress_failures", &arcstats.l2_compress_failures);
- arl_expect(arl_base, "memory_throttle_count", &arcstats.memory_throttle_count);
- arl_expect(arl_base, "duplicate_buffers", &arcstats.duplicate_buffers);
- arl_expect(arl_base, "duplicate_buffers_size", &arcstats.duplicate_buffers_size);
- arl_expect(arl_base, "duplicate_reads", &arcstats.duplicate_reads);
- arl_expect(arl_base, "memory_direct_count", &arcstats.memory_direct_count);
- arl_expect(arl_base, "memory_indirect_count", &arcstats.memory_indirect_count);
- arl_expect(arl_base, "arc_no_grow", &arcstats.arc_no_grow);
- arl_expect(arl_base, "arc_tempreserve", &arcstats.arc_tempreserve);
- arl_expect(arl_base, "arc_loaned_bytes", &arcstats.arc_loaned_bytes);
- arl_expect(arl_base, "arc_prune", &arcstats.arc_prune);
- arl_expect(arl_base, "arc_meta_used", &arcstats.arc_meta_used);
- arl_expect(arl_base, "arc_meta_limit", &arcstats.arc_meta_limit);
- arl_expect(arl_base, "arc_meta_max", &arcstats.arc_meta_max);
- arl_expect(arl_base, "arc_meta_min", &arcstats.arc_meta_min);
- arl_expect(arl_base, "arc_need_free", &arcstats.arc_need_free);
- arl_expect(arl_base, "arc_sys_free", &arcstats.arc_sys_free);
- }
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, ZFS_PROC_ARCSTATS);
- ff = procfile_open(config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff))
- return 1;
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/spl/kstat/zfs");
- dirname = config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "directory to monitor", filename);
-
- show_zero_charts = config_get_boolean_ondemand("plugin:proc:" ZFS_PROC_ARCSTATS, "show zero charts", CONFIG_BOOLEAN_NO);
- if(show_zero_charts == CONFIG_BOOLEAN_AUTO && netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES)
- show_zero_charts = CONFIG_BOOLEAN_YES;
- if(unlikely(show_zero_charts == CONFIG_BOOLEAN_YES))
- do_zfs_stats = 1;
- }
-
- // check if any pools exist
- if(likely(!do_zfs_stats)) {
- DIR *dir = opendir(dirname);
- if(unlikely(!dir)) {
- collector_error("Cannot read directory '%s'", dirname);
- return 1;
- }
-
- struct dirent *de = NULL;
- while(likely(de = readdir(dir))) {
- if(likely(de->d_type == DT_DIR
- && (
- (de->d_name[0] == '.' && de->d_name[1] == '\0')
- || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
- )))
- continue;
-
- if(unlikely(de->d_type == DT_LNK || de->d_type == DT_DIR)) {
- do_zfs_stats = 1;
- break;
- }
- }
-
- closedir(dir);
- }
-
- // do not show ZFS filesystem metrics if there haven't been any pools in the system yet
- if(unlikely(!do_zfs_stats))
- return 0;
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
-        return 0; // so that we will retry opening it next time
-
- size_t lines = procfile_lines(ff), l;
-
- arl_begin(arl_base);
-
- for(l = 0; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
- if(unlikely(words < 3)) {
- if(unlikely(words)) collector_error("Cannot read " ZFS_PROC_ARCSTATS " line %zu. Expected 3 params, read %zu.", l, words);
- continue;
- }
-
- const char *key = procfile_lineword(ff, l, 0);
- const char *value = procfile_lineword(ff, l, 2);
-
- if(unlikely(arcstats.l2exist == -1)) {
- if(key[0] == 'l' && key[1] == '2' && key[2] == '_')
- arcstats.l2exist = 1;
- }
-
- if(unlikely(arl_check(arl_base, key, value))) break;
- }
-
- if (arcstats.size > arcstats.c_min) {
- zfs_arcstats_shrinkable_cache_size_bytes = arcstats.size - arcstats.c_min;
- } else {
- zfs_arcstats_shrinkable_cache_size_bytes = 0;
- }
-
- if(unlikely(arcstats.l2exist == -1))
- arcstats.l2exist = 0;
-
- generate_charts_arcstats(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, show_zero_charts, update_every);
- generate_charts_arc_summary(PLUGIN_PROC_NAME, ZFS_PROC_ARCSTATS, show_zero_charts, update_every);
-
- return 0;
-}
-
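
For context on the loop above: /proc/spl/kstat/zfs/arcstats rows are "name type data" triples, so word 0 is the key and word 2 the value (word 1 is the kstat data type). This sscanf version stands in for the ARL lookup the collector uses; the sample rows are made up:

    #include <stdio.h>
    #include <string.h>

    int main(void) {
        const char *lines[] = {
            "hits                            4    1572864",
            "misses                          4      24576",
            "size                            4  134217728",
        };
        unsigned long long hits = 0, misses = 0, size = 0;

        for (size_t i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
            char key[64];
            int type;
            unsigned long long value;
            if (sscanf(lines[i], "%63s %d %llu", key, &type, &value) != 3)
                continue;
            if (!strcmp(key, "hits"))        hits = value;
            else if (!strcmp(key, "misses")) misses = value;
            else if (!strcmp(key, "size"))   size = value;
        }
        printf("ARC: %llu hits, %llu misses, %llu bytes\n", hits, misses, size);
        return 0;
    }
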
-struct zfs_pool {
- RRDSET *st;
-
- RRDDIM *rd_online;
- RRDDIM *rd_degraded;
- RRDDIM *rd_faulted;
- RRDDIM *rd_offline;
- RRDDIM *rd_removed;
- RRDDIM *rd_unavail;
- RRDDIM *rd_suspended;
-
- int updated;
- int disabled;
-
- int online;
- int degraded;
- int faulted;
- int offline;
- int removed;
- int unavail;
- int suspended;
-};
-
-struct deleted_zfs_pool {
- char *name;
- struct deleted_zfs_pool *next;
-} *deleted_zfs_pools = NULL;
-
-DICTIONARY *zfs_pools = NULL;
-
-void disable_zfs_pool_state(struct zfs_pool *pool)
-{
- if (pool->st)
- rrdset_is_obsolete___safe_from_collector_thread(pool->st);
-
- pool->st = NULL;
-
- pool->rd_online = NULL;
- pool->rd_degraded = NULL;
- pool->rd_faulted = NULL;
- pool->rd_offline = NULL;
- pool->rd_removed = NULL;
- pool->rd_unavail = NULL;
- pool->rd_suspended = NULL;
-
- pool->disabled = 1;
-}
-
-int update_zfs_pool_state_chart(const DICTIONARY_ITEM *item, void *pool_p, void *update_every_p) {
- const char *name = dictionary_acquired_item_name(item);
- struct zfs_pool *pool = (struct zfs_pool *)pool_p;
- int update_every = *(int *)update_every_p;
-
- if (pool->updated) {
- pool->updated = 0;
-
- if (!pool->disabled) {
- if (unlikely(!pool->st)) {
- char chart_id[MAX_CHART_ID + 1];
- snprintf(chart_id, MAX_CHART_ID, "state_%s", name);
-
- pool->st = rrdset_create_localhost(
- "zfspool",
- chart_id,
- NULL,
- name,
- "zfspool.state",
- "ZFS pool state",
- "boolean",
- PLUGIN_PROC_NAME,
- ZFS_PROC_POOLS,
- NETDATA_CHART_PRIO_ZFS_POOL_STATE,
- update_every,
- RRDSET_TYPE_LINE);
-
- pool->rd_online = rrddim_add(pool->st, "online", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- pool->rd_degraded = rrddim_add(pool->st, "degraded", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- pool->rd_faulted = rrddim_add(pool->st, "faulted", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- pool->rd_offline = rrddim_add(pool->st, "offline", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- pool->rd_removed = rrddim_add(pool->st, "removed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- pool->rd_unavail = rrddim_add(pool->st, "unavail", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- pool->rd_suspended = rrddim_add(pool->st, "suspended", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- rrdlabels_add(pool->st->rrdlabels, "pool", name, RRDLABEL_SRC_AUTO);
- }
-
- rrddim_set_by_pointer(pool->st, pool->rd_online, pool->online);
- rrddim_set_by_pointer(pool->st, pool->rd_degraded, pool->degraded);
- rrddim_set_by_pointer(pool->st, pool->rd_faulted, pool->faulted);
- rrddim_set_by_pointer(pool->st, pool->rd_offline, pool->offline);
- rrddim_set_by_pointer(pool->st, pool->rd_removed, pool->removed);
- rrddim_set_by_pointer(pool->st, pool->rd_unavail, pool->unavail);
- rrddim_set_by_pointer(pool->st, pool->rd_suspended, pool->suspended);
- rrdset_done(pool->st);
- }
- } else {
- disable_zfs_pool_state(pool);
- struct deleted_zfs_pool *new = callocz(1, sizeof(struct deleted_zfs_pool));
- new->name = strdupz(name);
- new->next = deleted_zfs_pools;
- deleted_zfs_pools = new;
- }
-
- return 0;
-}
-
-int do_proc_spl_kstat_zfs_pool_state(int update_every, usec_t dt)
-{
- (void)dt;
-
- static int do_zfs_pool_state = -1;
- static char *dirname = NULL;
-
- int pool_found = 0, state_file_found = 0;
-
- if (unlikely(do_zfs_pool_state == -1)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/spl/kstat/zfs");
- dirname = config_get("plugin:proc:" ZFS_PROC_POOLS, "directory to monitor", filename);
-
- zfs_pools = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED, &dictionary_stats_category_collectors, 0);
-
- do_zfs_pool_state = 1;
- }
-
- if (likely(do_zfs_pool_state)) {
- DIR *dir = opendir(dirname);
- if (unlikely(!dir)) {
- if (errno == ENOENT)
- collector_info("Cannot read directory '%s'", dirname);
- else
- collector_error("Cannot read directory '%s'", dirname);
- return 1;
- }
-
- struct dirent *de = NULL;
- while (likely(de = readdir(dir))) {
- if (likely(
- de->d_type == DT_DIR && ((de->d_name[0] == '.' && de->d_name[1] == '\0') ||
- (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0'))))
- continue;
-
- if (unlikely(de->d_type == DT_LNK || de->d_type == DT_DIR)) {
- pool_found = 1;
-
- struct zfs_pool *pool = dictionary_get(zfs_pools, de->d_name);
-
- if (unlikely(!pool)) {
- struct zfs_pool new_zfs_pool = {};
- pool = dictionary_set(zfs_pools, de->d_name, &new_zfs_pool, sizeof(struct zfs_pool));
-                }
-
- pool->updated = 1;
-
- if (pool->disabled) {
- state_file_found = 1;
- continue;
- }
-
- pool->online = 0;
- pool->degraded = 0;
- pool->faulted = 0;
- pool->offline = 0;
- pool->removed = 0;
- pool->unavail = 0;
- pool->suspended = 0;
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/%s/state", dirname, de->d_name);
-
- char state[STATE_SIZE + 1];
- int ret = read_file(filename, state, STATE_SIZE);
-
- if (!ret) {
- state_file_found = 1;
-
- // ZFS pool states are described at https://openzfs.github.io/openzfs-docs/man/8/zpoolconcepts.8.html?#Device_Failure_and_Recovery
- if (!strcmp(state, "ONLINE\n")) {
- pool->online = 1;
- } else if (!strcmp(state, "DEGRADED\n")) {
- pool->degraded = 1;
- } else if (!strcmp(state, "FAULTED\n")) {
- pool->faulted = 1;
- } else if (!strcmp(state, "OFFLINE\n")) {
- pool->offline = 1;
- } else if (!strcmp(state, "REMOVED\n")) {
- pool->removed = 1;
- } else if (!strcmp(state, "UNAVAIL\n")) {
- pool->unavail = 1;
- } else if (!strcmp(state, "SUSPENDED\n")) {
- pool->suspended = 1;
- } else {
- disable_zfs_pool_state(pool);
-
- char *c = strchr(state, '\n');
- if (c)
- *c = '\0';
- collector_error("ZFS POOLS: Undefined state %s for zpool %s, disabling the chart", state, de->d_name);
- }
- }
- }
- }
-
- closedir(dir);
- }
-
- if (do_zfs_pool_state && pool_found && !state_file_found) {
- collector_info("ZFS POOLS: State files not found. Disabling the module.");
- do_zfs_pool_state = 0;
- }
-
- if (do_zfs_pool_state)
- dictionary_walkthrough_read(zfs_pools, update_zfs_pool_state_chart, &update_every);
-
- while (deleted_zfs_pools) {
- struct deleted_zfs_pool *current_pool = deleted_zfs_pools;
- dictionary_del(zfs_pools, current_pool->name);
-
- deleted_zfs_pools = deleted_zfs_pools->next;
-
- freez(current_pool->name);
- freez(current_pool);
- }
-
- return 0;
-}
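
A standalone sketch of the per-pool state read done above: each pool has a /proc/spl/kstat/zfs/<pool>/state file holding a single word such as "ONLINE\n". The pool name "tank" below is a hypothetical example:

    #include <stdio.h>
    #include <string.h>

    int main(void) {
        char state[16] = "";
        FILE *fp = fopen("/proc/spl/kstat/zfs/tank/state", "r");  /* hypothetical pool */
        if (!fp) {
            perror("fopen");
            return 1;
        }
        if (fgets(state, sizeof(state), fp))
            state[strcspn(state, "\n")] = '\0';   /* strip the trailing newline */
        fclose(fp);

        printf("pool state: %s\n", state);        /* ONLINE, DEGRADED, FAULTED, ... */
        return 0;
    }
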
diff --git a/collectors/proc.plugin/proc_stat.c b/collectors/proc.plugin/proc_stat.c
deleted file mode 100644
index 84160f22f..000000000
--- a/collectors/proc.plugin/proc_stat.c
+++ /dev/null
@@ -1,1070 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_STAT_NAME "/proc/stat"
-
-struct per_core_single_number_file {
- unsigned char found:1;
- const char *filename;
- int fd;
- collected_number value;
- RRDDIM *rd;
-};
-
-struct last_ticks {
- collected_number frequency;
- collected_number ticks;
-};
-
-// This is an extension of struct per_core_single_number_file at CPU_FREQ_INDEX.
-// Either scaling_cur_freq or time_in_state file is used at one time.
-struct per_core_time_in_state_file {
- const char *filename;
- procfile *ff;
- size_t last_ticks_len;
- struct last_ticks *last_ticks;
-};
-
-#define CORE_THROTTLE_COUNT_INDEX 0
-#define PACKAGE_THROTTLE_COUNT_INDEX 1
-#define CPU_FREQ_INDEX 2
-#define PER_CORE_FILES 3
-
-struct cpu_chart {
- const char *id;
-
- RRDSET *st;
- RRDDIM *rd_user;
- RRDDIM *rd_nice;
- RRDDIM *rd_system;
- RRDDIM *rd_idle;
- RRDDIM *rd_iowait;
- RRDDIM *rd_irq;
- RRDDIM *rd_softirq;
- RRDDIM *rd_steal;
- RRDDIM *rd_guest;
- RRDDIM *rd_guest_nice;
-
- struct per_core_single_number_file files[PER_CORE_FILES];
-
- struct per_core_time_in_state_file time_in_state_files;
-};
-
-static int keep_per_core_fds_open = CONFIG_BOOLEAN_YES;
-static int keep_cpuidle_fds_open = CONFIG_BOOLEAN_YES;
-
-static int read_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index) {
- char buf[50 + 1];
- size_t x, files_read = 0, files_nonzero = 0;
-
- for(x = 0; x < len ; x++) {
- struct per_core_single_number_file *f = &all_cpu_charts[x].files[index];
-
- f->found = 0;
-
- if(unlikely(!f->filename))
- continue;
-
- if(unlikely(f->fd == -1)) {
- f->fd = open(f->filename, O_RDONLY);
- if (unlikely(f->fd == -1)) {
- collector_error("Cannot open file '%s'", f->filename);
- continue;
- }
- }
-
- ssize_t ret = read(f->fd, buf, 50);
- if(unlikely(ret < 0)) {
- // cannot read that file
-
- collector_error("Cannot read file '%s'", f->filename);
- close(f->fd);
- f->fd = -1;
- continue;
- }
- else {
- // successful read
-
- // terminate the buffer
- buf[ret] = '\0';
-
- if(unlikely(keep_per_core_fds_open != CONFIG_BOOLEAN_YES)) {
- close(f->fd);
- f->fd = -1;
- }
- else if(lseek(f->fd, 0, SEEK_SET) == -1) {
- collector_error("Cannot seek in file '%s'", f->filename);
- close(f->fd);
- f->fd = -1;
- }
- }
-
- files_read++;
- f->found = 1;
-
- f->value = str2ll(buf, NULL);
- if(likely(f->value != 0))
- files_nonzero++;
- }
-
- if(files_read == 0)
- return -1;
-
- if(files_nonzero == 0)
- return 0;
-
- return (int)files_nonzero;
-}
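-
-// Return contract (as relied upon by the callers in do_proc_stat() below):
-// -1 means not a single file could be read, 0 means files were read but every
-// value was zero, and a positive value is the count of non-zero readings.
-// Charts configured as CONFIG_BOOLEAN_AUTO are only switched on once the
-// result becomes greater than zero.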
-
-static int read_per_core_time_in_state_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index) {
- size_t x, files_read = 0, files_nonzero = 0;
-
- for(x = 0; x < len ; x++) {
- struct per_core_single_number_file *f = &all_cpu_charts[x].files[index];
- struct per_core_time_in_state_file *tsf = &all_cpu_charts[x].time_in_state_files;
-
- f->found = 0;
-
- if(unlikely(!tsf->filename))
- continue;
-
- if(unlikely(!tsf->ff)) {
- tsf->ff = procfile_open(tsf->filename, " \t:", PROCFILE_FLAG_DEFAULT);
-            if(unlikely(!tsf->ff)) {
- collector_error("Cannot open file '%s'", tsf->filename);
- continue;
- }
- }
-
- tsf->ff = procfile_readall(tsf->ff);
- if(unlikely(!tsf->ff)) {
- collector_error("Cannot read file '%s'", tsf->filename);
- procfile_close(tsf->ff);
- tsf->ff = NULL;
- continue;
- }
- else {
- // successful read
-
- size_t lines = procfile_lines(tsf->ff), l;
- size_t words;
- unsigned long long total_ticks_since_last = 0, avg_freq = 0;
-
- // Check if there is at least one frequency in time_in_state
- if (procfile_word(tsf->ff, 0)[0] == '\0') {
- if(unlikely(keep_per_core_fds_open != CONFIG_BOOLEAN_YES)) {
- procfile_close(tsf->ff);
- tsf->ff = NULL;
- }
- // TODO: Is there a better way to avoid spikes than calculating the average over
- // the whole period under schedutil governor?
- // freez(tsf->last_ticks);
- // tsf->last_ticks = NULL;
- // tsf->last_ticks_len = 0;
- continue;
- }
-
- if (unlikely(tsf->last_ticks_len < lines || tsf->last_ticks == NULL)) {
- tsf->last_ticks = reallocz(tsf->last_ticks, sizeof(struct last_ticks) * lines);
- memset(tsf->last_ticks, 0, sizeof(struct last_ticks) * lines);
- tsf->last_ticks_len = lines;
- }
-
- f->value = 0;
-
- for(l = 0; l < lines - 1 ;l++) {
- unsigned long long frequency = 0, ticks = 0, ticks_since_last = 0;
-
- words = procfile_linewords(tsf->ff, l);
- if(unlikely(words < 2)) {
- collector_error("Cannot read time_in_state line. Expected 2 params, read %zu.", words);
- continue;
- }
- frequency = str2ull(procfile_lineword(tsf->ff, l, 0), NULL);
- ticks = str2ull(procfile_lineword(tsf->ff, l, 1), NULL);
-
- // It is assumed that frequencies are static and sorted
- ticks_since_last = ticks - tsf->last_ticks[l].ticks;
- tsf->last_ticks[l].frequency = frequency;
- tsf->last_ticks[l].ticks = ticks;
-
- total_ticks_since_last += ticks_since_last;
- avg_freq += frequency * ticks_since_last;
-
- }
-
- if (likely(total_ticks_since_last)) {
- avg_freq /= total_ticks_since_last;
- f->value = avg_freq;
- }
-
- if(unlikely(keep_per_core_fds_open != CONFIG_BOOLEAN_YES)) {
- procfile_close(tsf->ff);
- tsf->ff = NULL;
- }
- }
-
- files_read++;
-
- f->found = 1;
-
- if(likely(f->value != 0))
- files_nonzero++;
- }
-
- if(unlikely(files_read == 0))
- return -1;
-
- if(unlikely(files_nonzero == 0))
- return 0;
-
- return (int)files_nonzero;
-}
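-
-// Worked example (illustrative): time_in_state is a list of "frequency ticks"
-// pairs, so the average frequency over the last interval is the tick-weighted
-// mean of the per-state deltas:
-//
-//     avg_freq = sum(freq_i * delta_ticks_i) / sum(delta_ticks_i)
-//
-// e.g. if a core gained 300 ticks at 1,000,000 kHz and 100 ticks at
-// 2,000,000 kHz since the previous read:
-//
-//     avg_freq = (1000000*300 + 2000000*100) / (300 + 100) = 1,250,000 kHz
-//
-// The cpufreq chart later divides this by 1000, plotting it as 1250 MHz.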
-
-static void chart_per_core_files(struct cpu_chart *all_cpu_charts, size_t len, size_t index, RRDSET *st, collected_number multiplier, collected_number divisor, RRD_ALGORITHM algorithm) {
- size_t x;
- for(x = 0; x < len ; x++) {
- struct per_core_single_number_file *f = &all_cpu_charts[x].files[index];
-
- if(unlikely(!f->found))
- continue;
-
- if(unlikely(!f->rd))
- f->rd = rrddim_add(st, all_cpu_charts[x].id, NULL, multiplier, divisor, algorithm);
-
- rrddim_set_by_pointer(st, f->rd, f->value);
- }
-}
-
-struct cpuidle_state {
- char *name;
-
- char *time_filename;
- int time_fd;
-
- collected_number value;
-
- RRDDIM *rd;
-};
-
-struct per_core_cpuidle_chart {
- RRDSET *st;
-
- RRDDIM *active_time_rd;
- collected_number active_time;
- collected_number last_active_time;
-
- struct cpuidle_state *cpuidle_state;
- size_t cpuidle_state_len;
- int rescan_cpu_states;
-};
-
-static void* wake_cpu_thread(void* core) {
- pthread_t thread;
- cpu_set_t cpu_set;
- static size_t cpu_wakeups = 0;
- static int errors = 0;
-
- CPU_ZERO(&cpu_set);
-    CPU_SET(*(size_t*)core, &cpu_set); // the caller passes a pointer to its size_t core index
-
- thread = pthread_self();
- if(unlikely(pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpu_set))) {
- if(unlikely(errors < 8)) {
- collector_error("Cannot set CPU affinity for core %d", *(int*)core);
- errors++;
- }
- else if(unlikely(errors < 9)) {
- collector_error("CPU affinity errors are disabled");
- errors++;
- }
- }
-
- // Make the CPU core do something to force it to update its idle counters
- cpu_wakeups++;
-
-    return NULL;
-}
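-
-// Technique note: the idle accounting of a core that has been fully idle can
-// go stale. Pinning this short-lived thread onto the target core forces a
-// wakeup and a context switch there, after which the caller re-reads
-// /proc/schedstat to pick up refreshed counters. The cpu_wakeups counter
-// appears to exist only to give the thread some observable work to do.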
-
-static int read_schedstat(char *schedstat_filename, struct per_core_cpuidle_chart **cpuidle_charts_address, size_t *schedstat_cores_found) {
- static size_t cpuidle_charts_len = 0;
- static procfile *ff = NULL;
- struct per_core_cpuidle_chart *cpuidle_charts = *cpuidle_charts_address;
- size_t cores_found = 0;
-
- if(unlikely(!ff)) {
- ff = procfile_open(schedstat_filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 1;
-
- size_t lines = procfile_lines(ff), l;
- size_t words;
-
- for(l = 0; l < lines ;l++) {
- char *row_key = procfile_lineword(ff, l, 0);
-
- // faster strncmp(row_key, "cpu", 3) == 0
- if(likely(row_key[0] == 'c' && row_key[1] == 'p' && row_key[2] == 'u')) {
- words = procfile_linewords(ff, l);
- if(unlikely(words < 10)) {
- collector_error("Cannot read /proc/schedstat cpu line. Expected 9 params, read %zu.", words);
- return 1;
- }
- cores_found++;
-
- size_t core = str2ul(&row_key[3]);
- if(unlikely(core >= cores_found)) {
- collector_error("Core %zu found but no more than %zu cores were expected.", core, cores_found);
- return 1;
- }
-
- if(unlikely(cpuidle_charts_len < cores_found)) {
- cpuidle_charts = reallocz(cpuidle_charts, sizeof(struct per_core_cpuidle_chart) * cores_found);
- *cpuidle_charts_address = cpuidle_charts;
- memset(cpuidle_charts + cpuidle_charts_len, 0, sizeof(struct per_core_cpuidle_chart) * (cores_found - cpuidle_charts_len));
- cpuidle_charts_len = cores_found;
- }
-
- cpuidle_charts[core].active_time = str2ull(procfile_lineword(ff, l, 7), NULL) / 1000;
- }
- }
-
- *schedstat_cores_found = cores_found;
- return 0;
-}
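-
-// Illustrative /proc/schedstat cpu line (the values are made up):
-//
-//     cpu0 0 0 91288 21683 56915 30726 2045936778 484055 10027
-//
-// Word 0 is the "cpuN" key; per the kernel's sched-stats documentation, field 7
-// is the cumulative time tasks spent running on this CPU (nanoseconds on recent
-// kernels), which is what the parser above reads and divides by 1000.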
-
-static int read_one_state(char *buf, const char *filename, int *fd) {
- ssize_t ret = read(*fd, buf, 50);
-
- if(unlikely(ret <= 0)) {
- // cannot read that file
- collector_error("Cannot read file '%s'", filename);
- close(*fd);
- *fd = -1;
- return 0;
- }
- else {
- // successful read
-
-        // terminate the buffer, overwriting the trailing newline
-        buf[ret - 1] = '\0';
-
- if(unlikely(keep_cpuidle_fds_open != CONFIG_BOOLEAN_YES)) {
- close(*fd);
- *fd = -1;
- }
- else if(lseek(*fd, 0, SEEK_SET) == -1) {
- collector_error("Cannot seek in file '%s'", filename);
- close(*fd);
- *fd = -1;
- }
- }
-
- return 1;
-}
-
-static int read_cpuidle_states(char *cpuidle_name_filename , char *cpuidle_time_filename, struct per_core_cpuidle_chart *cpuidle_charts, size_t core) {
- char filename[FILENAME_MAX + 1];
- static char next_state_filename[FILENAME_MAX + 1];
- struct stat stbuf;
- struct per_core_cpuidle_chart *cc = &cpuidle_charts[core];
- size_t state;
-
- if(unlikely(!cc->cpuidle_state_len || cc->rescan_cpu_states)) {
- int state_file_found = 1; // check at least one state
-
- if(cc->cpuidle_state_len) {
- for(state = 0; state < cc->cpuidle_state_len; state++) {
- freez(cc->cpuidle_state[state].name);
-
- freez(cc->cpuidle_state[state].time_filename);
- close(cc->cpuidle_state[state].time_fd);
- cc->cpuidle_state[state].time_fd = -1;
- }
-
- freez(cc->cpuidle_state);
- cc->cpuidle_state = NULL;
- cc->cpuidle_state_len = 0;
-
- cc->active_time_rd = NULL;
- cc->st = NULL;
- }
-
- while(likely(state_file_found)) {
- snprintfz(filename, FILENAME_MAX, cpuidle_name_filename, core, cc->cpuidle_state_len);
- if (stat(filename, &stbuf) == 0)
- cc->cpuidle_state_len++;
- else
- state_file_found = 0;
- }
- snprintfz(next_state_filename, FILENAME_MAX, cpuidle_name_filename, core, cc->cpuidle_state_len);
-
- if(likely(cc->cpuidle_state_len))
- cc->cpuidle_state = callocz(cc->cpuidle_state_len, sizeof(struct cpuidle_state));
-
- for(state = 0; state < cc->cpuidle_state_len; state++) {
- char name_buf[50 + 1];
- snprintfz(filename, FILENAME_MAX, cpuidle_name_filename, core, state);
-
- int fd = open(filename, O_RDONLY, 0666);
- if(unlikely(fd == -1)) {
- collector_error("Cannot open file '%s'", filename);
- cc->rescan_cpu_states = 1;
- return 1;
- }
-
- ssize_t r = read(fd, name_buf, 50);
- if(unlikely(r < 1)) {
- collector_error("Cannot read file '%s'", filename);
- close(fd);
- cc->rescan_cpu_states = 1;
- return 1;
- }
-
-            name_buf[r - 1] = '\0'; // drop the trailing newline
- cc->cpuidle_state[state].name = strdupz(trim(name_buf));
- close(fd);
-
- snprintfz(filename, FILENAME_MAX, cpuidle_time_filename, core, state);
- cc->cpuidle_state[state].time_filename = strdupz(filename);
- cc->cpuidle_state[state].time_fd = -1;
- }
-
- cc->rescan_cpu_states = 0;
- }
-
- for(state = 0; state < cc->cpuidle_state_len; state++) {
-
- struct cpuidle_state *cs = &cc->cpuidle_state[state];
-
- if(unlikely(cs->time_fd == -1)) {
- cs->time_fd = open(cs->time_filename, O_RDONLY);
- if (unlikely(cs->time_fd == -1)) {
- collector_error("Cannot open file '%s'", cs->time_filename);
- cc->rescan_cpu_states = 1;
- return 1;
- }
- }
-
- char time_buf[50 + 1];
- if(likely(read_one_state(time_buf, cs->time_filename, &cs->time_fd))) {
- cs->value = str2ll(time_buf, NULL);
- }
- else {
- cc->rescan_cpu_states = 1;
- return 1;
- }
- }
-
- // check if the number of states was increased
- if(unlikely(stat(next_state_filename, &stbuf) == 0)) {
- cc->rescan_cpu_states = 1;
- return 1;
- }
-
- return 0;
-}
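-
-// Illustrative sysfs layout behind the two path templates above (the actual
-// paths are configurable):
-//
-//     /sys/devices/system/cpu/cpu0/cpuidle/state0/name  -> "POLL"
-//     /sys/devices/system/cpu/cpu0/cpuidle/state0/time  -> cumulative usec in this state
-//     /sys/devices/system/cpu/cpu0/cpuidle/state1/name  -> "C1"
-//     ...
-//
-// States are enumerated until stat() fails; if a new stateN directory appears
-// later (detected via next_state_filename), rescan_cpu_states triggers a full
-// re-enumeration on the next pass.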
-
-int do_proc_stat(int update_every, usec_t dt) {
- (void)dt;
-
- static struct cpu_chart *all_cpu_charts = NULL;
- static size_t all_cpu_charts_size = 0;
- static procfile *ff = NULL;
- static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1,
- do_core_throttle_count = -1, do_package_throttle_count = -1, do_cpu_freq = -1, do_cpuidle = -1;
- static uint32_t hash_intr, hash_ctxt, hash_processes, hash_procs_running, hash_procs_blocked;
- static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL,
- *time_in_state_filename = NULL, *schedstat_filename = NULL, *cpuidle_name_filename = NULL, *cpuidle_time_filename = NULL;
- static const RRDVAR_ACQUIRED *cpus_var = NULL;
- static int accurate_freq_avail = 0, accurate_freq_is_used = 0;
- size_t cores_found = (size_t)get_system_cpus();
-
- if(unlikely(do_cpu == -1)) {
- do_cpu = config_get_boolean("plugin:proc:/proc/stat", "cpu utilization", CONFIG_BOOLEAN_YES);
- do_cpu_cores = config_get_boolean("plugin:proc:/proc/stat", "per cpu core utilization", CONFIG_BOOLEAN_YES);
- do_interrupts = config_get_boolean("plugin:proc:/proc/stat", "cpu interrupts", CONFIG_BOOLEAN_YES);
- do_context = config_get_boolean("plugin:proc:/proc/stat", "context switches", CONFIG_BOOLEAN_YES);
- do_forks = config_get_boolean("plugin:proc:/proc/stat", "processes started", CONFIG_BOOLEAN_YES);
- do_processes = config_get_boolean("plugin:proc:/proc/stat", "processes running", CONFIG_BOOLEAN_YES);
-
- // give sane defaults based on the number of processors
- if(unlikely(get_system_cpus() > 128)) {
- // the system has too many processors
- keep_per_core_fds_open = CONFIG_BOOLEAN_NO;
- do_core_throttle_count = CONFIG_BOOLEAN_NO;
- do_package_throttle_count = CONFIG_BOOLEAN_NO;
- do_cpu_freq = CONFIG_BOOLEAN_NO;
- do_cpuidle = CONFIG_BOOLEAN_NO;
- }
- else {
- // the system has a reasonable number of processors
- keep_per_core_fds_open = CONFIG_BOOLEAN_YES;
- do_core_throttle_count = CONFIG_BOOLEAN_AUTO;
- do_package_throttle_count = CONFIG_BOOLEAN_NO;
- do_cpu_freq = CONFIG_BOOLEAN_YES;
- do_cpuidle = CONFIG_BOOLEAN_YES;
- }
- if(unlikely(get_system_cpus() > 24)) {
- // the system has too many processors
- keep_cpuidle_fds_open = CONFIG_BOOLEAN_NO;
- }
- else {
- // the system has a reasonable number of processors
- keep_cpuidle_fds_open = CONFIG_BOOLEAN_YES;
- }
-
- keep_per_core_fds_open = config_get_boolean("plugin:proc:/proc/stat", "keep per core files open", keep_per_core_fds_open);
- keep_cpuidle_fds_open = config_get_boolean("plugin:proc:/proc/stat", "keep cpuidle files open", keep_cpuidle_fds_open);
- do_core_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "core_throttle_count", do_core_throttle_count);
- do_package_throttle_count = config_get_boolean_ondemand("plugin:proc:/proc/stat", "package_throttle_count", do_package_throttle_count);
- do_cpu_freq = config_get_boolean_ondemand("plugin:proc:/proc/stat", "cpu frequency", do_cpu_freq);
- do_cpuidle = config_get_boolean_ondemand("plugin:proc:/proc/stat", "cpu idle states", do_cpuidle);
-
- hash_intr = simple_hash("intr");
- hash_ctxt = simple_hash("ctxt");
- hash_processes = simple_hash("processes");
- hash_procs_running = simple_hash("procs_running");
- hash_procs_blocked = simple_hash("procs_blocked");
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/thermal_throttle/core_throttle_count");
- core_throttle_count_filename = config_get("plugin:proc:/proc/stat", "core_throttle_count filename to monitor", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/thermal_throttle/package_throttle_count");
- package_throttle_count_filename = config_get("plugin:proc:/proc/stat", "package_throttle_count filename to monitor", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/cpufreq/scaling_cur_freq");
- scaling_cur_freq_filename = config_get("plugin:proc:/proc/stat", "scaling_cur_freq filename to monitor", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/%s/cpufreq/stats/time_in_state");
- time_in_state_filename = config_get("plugin:proc:/proc/stat", "time_in_state filename to monitor", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/schedstat");
- schedstat_filename = config_get("plugin:proc:/proc/stat", "schedstat filename to monitor", filename);
-
- if(do_cpuidle != CONFIG_BOOLEAN_NO) {
- struct stat stbuf;
-
- if (stat(schedstat_filename, &stbuf))
- do_cpuidle = CONFIG_BOOLEAN_NO;
- }
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/name");
- cpuidle_name_filename = config_get("plugin:proc:/proc/stat", "cpuidle name filename to monitor", filename);
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/time");
- cpuidle_time_filename = config_get("plugin:proc:/proc/stat", "cpuidle time filename to monitor", filename);
- }
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/stat");
- ff = procfile_open(config_get("plugin:proc:/proc/stat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
-
- size_t lines = procfile_lines(ff), l;
- size_t words;
-
- unsigned long long processes = 0, running = 0 , blocked = 0;
-
- for(l = 0; l < lines ;l++) {
- char *row_key = procfile_lineword(ff, l, 0);
- uint32_t hash = simple_hash(row_key);
-
- // faster strncmp(row_key, "cpu", 3) == 0
- if(likely(row_key[0] == 'c' && row_key[1] == 'p' && row_key[2] == 'u')) {
- words = procfile_linewords(ff, l);
- if(unlikely(words < 9)) {
- collector_error("Cannot read /proc/stat cpu line. Expected 9 params, read %zu.", words);
- continue;
- }
-
- size_t core = (row_key[3] == '\0') ? 0 : str2ul(&row_key[3]) + 1;
- if(likely(core > 0)) cores_found = core;
-
- if(likely((core == 0 && do_cpu) || (core > 0 && do_cpu_cores))) {
- char *id;
- unsigned long long user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0;
-
- id = row_key;
- user = str2ull(procfile_lineword(ff, l, 1), NULL);
- nice = str2ull(procfile_lineword(ff, l, 2), NULL);
- system = str2ull(procfile_lineword(ff, l, 3), NULL);
- idle = str2ull(procfile_lineword(ff, l, 4), NULL);
- iowait = str2ull(procfile_lineword(ff, l, 5), NULL);
- irq = str2ull(procfile_lineword(ff, l, 6), NULL);
- softirq = str2ull(procfile_lineword(ff, l, 7), NULL);
- steal = str2ull(procfile_lineword(ff, l, 8), NULL);
-
- guest = str2ull(procfile_lineword(ff, l, 9), NULL);
- user -= guest;
-
- guest_nice = str2ull(procfile_lineword(ff, l, 10), NULL);
- nice -= guest_nice;
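-                // In /proc/stat, guest time is already included in user time
-                // (and guest_nice in nice), so both are subtracted here to
-                // avoid double counting on the stacked chart. Illustrative:
-                // with user=1000 and guest=200 ticks, the chart shows user=800
-                // plus guest=200, preserving the original total of 1000.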
-
- char *title, *type, *context, *family;
- long priority;
-
- if(unlikely(core >= all_cpu_charts_size)) {
- size_t old_cpu_charts_size = all_cpu_charts_size;
- all_cpu_charts_size = core + 1;
- all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * all_cpu_charts_size);
- memset(&all_cpu_charts[old_cpu_charts_size], 0, sizeof(struct cpu_chart) * (all_cpu_charts_size - old_cpu_charts_size));
- }
- struct cpu_chart *cpu_chart = &all_cpu_charts[core];
-
- if(unlikely(!cpu_chart->st)) {
- cpu_chart->id = strdupz(id);
-
- if(unlikely(core == 0)) {
- title = "Total CPU utilization";
- type = "system";
- context = "system.cpu";
- family = id;
- priority = NETDATA_CHART_PRIO_SYSTEM_CPU;
- }
- else {
- title = "Core utilization";
- type = "cpu";
- context = "cpu.cpu";
- family = "utilization";
- priority = NETDATA_CHART_PRIO_CPU_PER_CORE;
-
- char filename[FILENAME_MAX + 1];
- struct stat stbuf;
-
- if(do_core_throttle_count != CONFIG_BOOLEAN_NO) {
- snprintfz(filename, FILENAME_MAX, core_throttle_count_filename, id);
- if (stat(filename, &stbuf) == 0) {
- cpu_chart->files[CORE_THROTTLE_COUNT_INDEX].filename = strdupz(filename);
- cpu_chart->files[CORE_THROTTLE_COUNT_INDEX].fd = -1;
- do_core_throttle_count = CONFIG_BOOLEAN_YES;
- }
- }
-
- if(do_package_throttle_count != CONFIG_BOOLEAN_NO) {
- snprintfz(filename, FILENAME_MAX, package_throttle_count_filename, id);
- if (stat(filename, &stbuf) == 0) {
- cpu_chart->files[PACKAGE_THROTTLE_COUNT_INDEX].filename = strdupz(filename);
- cpu_chart->files[PACKAGE_THROTTLE_COUNT_INDEX].fd = -1;
- do_package_throttle_count = CONFIG_BOOLEAN_YES;
- }
- }
-
- if(do_cpu_freq != CONFIG_BOOLEAN_NO) {
-
- snprintfz(filename, FILENAME_MAX, scaling_cur_freq_filename, id);
-
- if (stat(filename, &stbuf) == 0) {
- cpu_chart->files[CPU_FREQ_INDEX].filename = strdupz(filename);
- cpu_chart->files[CPU_FREQ_INDEX].fd = -1;
- do_cpu_freq = CONFIG_BOOLEAN_YES;
- }
-
- snprintfz(filename, FILENAME_MAX, time_in_state_filename, id);
-
- if (stat(filename, &stbuf) == 0) {
- cpu_chart->time_in_state_files.filename = strdupz(filename);
- cpu_chart->time_in_state_files.ff = NULL;
- do_cpu_freq = CONFIG_BOOLEAN_YES;
- accurate_freq_avail = 1;
- }
- }
- }
-
- cpu_chart->st = rrdset_create_localhost(
- type
- , id
- , NULL
- , family
- , context
- , title
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_STAT_NAME
- , priority + core
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- long multiplier = 1;
- long divisor = 1; // sysconf(_SC_CLK_TCK);
-
- cpu_chart->rd_guest_nice = rrddim_add(cpu_chart->st, "guest_nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- cpu_chart->rd_guest = rrddim_add(cpu_chart->st, "guest", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- cpu_chart->rd_steal = rrddim_add(cpu_chart->st, "steal", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- cpu_chart->rd_softirq = rrddim_add(cpu_chart->st, "softirq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- cpu_chart->rd_irq = rrddim_add(cpu_chart->st, "irq", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- cpu_chart->rd_user = rrddim_add(cpu_chart->st, "user", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- cpu_chart->rd_system = rrddim_add(cpu_chart->st, "system", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- cpu_chart->rd_nice = rrddim_add(cpu_chart->st, "nice", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- cpu_chart->rd_iowait = rrddim_add(cpu_chart->st, "iowait", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- cpu_chart->rd_idle = rrddim_add(cpu_chart->st, "idle", NULL, multiplier, divisor, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_hide(cpu_chart->st, "idle");
-
- if (core > 0) {
- char cpu_core[50 + 1];
-                        snprintfz(cpu_core, 50, "cpu%zu", core - 1);
- rrdlabels_add(cpu_chart->st->rrdlabels, "cpu", cpu_core, RRDLABEL_SRC_AUTO);
- }
-
- if(unlikely(core == 0 && cpus_var == NULL))
- cpus_var = rrdvar_custom_host_variable_add_and_acquire(localhost, "active_processors");
- }
-
- rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_user, user);
- rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_nice, nice);
- rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_system, system);
- rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_idle, idle);
- rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_iowait, iowait);
- rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_irq, irq);
- rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_softirq, softirq);
- rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_steal, steal);
- rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_guest, guest);
- rrddim_set_by_pointer(cpu_chart->st, cpu_chart->rd_guest_nice, guest_nice);
- rrdset_done(cpu_chart->st);
- }
- }
- else if(unlikely(hash == hash_intr && strcmp(row_key, "intr") == 0)) {
- if(likely(do_interrupts)) {
- static RRDSET *st_intr = NULL;
- static RRDDIM *rd_interrupts = NULL;
- unsigned long long value = str2ull(procfile_lineword(ff, l, 1), NULL);
-
- if(unlikely(!st_intr)) {
- st_intr = rrdset_create_localhost(
- "system"
- , "intr"
- , NULL
- , "interrupts"
- , NULL
- , "CPU Interrupts"
- , "interrupts/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_STAT_NAME
- , NETDATA_CHART_PRIO_SYSTEM_INTR
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL);
-
- rd_interrupts = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_intr, rd_interrupts, value);
- rrdset_done(st_intr);
- }
- }
- else if(unlikely(hash == hash_ctxt && strcmp(row_key, "ctxt") == 0)) {
- if(likely(do_context)) {
- static RRDSET *st_ctxt = NULL;
- static RRDDIM *rd_switches = NULL;
- unsigned long long value = str2ull(procfile_lineword(ff, l, 1), NULL);
-
- if(unlikely(!st_ctxt)) {
- st_ctxt = rrdset_create_localhost(
- "system"
- , "ctxt"
- , NULL
- , "processes"
- , NULL
- , "CPU Context Switches"
- , "context switches/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_STAT_NAME
- , NETDATA_CHART_PRIO_SYSTEM_CTXT
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_switches = rrddim_add(st_ctxt, "switches", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_ctxt, rd_switches, value);
- rrdset_done(st_ctxt);
- }
- }
- else if(unlikely(hash == hash_processes && !processes && strcmp(row_key, "processes") == 0)) {
- processes = str2ull(procfile_lineword(ff, l, 1), NULL);
- }
- else if(unlikely(hash == hash_procs_running && !running && strcmp(row_key, "procs_running") == 0)) {
- running = str2ull(procfile_lineword(ff, l, 1), NULL);
- }
- else if(unlikely(hash == hash_procs_blocked && !blocked && strcmp(row_key, "procs_blocked") == 0)) {
- blocked = str2ull(procfile_lineword(ff, l, 1), NULL);
- }
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_forks)) {
- static RRDSET *st_forks = NULL;
- static RRDDIM *rd_started = NULL;
-
- if(unlikely(!st_forks)) {
- st_forks = rrdset_create_localhost(
- "system"
- , "forks"
- , NULL
- , "processes"
- , NULL
- , "Started Processes"
- , "processes/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_STAT_NAME
- , NETDATA_CHART_PRIO_SYSTEM_FORKS
- , update_every
- , RRDSET_TYPE_LINE
- );
- rrdset_flag_set(st_forks, RRDSET_FLAG_DETAIL);
-
- rd_started = rrddim_add(st_forks, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_forks, rd_started, processes);
- rrdset_done(st_forks);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_processes)) {
- static RRDSET *st_processes = NULL;
- static RRDDIM *rd_running = NULL;
- static RRDDIM *rd_blocked = NULL;
-
- if(unlikely(!st_processes)) {
- st_processes = rrdset_create_localhost(
- "system"
- , "processes"
- , NULL
- , "processes"
- , NULL
- , "System Processes"
- , "processes"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_STAT_NAME
- , NETDATA_CHART_PRIO_SYSTEM_PROCESSES
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_running = rrddim_add(st_processes, "running", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_blocked = rrddim_add(st_processes, "blocked", NULL, -1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_processes, rd_running, running);
- rrddim_set_by_pointer(st_processes, rd_blocked, blocked);
- rrdset_done(st_processes);
- }
-
- if(likely(all_cpu_charts_size > 1)) {
- if(likely(do_core_throttle_count != CONFIG_BOOLEAN_NO)) {
- int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CORE_THROTTLE_COUNT_INDEX);
- if(likely(r != -1 && (do_core_throttle_count == CONFIG_BOOLEAN_YES || r > 0))) {
- do_core_throttle_count = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_core_throttle_count = NULL;
-
- if (unlikely(!st_core_throttle_count)) {
- st_core_throttle_count = rrdset_create_localhost(
- "cpu"
- , "core_throttling"
- , NULL
- , "throttling"
- , "cpu.core_throttling"
- , "Core Thermal Throttling Events"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_STAT_NAME
- , NETDATA_CHART_PRIO_CORE_THROTTLING
- , update_every
- , RRDSET_TYPE_LINE
- );
- }
-
- chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CORE_THROTTLE_COUNT_INDEX, st_core_throttle_count, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrdset_done(st_core_throttle_count);
- }
- }
-
- if(likely(do_package_throttle_count != CONFIG_BOOLEAN_NO)) {
- int r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, PACKAGE_THROTTLE_COUNT_INDEX);
- if(likely(r != -1 && (do_package_throttle_count == CONFIG_BOOLEAN_YES || r > 0))) {
- do_package_throttle_count = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_package_throttle_count = NULL;
-
- if(unlikely(!st_package_throttle_count)) {
- st_package_throttle_count = rrdset_create_localhost(
- "cpu"
- , "package_throttling"
- , NULL
- , "throttling"
- , "cpu.package_throttling"
- , "Package Thermal Throttling Events"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_STAT_NAME
- , NETDATA_CHART_PRIO_PACKAGE_THROTTLING
- , update_every
- , RRDSET_TYPE_LINE
- );
- }
-
- chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, PACKAGE_THROTTLE_COUNT_INDEX, st_package_throttle_count, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrdset_done(st_package_throttle_count);
- }
- }
-
- if(likely(do_cpu_freq != CONFIG_BOOLEAN_NO)) {
- char filename[FILENAME_MAX + 1];
- int r = 0;
-
- if (accurate_freq_avail) {
- r = read_per_core_time_in_state_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CPU_FREQ_INDEX);
- if(r > 0 && !accurate_freq_is_used) {
- accurate_freq_is_used = 1;
- snprintfz(filename, FILENAME_MAX, time_in_state_filename, "cpu*");
- collector_info("cpufreq is using %s", filename);
- }
- }
- if (r < 1) {
- r = read_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CPU_FREQ_INDEX);
- if(accurate_freq_is_used) {
- accurate_freq_is_used = 0;
- snprintfz(filename, FILENAME_MAX, scaling_cur_freq_filename, "cpu*");
- collector_info("cpufreq fell back to %s", filename);
- }
- }
-
- if(likely(r != -1 && (do_cpu_freq == CONFIG_BOOLEAN_YES || r > 0))) {
- do_cpu_freq = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_scaling_cur_freq = NULL;
-
- if(unlikely(!st_scaling_cur_freq)) {
- st_scaling_cur_freq = rrdset_create_localhost(
- "cpu"
- , "cpufreq"
- , NULL
- , "cpufreq"
- , "cpufreq.cpufreq"
- , "Current CPU Frequency"
- , "MHz"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_STAT_NAME
- , NETDATA_CHART_PRIO_CPUFREQ_SCALING_CUR_FREQ
- , update_every
- , RRDSET_TYPE_LINE
- );
- }
-
- chart_per_core_files(&all_cpu_charts[1], all_cpu_charts_size - 1, CPU_FREQ_INDEX, st_scaling_cur_freq, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- rrdset_done(st_scaling_cur_freq);
- }
- }
- }
-
- // --------------------------------------------------------------------
-
- static struct per_core_cpuidle_chart *cpuidle_charts = NULL;
- size_t schedstat_cores_found = 0;
-
- if(likely(do_cpuidle != CONFIG_BOOLEAN_NO && !read_schedstat(schedstat_filename, &cpuidle_charts, &schedstat_cores_found))) {
- int cpu_states_updated = 0;
- size_t core, state;
-
-    // proc.plugin runs on Linux systems only. Multi-platform compatibility is not needed here,
-    // so bare pthread functions are used to avoid unnecessary overhead.
- for(core = 0; core < schedstat_cores_found; core++) {
- if(unlikely(!(cpuidle_charts[core].active_time - cpuidle_charts[core].last_active_time))) {
- pthread_t thread;
- cpu_set_t global_cpu_set;
-
- if (likely(!pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &global_cpu_set))) {
- if (unlikely(!CPU_ISSET(core, &global_cpu_set))) {
- continue;
- }
- }
- else
- collector_error("Cannot read current process affinity");
-
- // These threads are very ephemeral and don't need to have a specific name
- if(unlikely(pthread_create(&thread, NULL, wake_cpu_thread, (void *)&core)))
- collector_error("Cannot create wake_cpu_thread");
- else if(unlikely(pthread_join(thread, NULL)))
- collector_error("Cannot join wake_cpu_thread");
- cpu_states_updated = 1;
- }
- }
-
- if(unlikely(!cpu_states_updated || !read_schedstat(schedstat_filename, &cpuidle_charts, &schedstat_cores_found))) {
- for(core = 0; core < schedstat_cores_found; core++) {
- cpuidle_charts[core].last_active_time = cpuidle_charts[core].active_time;
-
- int r = read_cpuidle_states(cpuidle_name_filename, cpuidle_time_filename, cpuidle_charts, core);
- if(likely(r != -1 && (do_cpuidle == CONFIG_BOOLEAN_YES || r > 0))) {
- do_cpuidle = CONFIG_BOOLEAN_YES;
-
- char cpuidle_chart_id[RRD_ID_LENGTH_MAX + 1];
- snprintfz(cpuidle_chart_id, RRD_ID_LENGTH_MAX, "cpu%zu_cpuidle", core);
-
- if(unlikely(!cpuidle_charts[core].st)) {
- cpuidle_charts[core].st = rrdset_create_localhost(
- "cpu"
- , cpuidle_chart_id
- , NULL
- , "cpuidle"
- , "cpuidle.cpu_cstate_residency_time"
- , "C-state residency time"
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_STAT_NAME
- , NETDATA_CHART_PRIO_CPUIDLE + core
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- char corebuf[50+1];
- snprintfz(corebuf, sizeof(corebuf) - 1, "cpu%zu", core);
- rrdlabels_add(cpuidle_charts[core].st->rrdlabels, "cpu", corebuf, RRDLABEL_SRC_AUTO);
-
- char cpuidle_dim_id[RRD_ID_LENGTH_MAX + 1];
- cpuidle_charts[core].active_time_rd = rrddim_add(cpuidle_charts[core].st, "active", "C0 (active)", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- for(state = 0; state < cpuidle_charts[core].cpuidle_state_len; state++) {
- strncpyz(cpuidle_dim_id, cpuidle_charts[core].cpuidle_state[state].name, RRD_ID_LENGTH_MAX);
- for(int i = 0; cpuidle_dim_id[i]; i++)
-                                    cpuidle_dim_id[i] = (char)tolower((unsigned char)cpuidle_dim_id[i]);
- cpuidle_charts[core].cpuidle_state[state].rd = rrddim_add(cpuidle_charts[core].st, cpuidle_dim_id,
- cpuidle_charts[core].cpuidle_state[state].name,
- 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- }
- }
-
- rrddim_set_by_pointer(cpuidle_charts[core].st, cpuidle_charts[core].active_time_rd, cpuidle_charts[core].active_time);
- for(state = 0; state < cpuidle_charts[core].cpuidle_state_len; state++) {
- rrddim_set_by_pointer(cpuidle_charts[core].st, cpuidle_charts[core].cpuidle_state[state].rd, cpuidle_charts[core].cpuidle_state[state].value);
- }
- rrdset_done(cpuidle_charts[core].st);
- }
- }
- }
- }
-
- if(cpus_var)
- rrdvar_custom_host_variable_set(localhost, cpus_var, cores_found);
-
- return 0;
-}
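-
-// Illustrative /proc/stat cpu line (the values are made up), with the column
-// names used by the parser above:
-//
-//     cpu0 4705 356 584 3699176 23 23 0 0 0 0
-//          user nice system idle iowait irq softirq steal guest guest_nice
-//
-// Values are cumulative USER_HZ ticks. Because the dimensions use
-// RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL, each one is charted as a percentage of
-// the summed per-interval deltas, so the tick unit cancels out.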
diff --git a/collectors/proc.plugin/proc_sys_fs_file_nr.c b/collectors/proc.plugin/proc_sys_fs_file_nr.c
deleted file mode 100644
index 570945d01..000000000
--- a/collectors/proc.plugin/proc_sys_fs_file_nr.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-int do_proc_sys_fs_file_nr(int update_every, usec_t dt) {
- (void)dt;
-
- static procfile *ff = NULL;
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/fs/file-nr");
- ff = procfile_open(config_get("plugin:proc:/proc/sys/fs/file-nr", "filename to monitor", filename), "", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
-
- uint64_t allocated = str2ull(procfile_lineword(ff, 0, 0), NULL);
- uint64_t unused = str2ull(procfile_lineword(ff, 0, 1), NULL);
- uint64_t max = str2ull(procfile_lineword(ff, 0, 2), NULL);
-
- uint64_t used = allocated - unused;
-
- static RRDSET *st_files = NULL;
- static RRDDIM *rd_used = NULL;
-
- if(unlikely(!st_files)) {
- st_files = rrdset_create_localhost(
- "system"
- , "file_nr_used"
- , NULL
- , "files"
- , NULL
- , "File Descriptors"
- , "files"
- , PLUGIN_PROC_NAME
- , "/proc/sys/fs/file-nr"
- , NETDATA_CHART_PRIO_SYSTEM_FILES_NR
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_used = rrddim_add(st_files, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
-    rrddim_set_by_pointer(st_files, rd_used, (collected_number)used);
- rrdset_done(st_files);
-
- static RRDSET *st_files_utilization = NULL;
- static RRDDIM *rd_utilization = NULL;
-
- if(unlikely(!st_files_utilization)) {
- st_files_utilization = rrdset_create_localhost(
- "system"
- , "file_nr_utilization"
- , NULL
- , "files"
- , NULL
- , "File Descriptors Utilization"
- , "percentage"
- , PLUGIN_PROC_NAME
- , "/proc/sys/fs/file-nr"
- , NETDATA_CHART_PRIO_SYSTEM_FILES_NR + 1
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_utilization = rrddim_add(st_files_utilization, "utilization", NULL, 1, 10000, RRD_ALGORITHM_ABSOLUTE);
- }
-
- NETDATA_DOUBLE d_used = (NETDATA_DOUBLE)used;
- NETDATA_DOUBLE d_max = (NETDATA_DOUBLE)max;
- NETDATA_DOUBLE percent = d_used * 100.0 / d_max;
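-    // Illustrative arithmetic: with used=5,000 and max=1,000,000 this gives
-    // percent = 0.5; it is stored as percent*10000 (here 5,000) against the
-    // dimension divisor of 10000 set above, so the chart keeps four decimal
-    // places while the collected value stays an integer.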
-
- rrddim_set_by_pointer(st_files_utilization, rd_utilization, (collected_number)(percent * 10000));
- rrdset_done(st_files_utilization);
-
- return 0;
-}
diff --git a/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c b/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c
deleted file mode 100644
index b32597bc4..000000000
--- a/collectors/proc.plugin/proc_sys_kernel_random_entropy_avail.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-int do_proc_sys_kernel_random_entropy_avail(int update_every, usec_t dt) {
- (void)dt;
-
- static procfile *ff = NULL;
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/sys/kernel/random/entropy_avail");
- ff = procfile_open(config_get("plugin:proc:/proc/sys/kernel/random/entropy_avail", "filename to monitor", filename), "", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
-
- unsigned long long entropy = str2ull(procfile_lineword(ff, 0, 0), NULL);
-
- static RRDSET *st = NULL;
- static RRDDIM *rd = NULL;
-
- if(unlikely(!st)) {
- st = rrdset_create_localhost(
- "system"
- , "entropy"
- , NULL
- , "entropy"
- , NULL
- , "Available Entropy"
- , "entropy"
- , PLUGIN_PROC_NAME
- , "/proc/sys/kernel/random/entropy_avail"
- , NETDATA_CHART_PRIO_SYSTEM_ENTROPY
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd = rrddim_add(st, "entropy", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd, entropy);
- rrdset_done(st);
- return 0;
-}
diff --git a/collectors/proc.plugin/proc_uptime.c b/collectors/proc.plugin/proc_uptime.c
deleted file mode 100644
index ddab7269b..000000000
--- a/collectors/proc.plugin/proc_uptime.c
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-int do_proc_uptime(int update_every, usec_t dt) {
- (void)dt;
-
- static char *uptime_filename = NULL;
- if(!uptime_filename) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/uptime");
-
- uptime_filename = config_get("plugin:proc:/proc/uptime", "filename to monitor", filename);
- }
-
- static RRDSET *st = NULL;
- static RRDDIM *rd = NULL;
-
- if(unlikely(!st)) {
-
- st = rrdset_create_localhost(
- "system"
- , "uptime"
- , NULL
- , "uptime"
- , NULL
- , "System Uptime"
- , "seconds"
- , PLUGIN_PROC_NAME
- , "/proc/uptime"
- , NETDATA_CHART_PRIO_SYSTEM_UPTIME
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd = rrddim_add(st, "uptime", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd, uptime_msec(uptime_filename));
- rrdset_done(st);
- return 0;
-}
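-
-// Note (assuming the plugin's uptime_msec() helper returns milliseconds, as
-// its name suggests): the dimension divisor of 1000 above converts the
-// collected value to the chart's unit of seconds.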
diff --git a/collectors/proc.plugin/proc_vmstat.c b/collectors/proc.plugin/proc_vmstat.c
deleted file mode 100644
index b44733b6a..000000000
--- a/collectors/proc.plugin/proc_vmstat.c
+++ /dev/null
@@ -1,810 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_VMSTAT_NAME "/proc/vmstat"
-
-#define OOM_KILL_STRING "oom_kill"
-
-int do_proc_vmstat(int update_every, usec_t dt) {
- (void)dt;
-
- static procfile *ff = NULL;
- static int do_swapio = -1, do_io = -1, do_pgfaults = -1, do_oom_kill = -1, do_numa = -1, do_thp = -1, do_zswapio = -1, do_balloon = -1, do_ksm = -1;
- static int has_numa = -1;
-
- static ARL_BASE *arl_base = NULL;
- static unsigned long long numa_foreign = 0ULL;
- static unsigned long long numa_hint_faults = 0ULL;
- static unsigned long long numa_hint_faults_local = 0ULL;
- static unsigned long long numa_huge_pte_updates = 0ULL;
- static unsigned long long numa_interleave = 0ULL;
- static unsigned long long numa_local = 0ULL;
- static unsigned long long numa_other = 0ULL;
- static unsigned long long numa_pages_migrated = 0ULL;
- static unsigned long long numa_pte_updates = 0ULL;
- static unsigned long long pgfault = 0ULL;
- static unsigned long long pgmajfault = 0ULL;
- static unsigned long long pgpgin = 0ULL;
- static unsigned long long pgpgout = 0ULL;
- static unsigned long long pswpin = 0ULL;
- static unsigned long long pswpout = 0ULL;
- static unsigned long long oom_kill = 0ULL;
-
- // THP page migration
-// static unsigned long long pgmigrate_success = 0ULL;
-// static unsigned long long pgmigrate_fail = 0ULL;
-// static unsigned long long thp_migration_success = 0ULL;
-// static unsigned long long thp_migration_fail = 0ULL;
-// static unsigned long long thp_migration_split = 0ULL;
-
- // Compaction cost model
- // https://lore.kernel.org/lkml/20121022080525.GB2198@suse.de/
-// static unsigned long long compact_migrate_scanned = 0ULL;
-// static unsigned long long compact_free_scanned = 0ULL;
-// static unsigned long long compact_isolated = 0ULL;
-
- // THP defragmentation
- static unsigned long long compact_stall = 0ULL; // incremented when an application stalls allocating THP
- static unsigned long long compact_fail = 0ULL; // defragmentation events that failed
- static unsigned long long compact_success = 0ULL; // defragmentation events that succeeded
-
- // ?
-// static unsigned long long compact_daemon_wake = 0ULL;
-// static unsigned long long compact_daemon_migrate_scanned = 0ULL;
-// static unsigned long long compact_daemon_free_scanned = 0ULL;
-
- // ?
-// static unsigned long long htlb_buddy_alloc_success = 0ULL;
-// static unsigned long long htlb_buddy_alloc_fail = 0ULL;
-
- // ?
-// static unsigned long long cma_alloc_success = 0ULL;
-// static unsigned long long cma_alloc_fail = 0ULL;
-
- // ?
-// static unsigned long long unevictable_pgs_culled = 0ULL;
-// static unsigned long long unevictable_pgs_scanned = 0ULL;
-// static unsigned long long unevictable_pgs_rescued = 0ULL;
-// static unsigned long long unevictable_pgs_mlocked = 0ULL;
-// static unsigned long long unevictable_pgs_munlocked = 0ULL;
-// static unsigned long long unevictable_pgs_cleared = 0ULL;
-// static unsigned long long unevictable_pgs_stranded = 0ULL;
-
- // THP handling of page faults
-    static unsigned long long thp_fault_alloc = 0ULL; // is incremented every time a huge page is successfully allocated to handle a page fault. This applies both to the first time a page is faulted and to COW faults.
- static unsigned long long thp_fault_fallback = 0ULL; // is incremented if a page fault fails to allocate a huge page and instead falls back to using small pages.
- static unsigned long long thp_fault_fallback_charge = 0ULL; // is incremented if a page fault fails to charge a huge page and instead falls back to using small pages even though the allocation was successful.
-
- // khugepaged collapsing of small pages into huge pages
- static unsigned long long thp_collapse_alloc = 0ULL; // is incremented by khugepaged when it has found a range of pages to collapse into one huge page and has successfully allocated a new huge page to store the data.
- static unsigned long long thp_collapse_alloc_failed = 0ULL; // is incremented if khugepaged found a range of pages that should be collapsed into one huge page but failed the allocation.
-
- // THP handling of file allocations
- static unsigned long long thp_file_alloc = 0ULL; // is incremented every time a file huge page is successfully allocated
- static unsigned long long thp_file_fallback = 0ULL; // is incremented if a file huge page is attempted to be allocated but fails and instead falls back to using small pages
- static unsigned long long thp_file_fallback_charge = 0ULL; // is incremented if a file huge page cannot be charged and instead falls back to using small pages even though the allocation was successful
- static unsigned long long thp_file_mapped = 0ULL; // is incremented every time a file huge page is mapped into user address space
-
- // THP splitting of huge pages into small pages
- static unsigned long long thp_split_page = 0ULL;
- static unsigned long long thp_split_page_failed = 0ULL;
-    static unsigned long long thp_deferred_split_page = 0ULL; // is incremented when a huge page is put onto the split queue. This happens when a huge page is partially unmapped and splitting it would free some memory. Pages on the split queue are split under memory pressure
-    static unsigned long long thp_split_pmd = 0ULL; // is incremented every time a PMD is split into a table of PTEs. This can happen, for instance, when an application calls mprotect() or munmap() on part of a huge page. It doesn't split the huge page itself, only the page table entry
-
- // ?
-// static unsigned long long thp_scan_exceed_none_pte = 0ULL;
-// static unsigned long long thp_scan_exceed_swap_pte = 0ULL;
-// static unsigned long long thp_scan_exceed_share_pte = 0ULL;
-// static unsigned long long thp_split_pud = 0ULL;
-
- // THP Zero Huge Page
-    static unsigned long long thp_zero_page_alloc = 0ULL; // is incremented every time a huge zero page used for thp is successfully allocated. Note that it doesn't count every mapping of the huge zero page, only its allocation
- static unsigned long long thp_zero_page_alloc_failed = 0ULL; // is incremented if kernel fails to allocate huge zero page and falls back to using small pages
-
- // THP Swap Out
- static unsigned long long thp_swpout = 0ULL; // is incremented every time a huge page is swapout in one piece without splitting
-    static unsigned long long thp_swpout_fallback = 0ULL; // is incremented if a huge page has to be split before swapout, usually because the kernel failed to allocate contiguous swap space for it
-
- // memory ballooning
- // Current size of balloon is (balloon_inflate - balloon_deflate) pages
- static unsigned long long balloon_inflate = 0ULL;
- static unsigned long long balloon_deflate = 0ULL;
- static unsigned long long balloon_migrate = 0ULL;
-
- // ?
-// static unsigned long long swap_ra = 0ULL;
-// static unsigned long long swap_ra_hit = 0ULL;
-
- static unsigned long long ksm_swpin_copy = 0ULL; // is incremented every time a KSM page is copied when swapping in
-    static unsigned long long cow_ksm = 0ULL; // is incremented every time a KSM page triggers copy on write (COW): when a user writes to a KSM page, the kernel has to make a private copy
-
- // zswap
- static unsigned long long zswpin = 0ULL;
- static unsigned long long zswpout = 0ULL;
-
- // ?
-// static unsigned long long direct_map_level2_splits = 0ULL;
-// static unsigned long long direct_map_level3_splits = 0ULL;
-// static unsigned long long nr_unstable = 0ULL;
-
- if(unlikely(!ff)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/vmstat");
- ff = procfile_open(config_get("plugin:proc:/proc/vmstat", "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 0; // we return 0, so that we will retry to open it next time
-
- size_t lines = procfile_lines(ff), l;
-
- if(unlikely(!arl_base)) {
- do_swapio = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "swap i/o", CONFIG_BOOLEAN_AUTO);
- do_io = config_get_boolean("plugin:proc:/proc/vmstat", "disk i/o", CONFIG_BOOLEAN_YES);
- do_pgfaults = config_get_boolean("plugin:proc:/proc/vmstat", "memory page faults", CONFIG_BOOLEAN_YES);
- do_oom_kill = config_get_boolean("plugin:proc:/proc/vmstat", "out of memory kills", CONFIG_BOOLEAN_AUTO);
- do_numa = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "system-wide numa metric summary", CONFIG_BOOLEAN_AUTO);
- do_thp = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "transparent huge pages", CONFIG_BOOLEAN_AUTO);
- do_zswapio = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "zswap i/o", CONFIG_BOOLEAN_AUTO);
- do_balloon = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "memory ballooning", CONFIG_BOOLEAN_AUTO);
- do_ksm = config_get_boolean_ondemand("plugin:proc:/proc/vmstat", "kernel same memory", CONFIG_BOOLEAN_AUTO);
-
- arl_base = arl_create("vmstat", NULL, 60);
- arl_expect(arl_base, "pgfault", &pgfault);
- arl_expect(arl_base, "pgmajfault", &pgmajfault);
- arl_expect(arl_base, "pgpgin", &pgpgin);
- arl_expect(arl_base, "pgpgout", &pgpgout);
- arl_expect(arl_base, "pswpin", &pswpin);
- arl_expect(arl_base, "pswpout", &pswpout);
-
- int has_oom_kill = 0;
-
- for (l = 0; l < lines; l++) {
- if (!strcmp(procfile_lineword(ff, l, 0), OOM_KILL_STRING)) {
- has_oom_kill = 1;
- break;
- }
- }
-
- if (has_oom_kill)
- arl_expect(arl_base, OOM_KILL_STRING, &oom_kill);
- else
- do_oom_kill = CONFIG_BOOLEAN_NO;
-
- if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO &&
- (get_numa_node_count() >= 2 ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- arl_expect(arl_base, "numa_foreign", &numa_foreign);
- arl_expect(arl_base, "numa_hint_faults_local", &numa_hint_faults_local);
- arl_expect(arl_base, "numa_hint_faults", &numa_hint_faults);
- arl_expect(arl_base, "numa_huge_pte_updates", &numa_huge_pte_updates);
- arl_expect(arl_base, "numa_interleave", &numa_interleave);
- arl_expect(arl_base, "numa_local", &numa_local);
- arl_expect(arl_base, "numa_other", &numa_other);
- arl_expect(arl_base, "numa_pages_migrated", &numa_pages_migrated);
- arl_expect(arl_base, "numa_pte_updates", &numa_pte_updates);
- }
- else {
- // Do not expect numa metrics when they are not needed.
- // By not adding them, the ARL will stop processing the file
- // when all the expected metrics are collected.
-            // It also spares the ARL from parsing their values at all.
- has_numa = 0;
- do_numa = CONFIG_BOOLEAN_NO;
- }
-
- if(do_thp == CONFIG_BOOLEAN_YES || do_thp == CONFIG_BOOLEAN_AUTO) {
-// arl_expect(arl_base, "pgmigrate_success", &pgmigrate_success);
-// arl_expect(arl_base, "pgmigrate_fail", &pgmigrate_fail);
-// arl_expect(arl_base, "thp_migration_success", &thp_migration_success);
-// arl_expect(arl_base, "thp_migration_fail", &thp_migration_fail);
-// arl_expect(arl_base, "thp_migration_split", &thp_migration_split);
-// arl_expect(arl_base, "compact_migrate_scanned", &compact_migrate_scanned);
-// arl_expect(arl_base, "compact_free_scanned", &compact_free_scanned);
-// arl_expect(arl_base, "compact_isolated", &compact_isolated);
- arl_expect(arl_base, "compact_stall", &compact_stall);
- arl_expect(arl_base, "compact_fail", &compact_fail);
- arl_expect(arl_base, "compact_success", &compact_success);
-// arl_expect(arl_base, "compact_daemon_wake", &compact_daemon_wake);
-// arl_expect(arl_base, "compact_daemon_migrate_scanned", &compact_daemon_migrate_scanned);
-// arl_expect(arl_base, "compact_daemon_free_scanned", &compact_daemon_free_scanned);
- arl_expect(arl_base, "thp_fault_alloc", &thp_fault_alloc);
- arl_expect(arl_base, "thp_fault_fallback", &thp_fault_fallback);
- arl_expect(arl_base, "thp_fault_fallback_charge", &thp_fault_fallback_charge);
- arl_expect(arl_base, "thp_collapse_alloc", &thp_collapse_alloc);
- arl_expect(arl_base, "thp_collapse_alloc_failed", &thp_collapse_alloc_failed);
- arl_expect(arl_base, "thp_file_alloc", &thp_file_alloc);
- arl_expect(arl_base, "thp_file_fallback", &thp_file_fallback);
- arl_expect(arl_base, "thp_file_fallback_charge", &thp_file_fallback_charge);
- arl_expect(arl_base, "thp_file_mapped", &thp_file_mapped);
- arl_expect(arl_base, "thp_split_page", &thp_split_page);
- arl_expect(arl_base, "thp_split_page_failed", &thp_split_page_failed);
- arl_expect(arl_base, "thp_deferred_split_page", &thp_deferred_split_page);
- arl_expect(arl_base, "thp_split_pmd", &thp_split_pmd);
- arl_expect(arl_base, "thp_zero_page_alloc", &thp_zero_page_alloc);
- arl_expect(arl_base, "thp_zero_page_alloc_failed", &thp_zero_page_alloc_failed);
- arl_expect(arl_base, "thp_swpout", &thp_swpout);
- arl_expect(arl_base, "thp_swpout_fallback", &thp_swpout_fallback);
- }
-
- if(do_balloon == CONFIG_BOOLEAN_YES || do_balloon == CONFIG_BOOLEAN_AUTO) {
- arl_expect(arl_base, "balloon_inflate", &balloon_inflate);
- arl_expect(arl_base, "balloon_deflate", &balloon_deflate);
- arl_expect(arl_base, "balloon_migrate", &balloon_migrate);
- }
-
- if(do_ksm == CONFIG_BOOLEAN_YES || do_ksm == CONFIG_BOOLEAN_AUTO) {
- arl_expect(arl_base, "ksm_swpin_copy", &ksm_swpin_copy);
- arl_expect(arl_base, "cow_ksm", &cow_ksm);
- }
-
- if(do_zswapio == CONFIG_BOOLEAN_YES || do_zswapio == CONFIG_BOOLEAN_AUTO) {
- arl_expect(arl_base, "zswpin", &zswpin);
- arl_expect(arl_base, "zswpout", &zswpout);
- }
- }
-
- arl_begin(arl_base);
- for(l = 0; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
- if(unlikely(words < 2)) {
- if(unlikely(words)) collector_error("Cannot read /proc/vmstat line %zu. Expected 2 params, read %zu.", l, words);
- continue;
- }
-
- if(unlikely(arl_check(arl_base,
- procfile_lineword(ff, l, 0),
- procfile_lineword(ff, l, 1)))) break;
- }
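-
-    // This relies on ARL semantics: once every arl_expect()ed keyword has been
-    // seen in this pass, arl_check() returns non-zero and the loop breaks,
-    // skipping the remaining (uninteresting) /proc/vmstat lines.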
-
- // --------------------------------------------------------------------
-
- if(do_swapio == CONFIG_BOOLEAN_YES || (do_swapio == CONFIG_BOOLEAN_AUTO &&
- (pswpin || pswpout ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_swapio = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_swapio = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if(unlikely(!st_swapio)) {
- st_swapio = rrdset_create_localhost(
- "mem"
- , "swapio"
- , NULL
- , "swap"
- , NULL
- , "Swap I/O"
- , "KiB/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_SWAPIO
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_in = rrddim_add(st_swapio, "in", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st_swapio, "out", NULL, -sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
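-
-        // Pages are converted to KiB at chart time: with a 4,096-byte page,
-        // multiplier 4096 and divisor 1024 scale each page to 4 KiB, and the
-        // negative multiplier on "out" draws outbound swap below the zero line
-        // of the area chart.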
- }
-
- rrddim_set_by_pointer(st_swapio, rd_in, pswpin);
- rrddim_set_by_pointer(st_swapio, rd_out, pswpout);
- rrdset_done(st_swapio);
- }
-
- // --------------------------------------------------------------------
-
- if(do_io) {
- static RRDSET *st_io = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if(unlikely(!st_io)) {
- st_io = rrdset_create_localhost(
- "system"
- , "pgpgio"
- , NULL
- , "disk"
- , NULL
- , "Memory Paged from/to disk"
- , "KiB/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_SYSTEM_PGPGIO
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_in = rrddim_add(st_io, "in", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st_io, "out", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_io, rd_in, pgpgin);
- rrddim_set_by_pointer(st_io, rd_out, pgpgout);
- rrdset_done(st_io);
- }
-
- // --------------------------------------------------------------------
-
- if(do_pgfaults) {
- static RRDSET *st_pgfaults = NULL;
- static RRDDIM *rd_minor = NULL, *rd_major = NULL;
-
- if(unlikely(!st_pgfaults)) {
- st_pgfaults = rrdset_create_localhost(
- "mem"
- , "pgfaults"
- , NULL
- , "page faults"
- , NULL
- , "Memory Page Faults"
- , "faults/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st_pgfaults, RRDSET_FLAG_DETAIL);
-
- rd_minor = rrddim_add(st_pgfaults, "minor", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_major = rrddim_add(st_pgfaults, "major", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_pgfaults, rd_minor, pgfault);
- rrddim_set_by_pointer(st_pgfaults, rd_major, pgmajfault);
- rrdset_done(st_pgfaults);
- }
-
- // --------------------------------------------------------------------
-
- if (do_oom_kill == CONFIG_BOOLEAN_YES ||
- (do_oom_kill == CONFIG_BOOLEAN_AUTO && (oom_kill || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- static RRDSET *st_oom_kill = NULL;
- static RRDDIM *rd_oom_kill = NULL;
-
- do_oom_kill = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!st_oom_kill)) {
- st_oom_kill = rrdset_create_localhost(
- "mem"
- , "oom_kill"
- , NULL
- , "OOM kills"
- , NULL
- , "Out of Memory Kills"
- , "kills/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_SYSTEM_OOM_KILL
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st_oom_kill, RRDSET_FLAG_DETAIL);
-
- rd_oom_kill = rrddim_add(st_oom_kill, "kills", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_oom_kill, rd_oom_kill, oom_kill);
- rrdset_done(st_oom_kill);
- }
-
- // --------------------------------------------------------------------
-
-    // On-demand criterion for NUMA. Since this won't change at run time, we
-    // check it only once: NUMA statistics are considered interesting only when
-    // at least one of the counters is non-zero, because single-node systems
-    // keep them all at zero (every access is local).
-    if(unlikely(has_numa == -1))
-        has_numa = (numa_local || numa_foreign || numa_interleave || numa_other || numa_pte_updates ||
-                    numa_huge_pte_updates || numa_hint_faults || numa_hint_faults_local || numa_pages_migrated) ? 1 : 0;
-
- if(do_numa == CONFIG_BOOLEAN_YES || (do_numa == CONFIG_BOOLEAN_AUTO && has_numa)) {
- do_numa = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_numa = NULL;
- static RRDDIM *rd_local = NULL, *rd_foreign = NULL, *rd_interleave = NULL, *rd_other = NULL, *rd_pte_updates = NULL, *rd_huge_pte_updates = NULL, *rd_hint_faults = NULL, *rd_hint_faults_local = NULL, *rd_pages_migrated = NULL;
-
- if(unlikely(!st_numa)) {
- st_numa = rrdset_create_localhost(
- "mem"
- , "numa"
- , NULL
- , "numa"
- , NULL
- , "NUMA events"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_NUMA
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(st_numa, RRDSET_FLAG_DETAIL);
-
- // These depend on CONFIG_NUMA in the kernel.
- rd_local = rrddim_add(st_numa, "local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_foreign = rrddim_add(st_numa, "foreign", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_interleave = rrddim_add(st_numa, "interleave", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_other = rrddim_add(st_numa, "other", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- // The following stats depend on CONFIG_NUMA_BALANCING in the
- // kernel.
- rd_pte_updates = rrddim_add(st_numa, "pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_huge_pte_updates = rrddim_add(st_numa, "huge_pte_updates", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_hint_faults = rrddim_add(st_numa, "hint_faults", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_hint_faults_local = rrddim_add(st_numa, "hint_faults_local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_pages_migrated = rrddim_add(st_numa, "pages_migrated", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_numa, rd_local, numa_local);
- rrddim_set_by_pointer(st_numa, rd_foreign, numa_foreign);
- rrddim_set_by_pointer(st_numa, rd_interleave, numa_interleave);
- rrddim_set_by_pointer(st_numa, rd_other, numa_other);
-
- rrddim_set_by_pointer(st_numa, rd_pte_updates, numa_pte_updates);
- rrddim_set_by_pointer(st_numa, rd_huge_pte_updates, numa_huge_pte_updates);
- rrddim_set_by_pointer(st_numa, rd_hint_faults, numa_hint_faults);
- rrddim_set_by_pointer(st_numa, rd_hint_faults_local, numa_hint_faults_local);
- rrddim_set_by_pointer(st_numa, rd_pages_migrated, numa_pages_migrated);
-
- rrdset_done(st_numa);
- }
-
- // --------------------------------------------------------------------
-
- if(do_balloon == CONFIG_BOOLEAN_YES || (do_balloon == CONFIG_BOOLEAN_AUTO && (balloon_inflate || balloon_deflate ||
- balloon_migrate || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_balloon = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_balloon = NULL;
- static RRDDIM *rd_inflate = NULL, *rd_deflate = NULL, *rd_migrate = NULL;
-
- if(unlikely(!st_balloon)) {
- st_balloon = rrdset_create_localhost(
- "mem"
- , "balloon"
- , NULL
- , "balloon"
- , NULL
- , "Memory Ballooning Operations"
- , "KiB/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_BALLOON
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_inflate = rrddim_add(st_balloon, "inflate", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
- rd_deflate = rrddim_add(st_balloon, "deflate", NULL, -sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
- rd_migrate = rrddim_add(st_balloon, "migrate", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_balloon, rd_inflate, balloon_inflate);
- rrddim_set_by_pointer(st_balloon, rd_deflate, balloon_deflate);
- rrddim_set_by_pointer(st_balloon, rd_migrate, balloon_migrate);
-
- rrdset_done(st_balloon);
- }
-
- // --------------------------------------------------------------------
-
- if(do_zswapio == CONFIG_BOOLEAN_YES || (do_zswapio == CONFIG_BOOLEAN_AUTO &&
- (zswpin || zswpout ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_zswapio = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_zswapio = NULL;
- static RRDDIM *rd_in = NULL, *rd_out = NULL;
-
- if(unlikely(!st_zswapio)) {
- st_zswapio = rrdset_create_localhost(
- "mem"
- , "zswapio"
- , NULL
- , "zswap"
- , NULL
- , "ZSwap I/O"
- , "KiB/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_ZSWAPIO
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_in = rrddim_add(st_zswapio, "in", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
- rd_out = rrddim_add(st_zswapio, "out", NULL, -sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_zswapio, rd_in, zswpin);
- rrddim_set_by_pointer(st_zswapio, rd_out, zswpout);
- rrdset_done(st_zswapio);
- }
-
- // --------------------------------------------------------------------
-
- if(do_ksm == CONFIG_BOOLEAN_YES || (do_ksm == CONFIG_BOOLEAN_AUTO &&
- (cow_ksm || ksm_swpin_copy ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_ksm = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_ksm_cow = NULL;
- static RRDDIM *rd_swapin = NULL, *rd_write = NULL;
-
- if(unlikely(!st_ksm_cow)) {
- st_ksm_cow = rrdset_create_localhost(
- "mem"
- , "ksm_cow"
- , NULL
- , "ksm"
- , NULL
- , "KSM Copy On Write Operations"
- , "KiB/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_KSM_COW
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_swapin = rrddim_add(st_ksm_cow, "swapin", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
- rd_write = rrddim_add(st_ksm_cow, "write", NULL, sysconf(_SC_PAGESIZE), 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_ksm_cow, rd_swapin, ksm_swpin_copy);
- rrddim_set_by_pointer(st_ksm_cow, rd_write, cow_ksm);
-
- rrdset_done(st_ksm_cow);
- }
-
- // --------------------------------------------------------------------
-
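-    // Transparent Huge Pages: one chart per event family (anonymous faults,
-    // file-backed allocations, zero page, khugepaged collapses, splits,
-    // swap-outs and compaction).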
- if(do_thp == CONFIG_BOOLEAN_YES || do_thp == CONFIG_BOOLEAN_AUTO) {
-
- if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
- (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES || thp_fault_alloc || thp_fault_fallback || thp_fault_fallback_charge))) {
-
- static RRDSET *st_thp_fault = NULL;
- static RRDDIM *rd_alloc = NULL, *rd_fallback = NULL, *rd_fallback_charge = NULL;
-
- if(unlikely(!st_thp_fault)) {
- st_thp_fault = rrdset_create_localhost(
- "mem"
- , "thp_faults"
- , NULL
- , "hugepages"
- , NULL
- , "Transparent Huge Page Fault Allocations"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_HUGEPAGES_FAULTS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_alloc = rrddim_add(st_thp_fault, "alloc", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_fallback = rrddim_add(st_thp_fault, "fallback", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_fallback_charge = rrddim_add(st_thp_fault, "fallback_charge", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_thp_fault, rd_alloc, thp_fault_alloc);
- rrddim_set_by_pointer(st_thp_fault, rd_fallback, thp_fault_fallback);
- rrddim_set_by_pointer(st_thp_fault, rd_fallback_charge, thp_fault_fallback_charge);
-
- rrdset_done(st_thp_fault);
- }
-
-        if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
-            (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES || thp_file_alloc || thp_file_fallback || thp_file_fallback_charge || thp_file_mapped))) {
-
- static RRDSET *st_thp_file = NULL;
- static RRDDIM *rd_alloc = NULL, *rd_fallback = NULL, *rd_fallback_charge = NULL, *rd_mapped = NULL;
-
- if(unlikely(!st_thp_file)) {
- st_thp_file = rrdset_create_localhost(
- "mem"
- , "thp_file"
- , NULL
- , "hugepages"
- , NULL
- , "Transparent Huge Page File Allocations"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_HUGEPAGES_FILE
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_alloc = rrddim_add(st_thp_file, "alloc", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_fallback = rrddim_add(st_thp_file, "fallback", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_mapped = rrddim_add(st_thp_file, "mapped", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_fallback_charge = rrddim_add(st_thp_file, "fallback_charge", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_thp_file, rd_alloc, thp_file_alloc);
- rrddim_set_by_pointer(st_thp_file, rd_fallback, thp_file_fallback);
-        rrddim_set_by_pointer(st_thp_file, rd_mapped, thp_file_mapped);
- rrddim_set_by_pointer(st_thp_file, rd_fallback_charge, thp_file_fallback_charge);
-
- rrdset_done(st_thp_file);
- }
-
- if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
- (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES || thp_zero_page_alloc || thp_zero_page_alloc_failed))) {
-
- static RRDSET *st_thp_zero = NULL;
- static RRDDIM *rd_alloc = NULL, *rd_failed = NULL;
-
- if(unlikely(!st_thp_zero)) {
- st_thp_zero = rrdset_create_localhost(
- "mem"
- , "thp_zero"
- , NULL
- , "hugepages"
- , NULL
- , "Transparent Huge Zero Page Allocations"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_HUGEPAGES_ZERO
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_alloc = rrddim_add(st_thp_zero, "alloc", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_failed = rrddim_add(st_thp_zero, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_thp_zero, rd_alloc, thp_zero_page_alloc);
- rrddim_set_by_pointer(st_thp_zero, rd_failed, thp_zero_page_alloc_failed);
-
- rrdset_done(st_thp_zero);
- }
-
- if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
- (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES || thp_collapse_alloc || thp_collapse_alloc_failed))) {
-
- static RRDSET *st_khugepaged = NULL;
- static RRDDIM *rd_alloc = NULL, *rd_failed = NULL;
-
- if(unlikely(!st_khugepaged)) {
- st_khugepaged = rrdset_create_localhost(
- "mem"
- , "thp_collapse"
- , NULL
- , "hugepages"
- , NULL
- , "Transparent Huge Pages Collapsed by khugepaged"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_HUGEPAGES_KHUGEPAGED
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_alloc = rrddim_add(st_khugepaged, "alloc", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_failed = rrddim_add(st_khugepaged, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_khugepaged, rd_alloc, thp_collapse_alloc);
- rrddim_set_by_pointer(st_khugepaged, rd_failed, thp_collapse_alloc_failed);
-
- rrdset_done(st_khugepaged);
- }
-
- if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
- (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES || thp_split_page || thp_split_page_failed || thp_deferred_split_page || thp_split_pmd))) {
-
- static RRDSET *st_thp_split = NULL;
- static RRDDIM *rd_split = NULL, *rd_failed = NULL, *rd_deferred_split = NULL, *rd_split_pmd = NULL;
-
- if(unlikely(!st_thp_split)) {
- st_thp_split = rrdset_create_localhost(
- "mem"
- , "thp_split"
- , NULL
- , "hugepages"
- , NULL
- , "Transparent Huge Page Splits"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_HUGEPAGES_SPLITS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_split = rrddim_add(st_thp_split, "split", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_failed = rrddim_add(st_thp_split, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_split_pmd = rrddim_add(st_thp_split, "split_pmd", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_deferred_split = rrddim_add(st_thp_split, "split_deferred", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_thp_split, rd_split, thp_split_page);
- rrddim_set_by_pointer(st_thp_split, rd_failed, thp_split_page_failed);
- rrddim_set_by_pointer(st_thp_split, rd_split_pmd, thp_split_pmd);
- rrddim_set_by_pointer(st_thp_split, rd_deferred_split, thp_deferred_split_page);
-
- rrdset_done(st_thp_split);
- }
-
- if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
- (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES || thp_swpout || thp_swpout_fallback))) {
-
-        static RRDSET *st_thp_swapout = NULL;
- static RRDDIM *rd_swapout = NULL, *rd_fallback = NULL;
-
-        if(unlikely(!st_thp_swapout)) {
-            st_thp_swapout = rrdset_create_localhost(
- "mem"
- , "thp_swapout"
- , NULL
- , "hugepages"
- , NULL
- , "Transparent Huge Pages Swap Out"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_HUGEPAGES_SWAPOUT
- , update_every
- , RRDSET_TYPE_LINE
- );
-
-            rd_swapout = rrddim_add(st_thp_swapout, "swapout", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-            rd_fallback = rrddim_add(st_thp_swapout, "fallback", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
-        rrddim_set_by_pointer(st_thp_swapout, rd_swapout, thp_swpout);
-        rrddim_set_by_pointer(st_thp_swapout, rd_fallback, thp_swpout_fallback);
-
-        rrdset_done(st_thp_swapout);
- }
-
- if(do_thp == CONFIG_BOOLEAN_YES || (do_thp == CONFIG_BOOLEAN_AUTO &&
- (netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES || compact_stall || compact_fail || compact_success))) {
-
- static RRDSET *st_thp_compact = NULL;
- static RRDDIM *rd_success = NULL, *rd_fail = NULL, *rd_stall = NULL;
-
- if(unlikely(!st_thp_compact)) {
- st_thp_compact = rrdset_create_localhost(
- "mem"
- , "thp_compact"
- , NULL
- , "hugepages"
- , NULL
- , "Transparent Huge Pages Compaction"
- , "events/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_VMSTAT_NAME
- , NETDATA_CHART_PRIO_MEM_HUGEPAGES_COMPACT
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_success = rrddim_add(st_thp_compact, "success", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_fail = rrddim_add(st_thp_compact, "fail", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_stall = rrddim_add(st_thp_compact, "stall", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_thp_compact, rd_success, compact_success);
- rrddim_set_by_pointer(st_thp_compact, rd_fail, compact_fail);
- rrddim_set_by_pointer(st_thp_compact, rd_stall, compact_stall);
-
- rrdset_done(st_thp_compact);
- }
- }
-
- return 0;
-}
-
diff --git a/collectors/proc.plugin/sys_block_zram.c b/collectors/proc.plugin/sys_block_zram.c
deleted file mode 100644
index dac7cac0f..000000000
--- a/collectors/proc.plugin/sys_block_zram.c
+++ /dev/null
@@ -1,285 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_ZRAM_NAME "/sys/block/zram"
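-// Mark a chart as obsolete (safe to call from the collector thread) and reset
-// the stored pointer, so the chart can be re-created if the device reappears.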
-#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete___safe_from_collector_thread(st); (st) = NULL; } } while(0)
-
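-// Field order mirrors the columns of /sys/block/zram<id>/mm_stat (per the
-// kernel's zram documentation); sizes are in bytes, same_pages and
-// pages_compacted are page counts.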
-typedef struct mm_stat {
- unsigned long long orig_data_size;
- unsigned long long compr_data_size;
- unsigned long long mem_used_total;
- unsigned long long mem_limit;
- unsigned long long mem_used_max;
- unsigned long long same_pages;
- unsigned long long pages_compacted;
-} MM_STAT;
-
-typedef struct zram_device {
- procfile *file;
-
- RRDSET *st_usage;
- RRDDIM *rd_compr_data_size;
- RRDDIM *rd_metadata_size;
-
- RRDSET *st_savings;
- RRDDIM *rd_original_size;
- RRDDIM *rd_savings_size;
-
- RRDSET *st_comp_ratio;
- RRDDIM *rd_comp_ratio;
-
- RRDSET *st_alloc_efficiency;
- RRDDIM *rd_alloc_efficiency;
-} ZRAM_DEVICE;
-
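-// Scan /proc/devices (lines of "major name") for the "zram" entry and return
-// its block major number, or -1 when the zram module is not loaded.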
-static int try_get_zram_major_number(procfile *file) {
- size_t i;
- unsigned int lines = procfile_lines(file);
- int id = -1;
- char *name = NULL;
- for (i = 0; i < lines; i++)
- {
- if (procfile_linewords(file, i) < 2)
- continue;
- name = procfile_lineword(file, i, 1);
- if (strcmp(name, "zram") == 0)
- {
- id = str2i(procfile_lineword(file, i, 0));
- if (id == 0)
- return -1;
- return id;
- }
- }
- return -1;
-}
-
-static inline void init_rrd(const char *name, ZRAM_DEVICE *d, int update_every) {
- char chart_name[RRD_ID_LENGTH_MAX + 1];
-
- snprintfz(chart_name, RRD_ID_LENGTH_MAX, "zram_usage.%s", name);
- d->st_usage = rrdset_create_localhost(
- "mem"
- , chart_name
- , chart_name
- , name
- , "mem.zram_usage"
- , "ZRAM Memory Usage"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_ZRAM_NAME
- , NETDATA_CHART_PRIO_MEM_ZRAM
- , update_every
- , RRDSET_TYPE_AREA);
- d->rd_compr_data_size = rrddim_add(d->st_usage, "compressed", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- d->rd_metadata_size = rrddim_add(d->st_usage, "metadata", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrdlabels_add(d->st_usage->rrdlabels, "device", name, RRDLABEL_SRC_AUTO);
-
- snprintfz(chart_name, RRD_ID_LENGTH_MAX, "zram_savings.%s", name);
- d->st_savings = rrdset_create_localhost(
- "mem"
- , chart_name
- , chart_name
- , name
- , "mem.zram_savings"
- , "ZRAM Memory Savings"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_ZRAM_NAME
- , NETDATA_CHART_PRIO_MEM_ZRAM_SAVINGS
- , update_every
- , RRDSET_TYPE_AREA);
- d->rd_savings_size = rrddim_add(d->st_savings, "savings", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- d->rd_original_size = rrddim_add(d->st_savings, "original", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rrdlabels_add(d->st_savings->rrdlabels, "device", name, RRDLABEL_SRC_AUTO);
-
- snprintfz(chart_name, RRD_ID_LENGTH_MAX, "zram_ratio.%s", name);
- d->st_comp_ratio = rrdset_create_localhost(
- "mem"
- , chart_name
- , chart_name
- , name
- , "mem.zram_ratio"
- , "ZRAM Compression Ratio (original to compressed)"
- , "ratio"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_ZRAM_NAME
- , NETDATA_CHART_PRIO_MEM_ZRAM_RATIO
- , update_every
- , RRDSET_TYPE_LINE);
- d->rd_comp_ratio = rrddim_add(d->st_comp_ratio, "ratio", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
- rrdlabels_add(d->st_comp_ratio->rrdlabels, "device", name, RRDLABEL_SRC_AUTO);
-
- snprintfz(chart_name, RRD_ID_LENGTH_MAX, "zram_efficiency.%s", name);
- d->st_alloc_efficiency = rrdset_create_localhost(
- "mem"
- , chart_name
- , chart_name
- , name
- , "mem.zram_efficiency"
- , "ZRAM Efficiency"
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_ZRAM_NAME
- , NETDATA_CHART_PRIO_MEM_ZRAM_EFFICIENCY
- , update_every
- , RRDSET_TYPE_LINE);
- d->rd_alloc_efficiency = rrddim_add(d->st_alloc_efficiency, "percent", NULL, 1, 10000, RRD_ALGORITHM_ABSOLUTE);
- rrdlabels_add(d->st_alloc_efficiency->rrdlabels, "device", name, RRDLABEL_SRC_AUTO);
-}
-
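-// Walk /dev for block devices whose major number matches zram, open their
-// mm_stat files and create their charts; returns the number of devices found.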
-static int init_devices(DICTIONARY *devices, unsigned int zram_id, int update_every) {
- int count = 0;
- struct dirent *de;
- struct stat st;
- procfile *ff = NULL;
- ZRAM_DEVICE device;
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/dev");
- DIR *dir = opendir(filename);
-
- if (unlikely(!dir))
- return 0;
- while ((de = readdir(dir)))
- {
- snprintfz(filename, FILENAME_MAX, "%s/dev/%s", netdata_configured_host_prefix, de->d_name);
- if (unlikely(stat(filename, &st) != 0))
- {
- collector_error("ZRAM : Unable to stat %s: %s", filename, strerror(errno));
- continue;
- }
- if (major(st.st_rdev) == zram_id)
- {
- collector_info("ZRAM : Found device %s", filename);
- snprintfz(filename, FILENAME_MAX, "%s/sys/block/%s/mm_stat", netdata_configured_host_prefix, de->d_name);
- ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if (ff == NULL)
- {
- collector_error("ZRAM : Failed to open %s: %s", filename, strerror(errno));
- continue;
- }
- device.file = ff;
- init_rrd(de->d_name, &device, update_every);
- dictionary_set(devices, de->d_name, &device, sizeof(ZRAM_DEVICE));
- count++;
- }
- }
- closedir(dir);
- return count;
-}
-
-static void free_device(DICTIONARY *dict, const char *name)
-{
- ZRAM_DEVICE *d = (ZRAM_DEVICE*)dictionary_get(dict, name);
- collector_info("ZRAM : Disabling monitoring of device %s", name);
- rrdset_obsolete_and_pointer_null(d->st_usage);
- rrdset_obsolete_and_pointer_null(d->st_savings);
- rrdset_obsolete_and_pointer_null(d->st_alloc_efficiency);
- rrdset_obsolete_and_pointer_null(d->st_comp_ratio);
- dictionary_del(dict, name);
-}
-
-static inline int read_mm_stat(procfile **p_ff, MM_STAT *stats) {
-    // procfile_readall() may reallocate (and therefore move) the procfile, so
-    // the caller's stored handle has to be refreshed through p_ff on every read.
-    procfile *ff = procfile_readall(*p_ff);
-    if (!ff) {
-        *p_ff = NULL;
-        return -1;
-    }
-    *p_ff = ff;
-    if (procfile_lines(ff) < 1) {
-        procfile_close(ff);
-        *p_ff = NULL;
-        return -1;
-    }
-    if (procfile_linewords(ff, 0) < 7) {
-        procfile_close(ff);
-        *p_ff = NULL;
-        return -1;
-    }
-
-    stats->orig_data_size = str2ull(procfile_word(ff, 0), NULL);
-    stats->compr_data_size = str2ull(procfile_word(ff, 1), NULL);
-    stats->mem_used_total = str2ull(procfile_word(ff, 2), NULL);
-    stats->mem_limit = str2ull(procfile_word(ff, 3), NULL);
-    stats->mem_used_max = str2ull(procfile_word(ff, 4), NULL);
-    stats->same_pages = str2ull(procfile_word(ff, 5), NULL);
-    stats->pages_compacted = str2ull(procfile_word(ff, 6), NULL);
-    return 0;
-}
-
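-// Dictionary walkthrough callback: refresh mm_stat and update all four charts
-// of a device; on read failure the device is dropped from monitoring.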
-static int collect_zram_metrics(const DICTIONARY_ITEM *item, void *entry, void *data) {
- const char *name = dictionary_acquired_item_name(item);
- ZRAM_DEVICE *dev = entry;
- DICTIONARY *dict = data;
-
- MM_STAT mm;
- int value;
-
-    if (unlikely(read_mm_stat(&dev->file, &mm) < 0)) {
- free_device(dict, name);
- return -1;
- }
-
- // zram_usage
- rrddim_set_by_pointer(dev->st_usage, dev->rd_compr_data_size, mm.compr_data_size);
- rrddim_set_by_pointer(dev->st_usage, dev->rd_metadata_size, mm.mem_used_total - mm.compr_data_size);
- rrdset_done(dev->st_usage);
-
- // zram_savings
- rrddim_set_by_pointer(dev->st_savings, dev->rd_savings_size, mm.compr_data_size - mm.orig_data_size);
- rrddim_set_by_pointer(dev->st_savings, dev->rd_original_size, mm.orig_data_size);
- rrdset_done(dev->st_savings);
-
- // zram_ratio
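-    // fixed point: the dimension divisor is 100, so a value of 100 charts as
-    // a ratio of 1.0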
-    value = mm.compr_data_size == 0 ? 100 : mm.orig_data_size * 100 / mm.compr_data_size;
- rrddim_set_by_pointer(dev->st_comp_ratio, dev->rd_comp_ratio, value);
- rrdset_done(dev->st_comp_ratio);
-
- // zram_efficiency
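-    // compr_data_size / mem_used_total as a percentage; scaled by 1000000 and
-    // charted with a divisor of 10000 to keep two decimal places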
-    value = mm.mem_used_total == 0 ? 1000000 : (mm.compr_data_size * 1000000 / mm.mem_used_total);
- rrddim_set_by_pointer(dev->st_alloc_efficiency, dev->rd_alloc_efficiency, value);
- rrdset_done(dev->st_alloc_efficiency);
-
- return 0;
-}
-
-int do_sys_block_zram(int update_every, usec_t dt) {
- static procfile *ff = NULL;
- static DICTIONARY *devices = NULL;
- static int initialized = 0;
- static int device_count = 0;
- int zram_id = -1;
-
- (void)dt;
-
- if (unlikely(!initialized))
- {
- initialized = 1;
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/devices");
-
- ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if (ff == NULL)
- {
- collector_error("Cannot read %s", filename);
- return 1;
- }
- ff = procfile_readall(ff);
- if (!ff)
- return 1;
- zram_id = try_get_zram_major_number(ff);
- if (zram_id == -1)
- {
- if (ff != NULL)
- procfile_close(ff);
- return 1;
- }
- procfile_close(ff);
-
- devices = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED, &dictionary_stats_category_collectors, 0);
- device_count = init_devices(devices, (unsigned int)zram_id, update_every);
- }
-
- if (unlikely(device_count < 1))
- return 1;
-
- dictionary_walkthrough_write(devices, collect_zram_metrics, devices);
- return 0;
-} \ No newline at end of file
diff --git a/collectors/proc.plugin/sys_class_drm.c b/collectors/proc.plugin/sys_class_drm.c
deleted file mode 100644
index 3ed1fb875..000000000
--- a/collectors/proc.plugin/sys_class_drm.c
+++ /dev/null
@@ -1,1183 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_DRM_NAME "/sys/class/drm"
-#define CONFIG_SECTION_PLUGIN_PROC_DRM "plugin:proc:/sys/class/drm"
-#define AMDGPU_CHART_TYPE "amdgpu"
-
-struct amdgpu_id_struct {
- unsigned long long asic_id;
- unsigned long long pci_rev_id;
- const char *marketing_name;
-};
-
-/*
- * About amdgpu_ids list:
- * ------------------------------------------------------------------------
- * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.
- *
- * The list is copied from:
- * https://raw.githubusercontent.com/Syllo/nvtop/master/src/amdgpu_ids.h
- *
- * which is modified from libdrm (MIT License):
- *
- * URL: https://gitlab.freedesktop.org/mesa/drm/-/blob/main/data/amdgpu.ids
- * ------------------------------------------------------------------------
- * **IMPORTANT**: The amdgpu_ids has to be modified after new GPU releases.
- * ------------------------------------------------------------------------*/
-
-static const struct amdgpu_id_struct amdgpu_ids[] = {
- {0x1309, 0x00, "AMD Radeon R7 Graphics"},
- {0x130A, 0x00, "AMD Radeon R6 Graphics"},
- {0x130B, 0x00, "AMD Radeon R4 Graphics"},
- {0x130C, 0x00, "AMD Radeon R7 Graphics"},
- {0x130D, 0x00, "AMD Radeon R6 Graphics"},
- {0x130E, 0x00, "AMD Radeon R5 Graphics"},
- {0x130F, 0x00, "AMD Radeon R7 Graphics"},
- {0x130F, 0xD4, "AMD Radeon R7 Graphics"},
- {0x130F, 0xD5, "AMD Radeon R7 Graphics"},
- {0x130F, 0xD6, "AMD Radeon R7 Graphics"},
- {0x130F, 0xD7, "AMD Radeon R7 Graphics"},
- {0x1313, 0x00, "AMD Radeon R7 Graphics"},
- {0x1313, 0xD4, "AMD Radeon R7 Graphics"},
- {0x1313, 0xD5, "AMD Radeon R7 Graphics"},
- {0x1313, 0xD6, "AMD Radeon R7 Graphics"},
- {0x1315, 0x00, "AMD Radeon R5 Graphics"},
- {0x1315, 0xD4, "AMD Radeon R5 Graphics"},
- {0x1315, 0xD5, "AMD Radeon R5 Graphics"},
- {0x1315, 0xD6, "AMD Radeon R5 Graphics"},
- {0x1315, 0xD7, "AMD Radeon R5 Graphics"},
- {0x1316, 0x00, "AMD Radeon R5 Graphics"},
- {0x1318, 0x00, "AMD Radeon R5 Graphics"},
- {0x131B, 0x00, "AMD Radeon R4 Graphics"},
- {0x131C, 0x00, "AMD Radeon R7 Graphics"},
- {0x131D, 0x00, "AMD Radeon R6 Graphics"},
- {0x15D8, 0x00, "AMD Radeon RX Vega 8 Graphics WS"},
- {0x15D8, 0x91, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0x91, "AMD Ryzen Embedded R1606G with Radeon Vega Gfx"},
- {0x15D8, 0x92, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0x92, "AMD Ryzen Embedded R1505G with Radeon Vega Gfx"},
- {0x15D8, 0x93, "AMD Radeon Vega 1 Graphics"},
- {0x15D8, 0xA1, "AMD Radeon Vega 10 Graphics"},
- {0x15D8, 0xA2, "AMD Radeon Vega 8 Graphics"},
- {0x15D8, 0xA3, "AMD Radeon Vega 6 Graphics"},
- {0x15D8, 0xA4, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xB1, "AMD Radeon Vega 10 Graphics"},
- {0x15D8, 0xB2, "AMD Radeon Vega 8 Graphics"},
- {0x15D8, 0xB3, "AMD Radeon Vega 6 Graphics"},
- {0x15D8, 0xB4, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xC1, "AMD Radeon Vega 10 Graphics"},
- {0x15D8, 0xC2, "AMD Radeon Vega 8 Graphics"},
- {0x15D8, 0xC3, "AMD Radeon Vega 6 Graphics"},
- {0x15D8, 0xC4, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xC5, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xC8, "AMD Radeon Vega 11 Graphics"},
- {0x15D8, 0xC9, "AMD Radeon Vega 8 Graphics"},
- {0x15D8, 0xCA, "AMD Radeon Vega 11 Graphics"},
- {0x15D8, 0xCB, "AMD Radeon Vega 8 Graphics"},
- {0x15D8, 0xCC, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xCE, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xCF, "AMD Ryzen Embedded R1305G with Radeon Vega Gfx"},
- {0x15D8, 0xD1, "AMD Radeon Vega 10 Graphics"},
- {0x15D8, 0xD2, "AMD Radeon Vega 8 Graphics"},
- {0x15D8, 0xD3, "AMD Radeon Vega 6 Graphics"},
- {0x15D8, 0xD4, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xD8, "AMD Radeon Vega 11 Graphics"},
- {0x15D8, 0xD9, "AMD Radeon Vega 8 Graphics"},
- {0x15D8, 0xDA, "AMD Radeon Vega 11 Graphics"},
- {0x15D8, 0xDB, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xDB, "AMD Radeon Vega 8 Graphics"},
- {0x15D8, 0xDC, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xDD, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xDE, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xDF, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xE3, "AMD Radeon Vega 3 Graphics"},
- {0x15D8, 0xE4, "AMD Ryzen Embedded R1102G with Radeon Vega Gfx"},
- {0x15DD, 0x81, "AMD Ryzen Embedded V1807B with Radeon Vega Gfx"},
- {0x15DD, 0x82, "AMD Ryzen Embedded V1756B with Radeon Vega Gfx"},
- {0x15DD, 0x83, "AMD Ryzen Embedded V1605B with Radeon Vega Gfx"},
- {0x15DD, 0x84, "AMD Radeon Vega 6 Graphics"},
- {0x15DD, 0x85, "AMD Ryzen Embedded V1202B with Radeon Vega Gfx"},
- {0x15DD, 0x86, "AMD Radeon Vega 11 Graphics"},
- {0x15DD, 0x88, "AMD Radeon Vega 8 Graphics"},
- {0x15DD, 0xC1, "AMD Radeon Vega 11 Graphics"},
- {0x15DD, 0xC2, "AMD Radeon Vega 8 Graphics"},
- {0x15DD, 0xC3, "AMD Radeon Vega 3 / 10 Graphics"},
- {0x15DD, 0xC4, "AMD Radeon Vega 8 Graphics"},
- {0x15DD, 0xC5, "AMD Radeon Vega 3 Graphics"},
- {0x15DD, 0xC6, "AMD Radeon Vega 11 Graphics"},
- {0x15DD, 0xC8, "AMD Radeon Vega 8 Graphics"},
- {0x15DD, 0xC9, "AMD Radeon Vega 11 Graphics"},
- {0x15DD, 0xCA, "AMD Radeon Vega 8 Graphics"},
- {0x15DD, 0xCB, "AMD Radeon Vega 3 Graphics"},
- {0x15DD, 0xCC, "AMD Radeon Vega 6 Graphics"},
- {0x15DD, 0xCE, "AMD Radeon Vega 3 Graphics"},
- {0x15DD, 0xCF, "AMD Radeon Vega 3 Graphics"},
- {0x15DD, 0xD0, "AMD Radeon Vega 10 Graphics"},
- {0x15DD, 0xD1, "AMD Radeon Vega 8 Graphics"},
- {0x15DD, 0xD3, "AMD Radeon Vega 11 Graphics"},
- {0x15DD, 0xD5, "AMD Radeon Vega 8 Graphics"},
- {0x15DD, 0xD6, "AMD Radeon Vega 11 Graphics"},
- {0x15DD, 0xD7, "AMD Radeon Vega 8 Graphics"},
- {0x15DD, 0xD8, "AMD Radeon Vega 3 Graphics"},
- {0x15DD, 0xD9, "AMD Radeon Vega 6 Graphics"},
- {0x15DD, 0xE1, "AMD Radeon Vega 3 Graphics"},
- {0x15DD, 0xE2, "AMD Radeon Vega 3 Graphics"},
- {0x163F, 0xAE, "AMD Custom GPU 0405"},
- {0x6600, 0x00, "AMD Radeon HD 8600 / 8700M"},
- {0x6600, 0x81, "AMD Radeon R7 M370"},
- {0x6601, 0x00, "AMD Radeon HD 8500M / 8700M"},
- {0x6604, 0x00, "AMD Radeon R7 M265 Series"},
- {0x6604, 0x81, "AMD Radeon R7 M350"},
- {0x6605, 0x00, "AMD Radeon R7 M260 Series"},
- {0x6605, 0x81, "AMD Radeon R7 M340"},
- {0x6606, 0x00, "AMD Radeon HD 8790M"},
- {0x6607, 0x00, "AMD Radeon R5 M240"},
- {0x6608, 0x00, "AMD FirePro W2100"},
- {0x6610, 0x00, "AMD Radeon R7 200 Series"},
- {0x6610, 0x81, "AMD Radeon R7 350"},
- {0x6610, 0x83, "AMD Radeon R5 340"},
- {0x6610, 0x87, "AMD Radeon R7 200 Series"},
- {0x6611, 0x00, "AMD Radeon R7 200 Series"},
- {0x6611, 0x87, "AMD Radeon R7 200 Series"},
- {0x6613, 0x00, "AMD Radeon R7 200 Series"},
- {0x6617, 0x00, "AMD Radeon R7 240 Series"},
- {0x6617, 0x87, "AMD Radeon R7 200 Series"},
- {0x6617, 0xC7, "AMD Radeon R7 240 Series"},
- {0x6640, 0x00, "AMD Radeon HD 8950"},
- {0x6640, 0x80, "AMD Radeon R9 M380"},
- {0x6646, 0x00, "AMD Radeon R9 M280X"},
- {0x6646, 0x80, "AMD Radeon R9 M385"},
- {0x6646, 0x80, "AMD Radeon R9 M470X"},
- {0x6647, 0x00, "AMD Radeon R9 M200X Series"},
- {0x6647, 0x80, "AMD Radeon R9 M380"},
- {0x6649, 0x00, "AMD FirePro W5100"},
- {0x6658, 0x00, "AMD Radeon R7 200 Series"},
- {0x665C, 0x00, "AMD Radeon HD 7700 Series"},
- {0x665D, 0x00, "AMD Radeon R7 200 Series"},
- {0x665F, 0x81, "AMD Radeon R7 360 Series"},
- {0x6660, 0x00, "AMD Radeon HD 8600M Series"},
- {0x6660, 0x81, "AMD Radeon R5 M335"},
- {0x6660, 0x83, "AMD Radeon R5 M330"},
- {0x6663, 0x00, "AMD Radeon HD 8500M Series"},
- {0x6663, 0x83, "AMD Radeon R5 M320"},
- {0x6664, 0x00, "AMD Radeon R5 M200 Series"},
- {0x6665, 0x00, "AMD Radeon R5 M230 Series"},
- {0x6665, 0x83, "AMD Radeon R5 M320"},
- {0x6665, 0xC3, "AMD Radeon R5 M435"},
- {0x6666, 0x00, "AMD Radeon R5 M200 Series"},
- {0x6667, 0x00, "AMD Radeon R5 M200 Series"},
- {0x666F, 0x00, "AMD Radeon HD 8500M"},
- {0x66A1, 0x02, "AMD Instinct MI60 / MI50"},
- {0x66A1, 0x06, "AMD Radeon Pro VII"},
- {0x66AF, 0xC1, "AMD Radeon VII"},
- {0x6780, 0x00, "AMD FirePro W9000"},
- {0x6784, 0x00, "ATI FirePro V (FireGL V) Graphics Adapter"},
- {0x6788, 0x00, "ATI FirePro V (FireGL V) Graphics Adapter"},
- {0x678A, 0x00, "AMD FirePro W8000"},
- {0x6798, 0x00, "AMD Radeon R9 200 / HD 7900 Series"},
- {0x6799, 0x00, "AMD Radeon HD 7900 Series"},
- {0x679A, 0x00, "AMD Radeon HD 7900 Series"},
- {0x679B, 0x00, "AMD Radeon HD 7900 Series"},
- {0x679E, 0x00, "AMD Radeon HD 7800 Series"},
- {0x67A0, 0x00, "AMD Radeon FirePro W9100"},
- {0x67A1, 0x00, "AMD Radeon FirePro W8100"},
- {0x67B0, 0x00, "AMD Radeon R9 200 Series"},
- {0x67B0, 0x80, "AMD Radeon R9 390 Series"},
- {0x67B1, 0x00, "AMD Radeon R9 200 Series"},
- {0x67B1, 0x80, "AMD Radeon R9 390 Series"},
- {0x67B9, 0x00, "AMD Radeon R9 200 Series"},
- {0x67C0, 0x00, "AMD Radeon Pro WX 7100 Graphics"},
- {0x67C0, 0x80, "AMD Radeon E9550"},
- {0x67C2, 0x01, "AMD Radeon Pro V7350x2"},
- {0x67C2, 0x02, "AMD Radeon Pro V7300X"},
- {0x67C4, 0x00, "AMD Radeon Pro WX 7100 Graphics"},
- {0x67C4, 0x80, "AMD Radeon E9560 / E9565 Graphics"},
- {0x67C7, 0x00, "AMD Radeon Pro WX 5100 Graphics"},
- {0x67C7, 0x80, "AMD Radeon E9390 Graphics"},
- {0x67D0, 0x01, "AMD Radeon Pro V7350x2"},
- {0x67D0, 0x02, "AMD Radeon Pro V7300X"},
- {0x67DF, 0xC0, "AMD Radeon Pro 580X"},
- {0x67DF, 0xC1, "AMD Radeon RX 580 Series"},
- {0x67DF, 0xC2, "AMD Radeon RX 570 Series"},
- {0x67DF, 0xC3, "AMD Radeon RX 580 Series"},
- {0x67DF, 0xC4, "AMD Radeon RX 480 Graphics"},
- {0x67DF, 0xC5, "AMD Radeon RX 470 Graphics"},
- {0x67DF, 0xC6, "AMD Radeon RX 570 Series"},
- {0x67DF, 0xC7, "AMD Radeon RX 480 Graphics"},
- {0x67DF, 0xCF, "AMD Radeon RX 470 Graphics"},
- {0x67DF, 0xD7, "AMD Radeon RX 470 Graphics"},
- {0x67DF, 0xE0, "AMD Radeon RX 470 Series"},
- {0x67DF, 0xE1, "AMD Radeon RX 590 Series"},
- {0x67DF, 0xE3, "AMD Radeon RX Series"},
- {0x67DF, 0xE7, "AMD Radeon RX 580 Series"},
- {0x67DF, 0xEB, "AMD Radeon Pro 580X"},
- {0x67DF, 0xEF, "AMD Radeon RX 570 Series"},
- {0x67DF, 0xF7, "AMD Radeon RX P30PH"},
- {0x67DF, 0xFF, "AMD Radeon RX 470 Series"},
- {0x67E0, 0x00, "AMD Radeon Pro WX Series"},
- {0x67E3, 0x00, "AMD Radeon Pro WX 4100"},
- {0x67E8, 0x00, "AMD Radeon Pro WX Series"},
- {0x67E8, 0x01, "AMD Radeon Pro WX Series"},
- {0x67E8, 0x80, "AMD Radeon E9260 Graphics"},
- {0x67EB, 0x00, "AMD Radeon Pro V5300X"},
- {0x67EF, 0xC0, "AMD Radeon RX Graphics"},
- {0x67EF, 0xC1, "AMD Radeon RX 460 Graphics"},
- {0x67EF, 0xC2, "AMD Radeon Pro Series"},
- {0x67EF, 0xC3, "AMD Radeon RX Series"},
- {0x67EF, 0xC5, "AMD Radeon RX 460 Graphics"},
- {0x67EF, 0xC7, "AMD Radeon RX Graphics"},
- {0x67EF, 0xCF, "AMD Radeon RX 460 Graphics"},
- {0x67EF, 0xE0, "AMD Radeon RX 560 Series"},
- {0x67EF, 0xE1, "AMD Radeon RX Series"},
- {0x67EF, 0xE2, "AMD Radeon RX 560X"},
- {0x67EF, 0xE3, "AMD Radeon RX Series"},
- {0x67EF, 0xE5, "AMD Radeon RX 560 Series"},
- {0x67EF, 0xE7, "AMD Radeon RX 560 Series"},
- {0x67EF, 0xEF, "AMD Radeon 550 Series"},
- {0x67EF, 0xFF, "AMD Radeon RX 460 Graphics"},
- {0x67FF, 0xC0, "AMD Radeon Pro 465"},
- {0x67FF, 0xC1, "AMD Radeon RX 560 Series"},
- {0x67FF, 0xCF, "AMD Radeon RX 560 Series"},
- {0x67FF, 0xEF, "AMD Radeon RX 560 Series"},
- {0x67FF, 0xFF, "AMD Radeon RX 550 Series"},
- {0x6800, 0x00, "AMD Radeon HD 7970M"},
- {0x6801, 0x00, "AMD Radeon HD 8970M"},
- {0x6806, 0x00, "AMD Radeon R9 M290X"},
- {0x6808, 0x00, "AMD FirePro W7000"},
- {0x6808, 0x00, "ATI FirePro V (FireGL V) Graphics Adapter"},
- {0x6809, 0x00, "ATI FirePro W5000"},
- {0x6810, 0x00, "AMD Radeon R9 200 Series"},
- {0x6810, 0x81, "AMD Radeon R9 370 Series"},
- {0x6811, 0x00, "AMD Radeon R9 200 Series"},
- {0x6811, 0x81, "AMD Radeon R7 370 Series"},
- {0x6818, 0x00, "AMD Radeon HD 7800 Series"},
- {0x6819, 0x00, "AMD Radeon HD 7800 Series"},
- {0x6820, 0x00, "AMD Radeon R9 M275X"},
- {0x6820, 0x81, "AMD Radeon R9 M375"},
- {0x6820, 0x83, "AMD Radeon R9 M375X"},
- {0x6821, 0x00, "AMD Radeon R9 M200X Series"},
- {0x6821, 0x83, "AMD Radeon R9 M370X"},
- {0x6821, 0x87, "AMD Radeon R7 M380"},
- {0x6822, 0x00, "AMD Radeon E8860"},
- {0x6823, 0x00, "AMD Radeon R9 M200X Series"},
- {0x6825, 0x00, "AMD Radeon HD 7800M Series"},
- {0x6826, 0x00, "AMD Radeon HD 7700M Series"},
- {0x6827, 0x00, "AMD Radeon HD 7800M Series"},
- {0x6828, 0x00, "AMD FirePro W600"},
- {0x682B, 0x00, "AMD Radeon HD 8800M Series"},
- {0x682B, 0x87, "AMD Radeon R9 M360"},
- {0x682C, 0x00, "AMD FirePro W4100"},
- {0x682D, 0x00, "AMD Radeon HD 7700M Series"},
- {0x682F, 0x00, "AMD Radeon HD 7700M Series"},
- {0x6830, 0x00, "AMD Radeon 7800M Series"},
- {0x6831, 0x00, "AMD Radeon 7700M Series"},
- {0x6835, 0x00, "AMD Radeon R7 Series / HD 9000 Series"},
- {0x6837, 0x00, "AMD Radeon HD 7700 Series"},
- {0x683D, 0x00, "AMD Radeon HD 7700 Series"},
- {0x683F, 0x00, "AMD Radeon HD 7700 Series"},
- {0x684C, 0x00, "ATI FirePro V (FireGL V) Graphics Adapter"},
- {0x6860, 0x00, "AMD Radeon Instinct MI25"},
- {0x6860, 0x01, "AMD Radeon Instinct MI25"},
- {0x6860, 0x02, "AMD Radeon Instinct MI25"},
- {0x6860, 0x03, "AMD Radeon Pro V340"},
- {0x6860, 0x04, "AMD Radeon Instinct MI25x2"},
- {0x6860, 0x07, "AMD Radeon Pro V320"},
- {0x6861, 0x00, "AMD Radeon Pro WX 9100"},
- {0x6862, 0x00, "AMD Radeon Pro SSG"},
- {0x6863, 0x00, "AMD Radeon Vega Frontier Edition"},
- {0x6864, 0x03, "AMD Radeon Pro V340"},
- {0x6864, 0x04, "AMD Radeon Instinct MI25x2"},
- {0x6864, 0x05, "AMD Radeon Pro V340"},
- {0x6868, 0x00, "AMD Radeon Pro WX 8200"},
- {0x686C, 0x00, "AMD Radeon Instinct MI25 MxGPU"},
- {0x686C, 0x01, "AMD Radeon Instinct MI25 MxGPU"},
- {0x686C, 0x02, "AMD Radeon Instinct MI25 MxGPU"},
- {0x686C, 0x03, "AMD Radeon Pro V340 MxGPU"},
- {0x686C, 0x04, "AMD Radeon Instinct MI25x2 MxGPU"},
- {0x686C, 0x05, "AMD Radeon Pro V340L MxGPU"},
- {0x686C, 0x06, "AMD Radeon Instinct MI25 MxGPU"},
- {0x687F, 0x01, "AMD Radeon RX Vega"},
- {0x687F, 0xC0, "AMD Radeon RX Vega"},
- {0x687F, 0xC1, "AMD Radeon RX Vega"},
- {0x687F, 0xC3, "AMD Radeon RX Vega"},
- {0x687F, 0xC7, "AMD Radeon RX Vega"},
- {0x6900, 0x00, "AMD Radeon R7 M260"},
- {0x6900, 0x81, "AMD Radeon R7 M360"},
- {0x6900, 0x83, "AMD Radeon R7 M340"},
- {0x6900, 0xC1, "AMD Radeon R5 M465 Series"},
- {0x6900, 0xC3, "AMD Radeon R5 M445 Series"},
- {0x6900, 0xD1, "AMD Radeon 530 Series"},
- {0x6900, 0xD3, "AMD Radeon 530 Series"},
- {0x6901, 0x00, "AMD Radeon R5 M255"},
- {0x6902, 0x00, "AMD Radeon Series"},
- {0x6907, 0x00, "AMD Radeon R5 M255"},
- {0x6907, 0x87, "AMD Radeon R5 M315"},
- {0x6920, 0x00, "AMD Radeon R9 M395X"},
- {0x6920, 0x01, "AMD Radeon R9 M390X"},
- {0x6921, 0x00, "AMD Radeon R9 M390X"},
- {0x6929, 0x00, "AMD FirePro S7150"},
- {0x6929, 0x01, "AMD FirePro S7100X"},
- {0x692B, 0x00, "AMD FirePro W7100"},
- {0x6938, 0x00, "AMD Radeon R9 200 Series"},
- {0x6938, 0xF0, "AMD Radeon R9 200 Series"},
- {0x6938, 0xF1, "AMD Radeon R9 380 Series"},
- {0x6939, 0x00, "AMD Radeon R9 200 Series"},
- {0x6939, 0xF0, "AMD Radeon R9 200 Series"},
- {0x6939, 0xF1, "AMD Radeon R9 380 Series"},
- {0x694C, 0xC0, "AMD Radeon RX Vega M GH Graphics"},
- {0x694E, 0xC0, "AMD Radeon RX Vega M GL Graphics"},
- {0x6980, 0x00, "AMD Radeon Pro WX 3100"},
- {0x6981, 0x00, "AMD Radeon Pro WX 3200 Series"},
- {0x6981, 0x01, "AMD Radeon Pro WX 3200 Series"},
- {0x6981, 0x10, "AMD Radeon Pro WX 3200 Series"},
- {0x6985, 0x00, "AMD Radeon Pro WX 3100"},
- {0x6986, 0x00, "AMD Radeon Pro WX 2100"},
- {0x6987, 0x80, "AMD Embedded Radeon E9171"},
- {0x6987, 0xC0, "AMD Radeon 550X Series"},
- {0x6987, 0xC1, "AMD Radeon RX 640"},
- {0x6987, 0xC3, "AMD Radeon 540X Series"},
- {0x6987, 0xC7, "AMD Radeon 540"},
- {0x6995, 0x00, "AMD Radeon Pro WX 2100"},
- {0x6997, 0x00, "AMD Radeon Pro WX 2100"},
- {0x699F, 0x81, "AMD Embedded Radeon E9170 Series"},
- {0x699F, 0xC0, "AMD Radeon 500 Series"},
- {0x699F, 0xC1, "AMD Radeon 540 Series"},
- {0x699F, 0xC3, "AMD Radeon 500 Series"},
- {0x699F, 0xC7, "AMD Radeon RX 550 / 550 Series"},
- {0x699F, 0xC9, "AMD Radeon 540"},
- {0x6FDF, 0xE7, "AMD Radeon RX 590 GME"},
- {0x6FDF, 0xEF, "AMD Radeon RX 580 2048SP"},
- {0x7300, 0xC1, "AMD FirePro S9300 x2"},
- {0x7300, 0xC8, "AMD Radeon R9 Fury Series"},
- {0x7300, 0xC9, "AMD Radeon Pro Duo"},
- {0x7300, 0xCA, "AMD Radeon R9 Fury Series"},
- {0x7300, 0xCB, "AMD Radeon R9 Fury Series"},
- {0x7312, 0x00, "AMD Radeon Pro W5700"},
- {0x731E, 0xC6, "AMD Radeon RX 5700XTB"},
- {0x731E, 0xC7, "AMD Radeon RX 5700B"},
- {0x731F, 0xC0, "AMD Radeon RX 5700 XT 50th Anniversary"},
- {0x731F, 0xC1, "AMD Radeon RX 5700 XT"},
- {0x731F, 0xC2, "AMD Radeon RX 5600M"},
- {0x731F, 0xC3, "AMD Radeon RX 5700M"},
- {0x731F, 0xC4, "AMD Radeon RX 5700"},
- {0x731F, 0xC5, "AMD Radeon RX 5700 XT"},
- {0x731F, 0xCA, "AMD Radeon RX 5600 XT"},
- {0x731F, 0xCB, "AMD Radeon RX 5600 OEM"},
- {0x7340, 0xC1, "AMD Radeon RX 5500M"},
- {0x7340, 0xC3, "AMD Radeon RX 5300M"},
- {0x7340, 0xC5, "AMD Radeon RX 5500 XT"},
- {0x7340, 0xC7, "AMD Radeon RX 5500"},
- {0x7340, 0xC9, "AMD Radeon RX 5500XTB"},
- {0x7340, 0xCF, "AMD Radeon RX 5300"},
- {0x7341, 0x00, "AMD Radeon Pro W5500"},
- {0x7347, 0x00, "AMD Radeon Pro W5500M"},
- {0x7360, 0x41, "AMD Radeon Pro 5600M"},
- {0x7360, 0xC3, "AMD Radeon Pro V520"},
- {0x738C, 0x01, "AMD Instinct MI100"},
- {0x73A3, 0x00, "AMD Radeon Pro W6800"},
- {0x73A5, 0xC0, "AMD Radeon RX 6950 XT"},
- {0x73AF, 0xC0, "AMD Radeon RX 6900 XT"},
- {0x73BF, 0xC0, "AMD Radeon RX 6900 XT"},
- {0x73BF, 0xC1, "AMD Radeon RX 6800 XT"},
- {0x73BF, 0xC3, "AMD Radeon RX 6800"},
- {0x73DF, 0xC0, "AMD Radeon RX 6750 XT"},
- {0x73DF, 0xC1, "AMD Radeon RX 6700 XT"},
- {0x73DF, 0xC2, "AMD Radeon RX 6800M"},
- {0x73DF, 0xC3, "AMD Radeon RX 6800M"},
- {0x73DF, 0xC5, "AMD Radeon RX 6700 XT"},
- {0x73DF, 0xCF, "AMD Radeon RX 6700M"},
- {0x73DF, 0xD7, "AMD TDC-235"},
- {0x73E1, 0x00, "AMD Radeon Pro W6600M"},
- {0x73E3, 0x00, "AMD Radeon Pro W6600"},
- {0x73EF, 0xC0, "AMD Radeon RX 6800S"},
- {0x73EF, 0xC1, "AMD Radeon RX 6650 XT"},
- {0x73EF, 0xC2, "AMD Radeon RX 6700S"},
- {0x73EF, 0xC3, "AMD Radeon RX 6650M"},
- {0x73EF, 0xC4, "AMD Radeon RX 6650M XT"},
- {0x73FF, 0xC1, "AMD Radeon RX 6600 XT"},
- {0x73FF, 0xC3, "AMD Radeon RX 6600M"},
- {0x73FF, 0xC7, "AMD Radeon RX 6600"},
- {0x73FF, 0xCB, "AMD Radeon RX 6600S"},
- {0x7408, 0x00, "AMD Instinct MI250X"},
- {0x740C, 0x01, "AMD Instinct MI250X / MI250"},
- {0x740F, 0x02, "AMD Instinct MI210"},
- {0x7421, 0x00, "AMD Radeon Pro W6500M"},
- {0x7422, 0x00, "AMD Radeon Pro W6400"},
- {0x7423, 0x00, "AMD Radeon Pro W6300M"},
- {0x7423, 0x01, "AMD Radeon Pro W6300"},
- {0x7424, 0x00, "AMD Radeon RX 6300"},
- {0x743F, 0xC1, "AMD Radeon RX 6500 XT"},
- {0x743F, 0xC3, "AMD Radeon RX 6500"},
- {0x743F, 0xC3, "AMD Radeon RX 6500M"},
- {0x743F, 0xC7, "AMD Radeon RX 6400"},
- {0x743F, 0xCF, "AMD Radeon RX 6300M"},
- {0x744C, 0xC8, "AMD Radeon RX 7900 XTX"},
- {0x744C, 0xCC, "AMD Radeon RX 7900 XT"},
- {0x7480, 0xC1, "AMD Radeon RX 7700S"},
- {0x7480, 0xC3, "AMD Radeon RX 7600S"},
- {0x7480, 0xC7, "AMD Radeon RX 7600M XT"},
- {0x7483, 0xCF, "AMD Radeon RX 7600M"},
- {0x9830, 0x00, "AMD Radeon HD 8400 / R3 Series"},
- {0x9831, 0x00, "AMD Radeon HD 8400E"},
- {0x9832, 0x00, "AMD Radeon HD 8330"},
- {0x9833, 0x00, "AMD Radeon HD 8330E"},
- {0x9834, 0x00, "AMD Radeon HD 8210"},
- {0x9835, 0x00, "AMD Radeon HD 8210E"},
- {0x9836, 0x00, "AMD Radeon HD 8200 / R3 Series"},
- {0x9837, 0x00, "AMD Radeon HD 8280E"},
- {0x9838, 0x00, "AMD Radeon HD 8200 / R3 series"},
- {0x9839, 0x00, "AMD Radeon HD 8180"},
- {0x983D, 0x00, "AMD Radeon HD 8250"},
- {0x9850, 0x00, "AMD Radeon R3 Graphics"},
- {0x9850, 0x03, "AMD Radeon R3 Graphics"},
- {0x9850, 0x40, "AMD Radeon R2 Graphics"},
- {0x9850, 0x45, "AMD Radeon R3 Graphics"},
- {0x9851, 0x00, "AMD Radeon R4 Graphics"},
- {0x9851, 0x01, "AMD Radeon R5E Graphics"},
- {0x9851, 0x05, "AMD Radeon R5 Graphics"},
- {0x9851, 0x06, "AMD Radeon R5E Graphics"},
- {0x9851, 0x40, "AMD Radeon R4 Graphics"},
- {0x9851, 0x45, "AMD Radeon R5 Graphics"},
- {0x9852, 0x00, "AMD Radeon R2 Graphics"},
- {0x9852, 0x40, "AMD Radeon E1 Graphics"},
- {0x9853, 0x00, "AMD Radeon R2 Graphics"},
- {0x9853, 0x01, "AMD Radeon R4E Graphics"},
- {0x9853, 0x03, "AMD Radeon R2 Graphics"},
- {0x9853, 0x05, "AMD Radeon R1E Graphics"},
- {0x9853, 0x06, "AMD Radeon R1E Graphics"},
- {0x9853, 0x07, "AMD Radeon R1E Graphics"},
- {0x9853, 0x08, "AMD Radeon R1E Graphics"},
- {0x9853, 0x40, "AMD Radeon R2 Graphics"},
- {0x9854, 0x00, "AMD Radeon R3 Graphics"},
- {0x9854, 0x01, "AMD Radeon R3E Graphics"},
- {0x9854, 0x02, "AMD Radeon R3 Graphics"},
- {0x9854, 0x05, "AMD Radeon R2 Graphics"},
- {0x9854, 0x06, "AMD Radeon R4 Graphics"},
- {0x9854, 0x07, "AMD Radeon R3 Graphics"},
- {0x9855, 0x02, "AMD Radeon R6 Graphics"},
- {0x9855, 0x05, "AMD Radeon R4 Graphics"},
- {0x9856, 0x00, "AMD Radeon R2 Graphics"},
- {0x9856, 0x01, "AMD Radeon R2E Graphics"},
- {0x9856, 0x02, "AMD Radeon R2 Graphics"},
- {0x9856, 0x05, "AMD Radeon R1E Graphics"},
- {0x9856, 0x06, "AMD Radeon R2 Graphics"},
- {0x9856, 0x07, "AMD Radeon R1E Graphics"},
- {0x9856, 0x08, "AMD Radeon R1E Graphics"},
- {0x9856, 0x13, "AMD Radeon R1E Graphics"},
- {0x9874, 0x81, "AMD Radeon R6 Graphics"},
- {0x9874, 0x84, "AMD Radeon R7 Graphics"},
- {0x9874, 0x85, "AMD Radeon R6 Graphics"},
- {0x9874, 0x87, "AMD Radeon R5 Graphics"},
- {0x9874, 0x88, "AMD Radeon R7E Graphics"},
- {0x9874, 0x89, "AMD Radeon R6E Graphics"},
- {0x9874, 0xC4, "AMD Radeon R7 Graphics"},
- {0x9874, 0xC5, "AMD Radeon R6 Graphics"},
- {0x9874, 0xC6, "AMD Radeon R6 Graphics"},
- {0x9874, 0xC7, "AMD Radeon R5 Graphics"},
- {0x9874, 0xC8, "AMD Radeon R7 Graphics"},
- {0x9874, 0xC9, "AMD Radeon R7 Graphics"},
- {0x9874, 0xCA, "AMD Radeon R5 Graphics"},
- {0x9874, 0xCB, "AMD Radeon R5 Graphics"},
- {0x9874, 0xCC, "AMD Radeon R7 Graphics"},
- {0x9874, 0xCD, "AMD Radeon R7 Graphics"},
- {0x9874, 0xCE, "AMD Radeon R5 Graphics"},
- {0x9874, 0xE1, "AMD Radeon R7 Graphics"},
- {0x9874, 0xE2, "AMD Radeon R7 Graphics"},
- {0x9874, 0xE3, "AMD Radeon R7 Graphics"},
- {0x9874, 0xE4, "AMD Radeon R7 Graphics"},
- {0x9874, 0xE5, "AMD Radeon R5 Graphics"},
- {0x9874, 0xE6, "AMD Radeon R5 Graphics"},
- {0x98E4, 0x80, "AMD Radeon R5E Graphics"},
- {0x98E4, 0x81, "AMD Radeon R4E Graphics"},
- {0x98E4, 0x83, "AMD Radeon R2E Graphics"},
- {0x98E4, 0x84, "AMD Radeon R2E Graphics"},
- {0x98E4, 0x86, "AMD Radeon R1E Graphics"},
- {0x98E4, 0xC0, "AMD Radeon R4 Graphics"},
- {0x98E4, 0xC1, "AMD Radeon R5 Graphics"},
- {0x98E4, 0xC2, "AMD Radeon R4 Graphics"},
- {0x98E4, 0xC4, "AMD Radeon R5 Graphics"},
- {0x98E4, 0xC6, "AMD Radeon R5 Graphics"},
- {0x98E4, 0xC8, "AMD Radeon R4 Graphics"},
- {0x98E4, 0xC9, "AMD Radeon R4 Graphics"},
- {0x98E4, 0xCA, "AMD Radeon R5 Graphics"},
- {0x98E4, 0xD0, "AMD Radeon R2 Graphics"},
- {0x98E4, 0xD1, "AMD Radeon R2 Graphics"},
- {0x98E4, 0xD2, "AMD Radeon R2 Graphics"},
- {0x98E4, 0xD4, "AMD Radeon R2 Graphics"},
- {0x98E4, 0xD9, "AMD Radeon R5 Graphics"},
- {0x98E4, 0xDA, "AMD Radeon R5 Graphics"},
- {0x98E4, 0xDB, "AMD Radeon R3 Graphics"},
- {0x98E4, 0xE1, "AMD Radeon R3 Graphics"},
- {0x98E4, 0xE2, "AMD Radeon R3 Graphics"},
- {0x98E4, 0xE9, "AMD Radeon R4 Graphics"},
- {0x98E4, 0xEA, "AMD Radeon R4 Graphics"},
- {0x98E4, 0xEB, "AMD Radeon R3 Graphics"},
- {0x98E4, 0xEC, "AMD Radeon R4 Graphics"},
- {0x0000, 0x00, "unknown AMD GPU"} // this must always be the last item
-};
-
-struct card {
- const char *pathname;
- struct amdgpu_id_struct id;
-
- /* GPU and VRAM utilizations */
-
- const char *pathname_util_gpu;
- RRDSET *st_util_gpu;
- RRDDIM *rd_util_gpu;
- collected_number util_gpu;
-
- const char *pathname_util_mem;
- RRDSET *st_util_mem;
- RRDDIM *rd_util_mem;
- collected_number util_mem;
-
-
- /* GPU and VRAM clock frequencies */
-
- const char *pathname_clk_gpu;
- procfile *ff_clk_gpu;
- RRDSET *st_clk_gpu;
- RRDDIM *rd_clk_gpu;
- collected_number clk_gpu;
-
- const char *pathname_clk_mem;
- procfile *ff_clk_mem;
- RRDSET *st_clk_mem;
- RRDDIM *rd_clk_mem;
- collected_number clk_mem;
-
-
- /* GPU memory usage */
-
- const char *pathname_mem_used_vram;
- const char *pathname_mem_total_vram;
-
- RRDSET *st_mem_usage_perc_vram;
- RRDDIM *rd_mem_used_perc_vram;
-
- RRDSET *st_mem_usage_vram;
- RRDDIM *rd_mem_used_vram;
- RRDDIM *rd_mem_free_vram;
-
- collected_number used_vram;
- collected_number total_vram;
-
-
- const char *pathname_mem_used_vis_vram;
- const char *pathname_mem_total_vis_vram;
-
- RRDSET *st_mem_usage_perc_vis_vram;
- RRDDIM *rd_mem_used_perc_vis_vram;
-
- RRDSET *st_mem_usage_vis_vram;
- RRDDIM *rd_mem_used_vis_vram;
- RRDDIM *rd_mem_free_vis_vram;
-
- collected_number used_vis_vram;
- collected_number total_vis_vram;
-
-
- const char *pathname_mem_used_gtt;
- const char *pathname_mem_total_gtt;
-
- RRDSET *st_mem_usage_perc_gtt;
- RRDDIM *rd_mem_used_perc_gtt;
-
- RRDSET *st_mem_usage_gtt;
- RRDDIM *rd_mem_used_gtt;
- RRDDIM *rd_mem_free_gtt;
-
- collected_number used_gtt;
- collected_number total_gtt;
-
- struct do_rrd_x *do_rrd_x_root;
-
- struct card *next;
-};
-static struct card *card_root = NULL;
-
-static void card_free(struct card *c){
- if(c->pathname) freez((void *) c->pathname);
- if(c->id.marketing_name) freez((void *) c->id.marketing_name);
-
- /* remove card from linked list */
- if(c == card_root) card_root = c->next;
- else {
- struct card *last;
- for(last = card_root; last && last->next != c; last = last->next);
- if(last) last->next = c->next;
- }
-
- freez(c);
-}
-
-static int check_card_is_amdgpu(const char *const pathname){
- int rc = -1;
-
- procfile *ff = procfile_open(pathname, " ", PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(unlikely(!ff)){
- rc = -1;
- goto cleanup;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff || procfile_lines(ff) < 1 || procfile_linewords(ff, 0) < 1)){
- rc = -2;
- goto cleanup;
- }
-
- for(size_t l = 0; l < procfile_lines(ff); l++) {
- if(!strcmp(procfile_lineword(ff, l, 0), "DRIVER=amdgpu")){
- rc = 0;
- goto cleanup;
- }
- }
-
- rc = -3; // no match
-
-cleanup:
- procfile_close(ff);
- return rc;
-}
-
-static int read_clk_freq_file(procfile **p_ff, const char *const pathname, collected_number *num){
- if(unlikely(!*p_ff)){
- *p_ff = procfile_open(pathname, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(unlikely(!*p_ff)) return -2;
- }
-
- if(unlikely(NULL == (*p_ff = procfile_readall(*p_ff)))) return -3;
-
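-    // Clock files such as pp_dpm_sclk typically list one frequency level per
-    // line ("1: 800Mhz *"), with the '*' marking the currently active level;
-    // extract the number from the starred line.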
- for(size_t l = 0; l < procfile_lines(*p_ff) ; l++) {
- char *str_with_units = NULL;
- if((*p_ff)->lines->lines[l].words >= 3 && !strcmp(procfile_lineword((*p_ff), l, 2), "*")) //format: X: collected_number *
- str_with_units = procfile_lineword((*p_ff), l, 1);
- else if ((*p_ff)->lines->lines[l].words == 2 && !strcmp(procfile_lineword((*p_ff), l, 1), "*")) //format: collected_number *
- str_with_units = procfile_lineword((*p_ff), l, 0);
-
-        if (str_with_units) {
-            char *delim = strchr(str_with_units, 'M');
-            if (!delim)
-                continue; // not in the expected "<freq>Mhz" form
-            char str_without_units[10];
-            size_t len = (size_t)(delim - str_with_units);
-            if (len >= sizeof(str_without_units))
-                len = sizeof(str_without_units) - 1;
-            memcpy(str_without_units, str_with_units, len);
-            str_without_units[len] = '\0'; // memcpy() does not NUL-terminate
-            *num = str2ll(str_without_units, NULL);
-            return 0;
-        }
- }
-
-    procfile_close(*p_ff);
-    *p_ff = NULL; // avoid handing a dangling handle back to the caller
-    return -4;
-}
-
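-// Compose a chart id from three parts. Returns a static buffer, which is safe
-// here because the proc plugin calls it from a single thread.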
-static char *set_id(const char *const suf_1, const char *const suf_2, const char *const suf_3){
- static char id[RRD_ID_LENGTH_MAX + 1];
- snprintfz(id, RRD_ID_LENGTH_MAX, "%s_%s_%s", suf_1, suf_2, suf_3);
- return id;
-}
-
-typedef int (*do_rrd_x_func)(struct card *const c);
-
-struct do_rrd_x {
- do_rrd_x_func func;
- struct do_rrd_x *next;
-};
-
-static void add_do_rrd_x(struct card *const c, const do_rrd_x_func func){
- struct do_rrd_x *const drrd = callocz(1, sizeof(struct do_rrd_x));
- drrd->func = func;
- drrd->next = c->do_rrd_x_root;
- c->do_rrd_x_root = drrd;
-}
-
-static void rm_do_rrd_x(struct card *const c, struct do_rrd_x *const drrd){
- if(drrd == c->do_rrd_x_root) c->do_rrd_x_root = drrd->next;
- else {
- struct do_rrd_x *last;
- for(last = c->do_rrd_x_root; last && last->next != drrd; last = last->next);
- if(last) last->next = drrd->next;
- }
-
- freez(drrd);
-}
-
-static int do_rrd_util_gpu(struct card *const c){
- if(likely(!read_single_number_file(c->pathname_util_gpu, (unsigned long long *) &c->util_gpu))){
- rrddim_set_by_pointer(c->st_util_gpu, c->rd_util_gpu, c->util_gpu);
- rrdset_done(c->st_util_gpu);
- return 0;
- }
- else {
- collector_error("Cannot read util_gpu for %s: [%s]", c->pathname, c->id.marketing_name);
- freez((void *) c->pathname_util_gpu);
- rrdset_is_obsolete___safe_from_collector_thread(c->st_util_gpu);
- return 1;
- }
-}
-
-static int do_rrd_util_mem(struct card *const c){
- if(likely(!read_single_number_file(c->pathname_util_mem, (unsigned long long *) &c->util_mem))){
- rrddim_set_by_pointer(c->st_util_mem, c->rd_util_mem, c->util_mem);
- rrdset_done(c->st_util_mem);
- return 0;
- }
- else {
- collector_error("Cannot read util_mem for %s: [%s]", c->pathname, c->id.marketing_name);
- freez((void *) c->pathname_util_mem);
- rrdset_is_obsolete___safe_from_collector_thread(c->st_util_mem);
- return 1;
- }
-}
-
-static int do_rrd_clk_gpu(struct card *const c){
- if(likely(!read_clk_freq_file(&c->ff_clk_gpu, (char *) c->pathname_clk_gpu, &c->clk_gpu))){
- rrddim_set_by_pointer(c->st_clk_gpu, c->rd_clk_gpu, c->clk_gpu);
- rrdset_done(c->st_clk_gpu);
- return 0;
- }
- else {
- collector_error("Cannot read clk_gpu for %s: [%s]", c->pathname, c->id.marketing_name);
- freez((void *) c->pathname_clk_gpu);
- rrdset_is_obsolete___safe_from_collector_thread(c->st_clk_gpu);
- return 1;
- }
-}
-
-static int do_rrd_clk_mem(struct card *const c){
- if(likely(!read_clk_freq_file(&c->ff_clk_mem, (char *) c->pathname_clk_mem, &c->clk_mem))){
- rrddim_set_by_pointer(c->st_clk_mem, c->rd_clk_mem, c->clk_mem);
- rrdset_done(c->st_clk_mem);
- return 0;
- }
- else {
- collector_error("Cannot read clk_mem for %s: [%s]", c->pathname, c->id.marketing_name);
- freez((void *) c->pathname_clk_mem);
- rrdset_is_obsolete___safe_from_collector_thread(c->st_clk_mem);
- return 1;
- }
-}
-
-static int do_rrd_vram(struct card *const c){
- if(likely(!read_single_number_file(c->pathname_mem_used_vram, (unsigned long long *) &c->used_vram) &&
- c->total_vram)){
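-        // used/total is scaled by 10000 so the percentage keeps two decimal
-        // places once the chart's dimension divisor (set at creation) is applied.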
- rrddim_set_by_pointer( c->st_mem_usage_perc_vram,
- c->rd_mem_used_perc_vram,
- c->used_vram * 10000 / c->total_vram);
- rrdset_done(c->st_mem_usage_perc_vram);
-
- rrddim_set_by_pointer(c->st_mem_usage_vram, c->rd_mem_used_vram, c->used_vram);
- rrddim_set_by_pointer(c->st_mem_usage_vram, c->rd_mem_free_vram, c->total_vram - c->used_vram);
- rrdset_done(c->st_mem_usage_vram);
- return 0;
- }
- else {
- collector_error("Cannot read used_vram for %s: [%s]", c->pathname, c->id.marketing_name);
- freez((void *) c->pathname_mem_used_vram);
- freez((void *) c->pathname_mem_total_vram);
- rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_perc_vram);
- rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_vram);
- return 1;
- }
-}
-
-static int do_rrd_vis_vram(struct card *const c){
- if(likely(!read_single_number_file(c->pathname_mem_used_vis_vram, (unsigned long long *) &c->used_vis_vram) &&
- c->total_vis_vram)){
- rrddim_set_by_pointer( c->st_mem_usage_perc_vis_vram,
- c->rd_mem_used_perc_vis_vram,
- c->used_vis_vram * 10000 / c->total_vis_vram);
- rrdset_done(c->st_mem_usage_perc_vis_vram);
-
- rrddim_set_by_pointer(c->st_mem_usage_vis_vram, c->rd_mem_used_vis_vram, c->used_vis_vram);
- rrddim_set_by_pointer(c->st_mem_usage_vis_vram, c->rd_mem_free_vis_vram, c->total_vis_vram - c->used_vis_vram);
- rrdset_done(c->st_mem_usage_vis_vram);
- return 0;
- }
- else {
- collector_error("Cannot read used_vis_vram for %s: [%s]", c->pathname, c->id.marketing_name);
- freez((void *) c->pathname_mem_used_vis_vram);
- freez((void *) c->pathname_mem_total_vis_vram);
- rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_perc_vis_vram);
- rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_vis_vram);
- return 1;
- }
-}
-
-static int do_rrd_gtt(struct card *const c){
- if(likely(!read_single_number_file(c->pathname_mem_used_gtt, (unsigned long long *) &c->used_gtt) &&
- c->total_gtt)){
- rrddim_set_by_pointer( c->st_mem_usage_perc_gtt,
- c->rd_mem_used_perc_gtt,
- c->used_gtt * 10000 / c->total_gtt);
- rrdset_done(c->st_mem_usage_perc_gtt);
-
- rrddim_set_by_pointer(c->st_mem_usage_gtt, c->rd_mem_used_gtt, c->used_gtt);
- rrddim_set_by_pointer(c->st_mem_usage_gtt, c->rd_mem_free_gtt, c->total_gtt - c->used_gtt);
- rrdset_done(c->st_mem_usage_gtt);
- return 0;
- }
- else {
- collector_error("Cannot read used_gtt for %s: [%s]", c->pathname, c->id.marketing_name);
- freez((void *) c->pathname_mem_used_gtt);
- freez((void *) c->pathname_mem_total_gtt);
- rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_perc_gtt);
- rrdset_is_obsolete___safe_from_collector_thread(c->st_mem_usage_gtt);
- return 1;
- }
-}
-
-int do_sys_class_drm(int update_every, usec_t dt) {
- (void)dt;
-
- static DIR *drm_dir = NULL;
-
- int chart_prio = NETDATA_CHART_PRIO_DRM_AMDGPU;
-
- if(unlikely(!drm_dir)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/drm");
- char *drm_dir_name = config_get(CONFIG_SECTION_PLUGIN_PROC_DRM, "directory to monitor", filename);
- if(unlikely(NULL == (drm_dir = opendir(drm_dir_name)))){
- collector_error("Cannot read directory '%s'", drm_dir_name);
- return 1;
- }
-
- struct dirent *de = NULL;
- while(likely(de = readdir(drm_dir))) {
- if( de->d_type == DT_DIR && ((de->d_name[0] == '.' && de->d_name[1] == '\0') ||
- (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0'))) continue;
-
- if(de->d_type == DT_LNK && !strncmp(de->d_name, "card", 4) && !strchr(de->d_name, '-')) {
- char filename[FILENAME_MAX + 1];
-
- snprintfz(filename, FILENAME_MAX, "%s/%s/%s", drm_dir_name, de->d_name, "device/uevent");
- if(check_card_is_amdgpu(filename)) continue;
-
- /* Get static info */
-
- struct card *const c = callocz(1, sizeof(struct card));
- snprintfz(filename, FILENAME_MAX, "%s/%s", drm_dir_name, de->d_name);
- c->pathname = strdupz(filename);
-
- snprintfz(filename, FILENAME_MAX, "%s/%s", c->pathname, "device/device");
- if(read_single_base64_or_hex_number_file(filename, &c->id.asic_id)){
- collector_error("Cannot read asic_id from '%s'", filename);
- card_free(c);
- continue;
- }
-
- snprintfz(filename, FILENAME_MAX, "%s/%s", c->pathname, "device/revision");
- if(read_single_base64_or_hex_number_file(filename, &c->id.pci_rev_id)){
- collector_error("Cannot read pci_rev_id from '%s'", filename);
- card_free(c);
- continue;
- }
-
- for(int i = 0; amdgpu_ids[i].asic_id; i++){
- if(c->id.asic_id == amdgpu_ids[i].asic_id && c->id.pci_rev_id == amdgpu_ids[i].pci_rev_id){
- c->id.marketing_name = strdupz(amdgpu_ids[i].marketing_name);
- break;
- }
- }
- if(!c->id.marketing_name)
- c->id.marketing_name = strdupz(amdgpu_ids[sizeof(amdgpu_ids)/sizeof(amdgpu_ids[0]) - 1].marketing_name);
-
-
- collected_number tmp_val;
- #define set_prop_pathname(prop_filename, prop_pathname, p_ff){ \
- snprintfz(filename, FILENAME_MAX, "%s/%s", c->pathname, prop_filename); \
- if((p_ff && !read_clk_freq_file(p_ff, filename, &tmp_val)) || \
- !read_single_number_file(filename, (unsigned long long *) &tmp_val)) \
- prop_pathname = strdupz(filename); \
- else \
- collector_info("Cannot read file '%s'", filename); \
- }
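-                /* set_prop_pathname() probes a sysfs attribute: the pathname is kept
-                   only when the file is readable (clock files are parsed with
-                   read_clk_freq_file(), plain counters with read_single_number_file());
-                   otherwise it stays NULL and the corresponding chart is never created */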
-
- /* Initialize GPU and VRAM utilization metrics */
-
- set_prop_pathname("device/gpu_busy_percent", c->pathname_util_gpu, NULL);
-
- if(c->pathname_util_gpu){
- c->st_util_gpu = rrdset_create_localhost(
- AMDGPU_CHART_TYPE
- , set_id("gpu_utilization", c->id.marketing_name, de->d_name)
- , NULL
- , "utilization"
- , AMDGPU_CHART_TYPE ".gpu_utilization"
- , "GPU utilization"
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DRM_NAME
- , chart_prio++
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdlabels_add(c->st_util_gpu->rrdlabels, "product_name", c->id.marketing_name, RRDLABEL_SRC_AUTO);
-
- c->rd_util_gpu = rrddim_add(c->st_util_gpu, "utilization", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_do_rrd_x(c, do_rrd_util_gpu);
- }
-
- set_prop_pathname("device/mem_busy_percent", c->pathname_util_mem, NULL);
-
- if(c->pathname_util_mem){
- c->st_util_mem = rrdset_create_localhost(
- AMDGPU_CHART_TYPE
- , set_id("gpu_mem_utilization", c->id.marketing_name, de->d_name)
- , NULL
- , "utilization"
- , AMDGPU_CHART_TYPE ".gpu_mem_utilization"
- , "GPU memory utilization"
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DRM_NAME
- , chart_prio++
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdlabels_add(c->st_util_mem->rrdlabels, "product_name", c->id.marketing_name, RRDLABEL_SRC_AUTO);
-
- c->rd_util_mem = rrddim_add(c->st_util_mem, "utilization", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_do_rrd_x(c, do_rrd_util_mem);
- }
-
-
- /* Initialize GPU and VRAM clock frequency metrics */
-
- set_prop_pathname("device/pp_dpm_sclk", c->pathname_clk_gpu, &c->ff_clk_gpu);
-
- if(c->pathname_clk_gpu){
- c->st_clk_gpu = rrdset_create_localhost(
- AMDGPU_CHART_TYPE
- , set_id("gpu_clk_frequency", c->id.marketing_name, de->d_name)
- , NULL
- , "frequency"
- , AMDGPU_CHART_TYPE ".gpu_clk_frequency"
- , "GPU clock frequency"
- , "MHz"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DRM_NAME
- , chart_prio++
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdlabels_add(c->st_clk_gpu->rrdlabels, "product_name", c->id.marketing_name, RRDLABEL_SRC_AUTO);
-
- c->rd_clk_gpu = rrddim_add(c->st_clk_gpu, "frequency", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_do_rrd_x(c, do_rrd_clk_gpu);
-
- }
-
- set_prop_pathname("device/pp_dpm_mclk", c->pathname_clk_mem, &c->ff_clk_mem);
-
- if(c->pathname_clk_mem){
- c->st_clk_mem = rrdset_create_localhost(
- AMDGPU_CHART_TYPE
- , set_id("gpu_mem_clk_frequency", c->id.marketing_name, de->d_name)
- , NULL
- , "frequency"
- , AMDGPU_CHART_TYPE ".gpu_mem_clk_frequency"
- , "GPU memory clock frequency"
- , "MHz"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DRM_NAME
- , chart_prio++
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdlabels_add(c->st_clk_mem->rrdlabels, "product_name", c->id.marketing_name, RRDLABEL_SRC_AUTO);
-
- c->rd_clk_mem = rrddim_add(c->st_clk_mem, "frequency", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_do_rrd_x(c, do_rrd_clk_mem);
- }
-
-
- /* Initialize GPU memory usage metrics */
-
- set_prop_pathname("device/mem_info_vram_used", c->pathname_mem_used_vram, NULL);
- set_prop_pathname("device/mem_info_vram_total", c->pathname_mem_total_vram, NULL);
- if(c->pathname_mem_total_vram) c->total_vram = tmp_val;
-
- if(c->pathname_mem_used_vram && c->pathname_mem_total_vram){
- c->st_mem_usage_perc_vram = rrdset_create_localhost(
- AMDGPU_CHART_TYPE
- , set_id("gpu_mem_vram_usage_perc", c->id.marketing_name, de->d_name)
- , NULL
- , "memory_usage"
- , AMDGPU_CHART_TYPE ".gpu_mem_vram_usage_perc"
- , "VRAM memory usage percentage"
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DRM_NAME
- , chart_prio++
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdlabels_add(c->st_mem_usage_perc_vram->rrdlabels, "product_name", c->id.marketing_name, RRDLABEL_SRC_AUTO);
-
- c->rd_mem_used_perc_vram = rrddim_add(c->st_mem_usage_perc_vram, "usage", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
-
-
- c->st_mem_usage_vram = rrdset_create_localhost(
- AMDGPU_CHART_TYPE
- , set_id("gpu_mem_vram_usage", c->id.marketing_name, de->d_name)
- , NULL
- , "memory_usage"
- , AMDGPU_CHART_TYPE ".gpu_mem_vram_usage"
- , "VRAM memory usage"
- , "bytes"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DRM_NAME
- , chart_prio++
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdlabels_add(c->st_mem_usage_vram->rrdlabels, "product_name", c->id.marketing_name, RRDLABEL_SRC_AUTO);
-
- c->rd_mem_free_vram = rrddim_add(c->st_mem_usage_vram, "free", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- c->rd_mem_used_vram = rrddim_add(c->st_mem_usage_vram, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
-
- add_do_rrd_x(c, do_rrd_vram);
- }
-
- set_prop_pathname("device/mem_info_vis_vram_used", c->pathname_mem_used_vis_vram, NULL);
- set_prop_pathname("device/mem_info_vis_vram_total", c->pathname_mem_total_vis_vram, NULL);
- if(c->pathname_mem_total_vis_vram) c->total_vis_vram = tmp_val;
-
- if(c->pathname_mem_used_vis_vram && c->pathname_mem_total_vis_vram){
- c->st_mem_usage_perc_vis_vram = rrdset_create_localhost(
- AMDGPU_CHART_TYPE
- , set_id("gpu_mem_vis_vram_usage_perc", c->id.marketing_name, de->d_name)
- , NULL
- , "memory_usage"
- , AMDGPU_CHART_TYPE ".gpu_mem_vis_vram_usage_perc"
- , "visible VRAM memory usage percentage"
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DRM_NAME
- , chart_prio++
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdlabels_add(c->st_mem_usage_perc_vis_vram->rrdlabels, "product_name", c->id.marketing_name, RRDLABEL_SRC_AUTO);
-
- c->rd_mem_used_perc_vis_vram = rrddim_add(c->st_mem_usage_perc_vis_vram, "usage", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
-
-
- c->st_mem_usage_vis_vram = rrdset_create_localhost(
- AMDGPU_CHART_TYPE
- , set_id("gpu_mem_vis_vram_usage", c->id.marketing_name, de->d_name)
- , NULL
- , "memory_usage"
- , AMDGPU_CHART_TYPE ".gpu_mem_vis_vram_usage"
- , "visible VRAM memory usage"
- , "bytes"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DRM_NAME
- , chart_prio++
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdlabels_add(c->st_mem_usage_vis_vram->rrdlabels, "product_name", c->id.marketing_name, RRDLABEL_SRC_AUTO);
-
- c->rd_mem_free_vis_vram = rrddim_add(c->st_mem_usage_vis_vram, "free", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- c->rd_mem_used_vis_vram = rrddim_add(c->st_mem_usage_vis_vram, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
-
- add_do_rrd_x(c, do_rrd_vis_vram);
- }
-
- set_prop_pathname("device/mem_info_gtt_used", c->pathname_mem_used_gtt, NULL);
- set_prop_pathname("device/mem_info_gtt_total", c->pathname_mem_total_gtt, NULL);
- if(c->pathname_mem_total_gtt) c->total_gtt = tmp_val;
-
- if(c->pathname_mem_used_gtt && c->pathname_mem_total_gtt){
- c->st_mem_usage_perc_gtt = rrdset_create_localhost(
- AMDGPU_CHART_TYPE
- , set_id("gpu_mem_gtt_usage_perc", c->id.marketing_name, de->d_name)
- , NULL
- , "memory_usage"
- , AMDGPU_CHART_TYPE ".gpu_mem_gtt_usage_perc"
- , "GTT memory usage percentage"
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DRM_NAME
- , chart_prio++
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdlabels_add(c->st_mem_usage_perc_gtt->rrdlabels, "product_name", c->id.marketing_name, RRDLABEL_SRC_AUTO);
-
- c->rd_mem_used_perc_gtt = rrddim_add(c->st_mem_usage_perc_gtt, "usage", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
-
- c->st_mem_usage_gtt = rrdset_create_localhost(
- AMDGPU_CHART_TYPE
- , set_id("gpu_mem_gtt_usage", c->id.marketing_name, de->d_name)
- , NULL
- , "memory_usage"
- , AMDGPU_CHART_TYPE ".gpu_mem_gtt_usage"
- , "GTT memory usage"
- , "bytes"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DRM_NAME
- , chart_prio++
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdlabels_add(c->st_mem_usage_gtt->rrdlabels, "product_name", c->id.marketing_name, RRDLABEL_SRC_AUTO);
-
- c->rd_mem_free_gtt = rrddim_add(c->st_mem_usage_gtt, "free", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- c->rd_mem_used_gtt = rrddim_add(c->st_mem_usage_gtt, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
-
- add_do_rrd_x(c, do_rrd_gtt);
- }
-
- c->next = card_root;
- card_root = c;
- }
- }
- }
-
-
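-    /* Run every registered per-metric collector on every card. A collector that
-       returns non-zero has failed permanently and is unregistered; a card left
-       with no collectors is freed. The module stays enabled (return 0) only
-       while at least one card remains. */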
- struct card *card_cur = card_root,
- *card_next;
- while(card_cur){
-
- struct do_rrd_x *do_rrd_x_cur = card_cur->do_rrd_x_root,
- *do_rrd_x_next;
- while(do_rrd_x_cur){
- if(unlikely(do_rrd_x_cur->func(card_cur))) {
- do_rrd_x_next = do_rrd_x_cur->next;
- rm_do_rrd_x(card_cur, do_rrd_x_cur);
- do_rrd_x_cur = do_rrd_x_next;
- }
- else do_rrd_x_cur = do_rrd_x_cur->next;
- }
-
- if(unlikely(!card_cur->do_rrd_x_root)){
- card_next = card_cur->next;
- card_free(card_cur);
- card_cur = card_next;
- }
- else card_cur = card_cur->next;
- }
-
- return card_root ? 0 : 1;
-}
diff --git a/collectors/proc.plugin/sys_class_infiniband.c b/collectors/proc.plugin/sys_class_infiniband.c
deleted file mode 100644
index d12a34513..000000000
--- a/collectors/proc.plugin/sys_class_infiniband.c
+++ /dev/null
@@ -1,705 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-// Heavily inspired by proc_net_dev.c
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_INFINIBAND_NAME "/sys/class/infiniband"
-#define CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND \
- "plugin:" PLUGIN_PROC_CONFIG_NAME ":" PLUGIN_PROC_MODULE_INFINIBAND_NAME
-
-// ib_device::name[IB_DEVICE_NAME_MAX(64)] + "-" + ib_device::phys_port_cnt[u8 = 3 chars]
-#define IBNAME_MAX 68
-
-// ----------------------------------------------------------------------------
-// infiniband & omnipath standard counters
-
-// Macros are used here because there is no single file acting as a summary, but a lot of different files,
-// so helpers like procfile() cannot be used. Also, omnipath generates other counters that infiniband does not provide.
-#define FOREACH_COUNTER(GEN, ...) \
- FOREACH_COUNTER_BYTES(GEN, __VA_ARGS__) \
- FOREACH_COUNTER_PACKETS(GEN, __VA_ARGS__) \
- FOREACH_COUNTER_ERRORS(GEN, __VA_ARGS__)
-
-#define FOREACH_COUNTER_BYTES(GEN, ...) \
- GEN(port_rcv_data, bytes, "Received", 1, __VA_ARGS__) \
- GEN(port_xmit_data, bytes, "Sent", -1, __VA_ARGS__)
-
-#define FOREACH_COUNTER_PACKETS(GEN, ...) \
- GEN(port_rcv_packets, packets, "Received", 1, __VA_ARGS__) \
- GEN(port_xmit_packets, packets, "Sent", -1, __VA_ARGS__) \
- GEN(multicast_rcv_packets, packets, "Mcast rcvd", 1, __VA_ARGS__) \
- GEN(multicast_xmit_packets, packets, "Mcast sent", -1, __VA_ARGS__) \
- GEN(unicast_rcv_packets, packets, "Ucast rcvd", 1, __VA_ARGS__) \
- GEN(unicast_xmit_packets, packets, "Ucast sent", -1, __VA_ARGS__)
-
-#define FOREACH_COUNTER_ERRORS(GEN, ...) \
- GEN(port_rcv_errors, errors, "Pkts malformated", 1, __VA_ARGS__) \
- GEN(port_rcv_constraint_errors, errors, "Pkts rcvd discarded ", 1, __VA_ARGS__) \
- GEN(port_xmit_discards, errors, "Pkts sent discarded", 1, __VA_ARGS__) \
- GEN(port_xmit_wait, errors, "Tick Wait to send", 1, __VA_ARGS__) \
- GEN(VL15_dropped, errors, "Pkts missed resource", 1, __VA_ARGS__) \
- GEN(excessive_buffer_overrun_errors, errors, "Buffer overrun", 1, __VA_ARGS__) \
- GEN(link_downed, errors, "Link Downed", 1, __VA_ARGS__) \
- GEN(link_error_recovery, errors, "Link recovered", 1, __VA_ARGS__) \
- GEN(local_link_integrity_errors, errors, "Link integrity err", 1, __VA_ARGS__) \
- GEN(symbol_error, errors, "Link minor errors", 1, __VA_ARGS__) \
- GEN(port_rcv_remote_physical_errors, errors, "Pkts rcvd with EBP", 1, __VA_ARGS__) \
- GEN(port_rcv_switch_relay_errors, errors, "Pkts rcvd discarded by switch", 1, __VA_ARGS__) \
- GEN(port_xmit_constraint_errors, errors, "Pkts sent discarded by switch", 1, __VA_ARGS__)
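-
-// For illustration only: these FOREACH_* lists are X-macros. Passing a generator
-// such as GEN_DEF_COUNTER (defined below) expands it once per counter, e.g.
-// FOREACH_COUNTER_BYTES(GEN_DEF_COUNTER) becomes:
-//   uint64_t port_rcv_data;  char *file_port_rcv_data;  RRDDIM *rd_port_rcv_data;
-//   uint64_t port_xmit_data; char *file_port_xmit_data; RRDDIM *rd_port_xmit_data;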
-
-//
-// Hardware Counters
-//
-
-// IMPORTANT: These are vendor-specific fields.
-// If you want to add a new vendor, search this file for the 'VENDORS:' keyword and
-// add your definition as 'VENDOR-<key>:', where <key> is the string part that
-// is shown in /sys/class/infiniband/<key>X_Y
-// EG: for Mellanox, shown as mlx0_1, it's 'mlx'
-// for Intel, shown as hfi1_1, it's 'hfi'
-
-// VENDORS: List of implemented hardware vendors
-#define FOREACH_HWCOUNTER_NAME(GEN, ...) GEN(mlx, __VA_ARGS__)
-
-// VENDOR-MLX: HW Counters for Mellanox ConnectX Devices
-#define FOREACH_HWCOUNTER_MLX(GEN, ...) \
- FOREACH_HWCOUNTER_MLX_PACKETS(GEN, __VA_ARGS__) \
- FOREACH_HWCOUNTER_MLX_ERRORS(GEN, __VA_ARGS__)
-
-#define FOREACH_HWCOUNTER_MLX_PACKETS(GEN, ...) \
- GEN(np_cnp_sent, hwpackets, "RoCEv2 Congestion sent", 1, __VA_ARGS__) \
- GEN(np_ecn_marked_roce_packets, hwpackets, "RoCEv2 Congestion rcvd", -1, __VA_ARGS__) \
- GEN(rp_cnp_handled, hwpackets, "IB Congestion handled", 1, __VA_ARGS__) \
- GEN(rx_atomic_requests, hwpackets, "ATOMIC req. rcvd", 1, __VA_ARGS__) \
- GEN(rx_dct_connect, hwpackets, "Connection req. rcvd", 1, __VA_ARGS__) \
- GEN(rx_read_requests, hwpackets, "Read req. rcvd", 1, __VA_ARGS__) \
- GEN(rx_write_requests, hwpackets, "Write req. rcvd", 1, __VA_ARGS__) \
- GEN(roce_adp_retrans, hwpackets, "RoCE retrans adaptive", 1, __VA_ARGS__) \
- GEN(roce_adp_retrans_to, hwpackets, "RoCE retrans timeout", 1, __VA_ARGS__) \
- GEN(roce_slow_restart, hwpackets, "RoCE slow restart", 1, __VA_ARGS__) \
- GEN(roce_slow_restart_cnps, hwpackets, "RoCE slow restart congestion", 1, __VA_ARGS__) \
- GEN(roce_slow_restart_trans, hwpackets, "RoCE slow restart count", 1, __VA_ARGS__)
-
-#define FOREACH_HWCOUNTER_MLX_ERRORS(GEN, ...) \
- GEN(duplicate_request, hwerrors, "Duplicated packets", -1, __VA_ARGS__) \
- GEN(implied_nak_seq_err, hwerrors, "Pkt Seq Num gap", 1, __VA_ARGS__) \
- GEN(local_ack_timeout_err, hwerrors, "Ack timer expired", 1, __VA_ARGS__) \
- GEN(out_of_buffer, hwerrors, "Drop missing buffer", 1, __VA_ARGS__) \
- GEN(out_of_sequence, hwerrors, "Drop out of sequence", 1, __VA_ARGS__) \
- GEN(packet_seq_err, hwerrors, "NAK sequence rcvd", 1, __VA_ARGS__) \
- GEN(req_cqe_error, hwerrors, "CQE err Req", 1, __VA_ARGS__) \
- GEN(resp_cqe_error, hwerrors, "CQE err Resp", 1, __VA_ARGS__) \
- GEN(req_cqe_flush_error, hwerrors, "CQE Flushed err Req", 1, __VA_ARGS__) \
- GEN(resp_cqe_flush_error, hwerrors, "CQE Flushed err Resp", 1, __VA_ARGS__) \
- GEN(req_remote_access_errors, hwerrors, "Remote access err Req", 1, __VA_ARGS__) \
- GEN(resp_remote_access_errors, hwerrors, "Remote access err Resp", 1, __VA_ARGS__) \
- GEN(req_remote_invalid_request, hwerrors, "Remote invalid req", 1, __VA_ARGS__) \
- GEN(resp_local_length_error, hwerrors, "Local length err Resp", 1, __VA_ARGS__) \
- GEN(rnr_nak_retry_err, hwerrors, "RNR NAK Packets", 1, __VA_ARGS__) \
- GEN(rp_cnp_ignored, hwerrors, "CNP Pkts ignored", 1, __VA_ARGS__) \
- GEN(rx_icrc_encapsulated, hwerrors, "RoCE ICRC Errors", 1, __VA_ARGS__)
-
-// Common definitions used more than once
-#define GEN_RRD_DIM_ADD(NAME, GRP, DESC, DIR, PORT) \
- GEN_RRD_DIM_ADD_CUSTOM(NAME, GRP, DESC, DIR, PORT, 1, 1, RRD_ALGORITHM_INCREMENTAL)
-
-#define GEN_RRD_DIM_ADD_CUSTOM(NAME, GRP, DESC, DIR, PORT, MULT, DIV, ALGO) \
- PORT->rd_##NAME = rrddim_add(PORT->st_##GRP, DESC, NULL, DIR * MULT, DIV, ALGO);
-
-#define GEN_RRD_DIM_ADD_HW(NAME, GRP, DESC, DIR, PORT, HW) \
- HW->rd_##NAME = rrddim_add(PORT->st_##GRP, DESC, NULL, DIR, 1, RRD_ALGORITHM_INCREMENTAL);
-
-#define GEN_RRD_DIM_SETP(NAME, GRP, DESC, DIR, PORT) \
- rrddim_set_by_pointer(PORT->st_##GRP, PORT->rd_##NAME, (collected_number)PORT->NAME);
-
-#define GEN_RRD_DIM_SETP_HW(NAME, GRP, DESC, DIR, PORT, HW) \
- rrddim_set_by_pointer(PORT->st_##GRP, HW->rd_##NAME, (collected_number)HW->NAME);
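-
-// For illustration only: FOREACH_COUNTER_BYTES(GEN_RRD_DIM_ADD, port) expands to
-// two rrddim_add() calls with opposite directions, roughly:
-//   port->rd_port_rcv_data  = rrddim_add(port->st_bytes, "Received", NULL,  1, 1, RRD_ALGORITHM_INCREMENTAL);
-//   port->rd_port_xmit_data = rrddim_add(port->st_bytes, "Sent",     NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);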
-
-// https://community.mellanox.com/s/article/understanding-mlx5-linux-counters-and-status-parameters
-// https://community.mellanox.com/s/article/infiniband-port-counters
-static struct ibport {
- char *name;
- char *counters_path;
- char *hwcounters_path;
- int len;
-
- // flags
- int configured;
- int enabled;
- int discovered;
-
- int do_bytes;
- int do_packets;
- int do_errors;
- int do_hwpackets;
- int do_hwerrors;
-
- const char *chart_type_bytes;
- const char *chart_type_packets;
- const char *chart_type_errors;
- const char *chart_type_hwpackets;
- const char *chart_type_hwerrors;
-
- const char *chart_id_bytes;
- const char *chart_id_packets;
- const char *chart_id_errors;
- const char *chart_id_hwpackets;
- const char *chart_id_hwerrors;
-
- const char *chart_family;
-
- unsigned long priority;
-
- // Port details using drivers/infiniband/core/sysfs.c :: rate_show()
- RRDDIM *rd_speed;
- uint64_t speed;
- uint64_t width;
-
-// Stats from /$device/ports/$portid/counters
-// as in drivers/infiniband/hw/qib/qib_verbs.h
-// All are uint64 except vl15_dropped, local_link_integrity_errors and excessive_buffer_overrun_errors (uint32)
-// Will generate 3 elements for each counter:
-// - uint64_t to store the value
-// - char* to store the filename path
-// - RRDDIM* to store the RRD Dimension
-#define GEN_DEF_COUNTER(NAME, ...) \
- uint64_t NAME; \
- char *file_##NAME; \
- RRDDIM *rd_##NAME;
- FOREACH_COUNTER(GEN_DEF_COUNTER)
-
-// Vendor specific hwcounters from /$device/ports/$portid/hw_counters
-// We will generate one struct pointer per vendor to avoid future casting
-#define GEN_DEF_HWCOUNTER_PTR(VENDOR, ...) struct ibporthw_##VENDOR *hwcounters_##VENDOR;
- FOREACH_HWCOUNTER_NAME(GEN_DEF_HWCOUNTER_PTR)
-
- // Function pointer to the "infiniband_hwcounters_parse_<vendor>" function
- void (*hwcounters_parse)(struct ibport *);
- void (*hwcounters_dorrd)(struct ibport *);
-
- // charts and dim
- RRDSET *st_bytes;
- RRDSET *st_packets;
- RRDSET *st_errors;
- RRDSET *st_hwpackets;
- RRDSET *st_hwerrors;
-
- const RRDSETVAR_ACQUIRED *stv_speed;
-
- usec_t speed_last_collected_usec;
-
- struct ibport *next;
-} *ibport_root = NULL, *ibport_last_used = NULL;
-
-// VENDORS: reading / calculation functions
-#define GEN_DEF_HWCOUNTER(NAME, ...) \
- uint64_t NAME; \
- char *file_##NAME; \
- RRDDIM *rd_##NAME;
-
-#define GEN_DO_HWCOUNTER_READ(NAME, GRP, DESC, DIR, PORT, HW, ...) \
- if (HW->file_##NAME) { \
- if (read_single_number_file(HW->file_##NAME, (unsigned long long *)&HW->NAME)) { \
-            collector_error("cannot read iface '%s' hwcounter '" #NAME "'", PORT->name); \
- HW->file_##NAME = NULL; \
- } \
- }
-
-// VENDOR-MLX: Mellanox
-struct ibporthw_mlx {
- FOREACH_HWCOUNTER_MLX(GEN_DEF_HWCOUNTER)
-};
-void infiniband_hwcounters_parse_mlx(struct ibport *port)
-{
- if (port->do_hwerrors != CONFIG_BOOLEAN_NO)
- FOREACH_HWCOUNTER_MLX_ERRORS(GEN_DO_HWCOUNTER_READ, port, port->hwcounters_mlx)
- if (port->do_hwpackets != CONFIG_BOOLEAN_NO)
- FOREACH_HWCOUNTER_MLX_PACKETS(GEN_DO_HWCOUNTER_READ, port, port->hwcounters_mlx)
-}
-void infiniband_hwcounters_dorrd_mlx(struct ibport *port)
-{
- if (port->do_hwerrors != CONFIG_BOOLEAN_NO) {
- FOREACH_HWCOUNTER_MLX_ERRORS(GEN_RRD_DIM_SETP_HW, port, port->hwcounters_mlx)
- rrdset_done(port->st_hwerrors);
- }
- if (port->do_hwpackets != CONFIG_BOOLEAN_NO) {
- FOREACH_HWCOUNTER_MLX_PACKETS(GEN_RRD_DIM_SETP_HW, port, port->hwcounters_mlx)
- rrdset_done(port->st_hwpackets);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-static struct ibport *get_ibport(const char *dev, const char *port)
-{
- struct ibport *p;
-
- char name[IBNAME_MAX + 1];
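-    // the port key is "<device>-<port>", e.g. "mlx5_0-1"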
- snprintfz(name, IBNAME_MAX, "%s-%s", dev, port);
-
- // search it, resuming from the last position in sequence
- for (p = ibport_last_used; p; p = p->next) {
- if (unlikely(!strcmp(name, p->name))) {
- ibport_last_used = p->next;
- return p;
- }
- }
-
- // new round, from the beginning to the last position used this time
- for (p = ibport_root; p != ibport_last_used; p = p->next) {
- if (unlikely(!strcmp(name, p->name))) {
- ibport_last_used = p->next;
- return p;
- }
- }
-
- // create a new one
- p = callocz(1, sizeof(struct ibport));
- p->name = strdupz(name);
- p->len = strlen(p->name);
-
- p->chart_type_bytes = strdupz("infiniband_cnt_bytes");
- p->chart_type_packets = strdupz("infiniband_cnt_packets");
- p->chart_type_errors = strdupz("infiniband_cnt_errors");
- p->chart_type_hwpackets = strdupz("infiniband_hwc_packets");
- p->chart_type_hwerrors = strdupz("infiniband_hwc_errors");
-
- char buffer[RRD_ID_LENGTH_MAX + 1];
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_cntbytes_%s", p->name);
- p->chart_id_bytes = strdupz(buffer);
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_cntpackets_%s", p->name);
- p->chart_id_packets = strdupz(buffer);
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_cnterrors_%s", p->name);
- p->chart_id_errors = strdupz(buffer);
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_hwcntpackets_%s", p->name);
- p->chart_id_hwpackets = strdupz(buffer);
-
- snprintfz(buffer, RRD_ID_LENGTH_MAX, "ib_hwcnterrors_%s", p->name);
- p->chart_id_hwerrors = strdupz(buffer);
-
- p->chart_family = strdupz(p->name);
- p->priority = NETDATA_CHART_PRIO_INFINIBAND;
-
- // Link current ibport to last one in the list
- if (ibport_root) {
- struct ibport *t;
- for (t = ibport_root; t->next; t = t->next)
- ;
- t->next = p;
- } else
- ibport_root = p;
-
- return p;
-}
-
-int do_sys_class_infiniband(int update_every, usec_t dt)
-{
- (void)dt;
- static SIMPLE_PATTERN *disabled_list = NULL;
- static int initialized = 0;
- static int enable_new_ports = -1, enable_only_active = CONFIG_BOOLEAN_YES;
- static int do_bytes = -1, do_packets = -1, do_errors = -1, do_hwpackets = -1, do_hwerrors = -1;
- static char *sys_class_infiniband_dirname = NULL;
-
- static long long int dt_to_refresh_ports = 0, last_refresh_ports_usec = 0;
-
- if (unlikely(enable_new_ports == -1)) {
- char dirname[FILENAME_MAX + 1];
-
- snprintfz(dirname, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/infiniband");
- sys_class_infiniband_dirname =
- config_get(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "dirname to monitor", dirname);
-
- do_bytes = config_get_boolean_ondemand(
- CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "bandwidth counters", CONFIG_BOOLEAN_YES);
- do_packets = config_get_boolean_ondemand(
- CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "packets counters", CONFIG_BOOLEAN_YES);
- do_errors = config_get_boolean_ondemand(
- CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "errors counters", CONFIG_BOOLEAN_YES);
- do_hwpackets = config_get_boolean_ondemand(
- CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "hardware packets counters", CONFIG_BOOLEAN_AUTO);
- do_hwerrors = config_get_boolean_ondemand(
- CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "hardware errors counters", CONFIG_BOOLEAN_AUTO);
-
- enable_only_active = config_get_boolean_ondemand(
- CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "monitor only active ports", CONFIG_BOOLEAN_AUTO);
- disabled_list = simple_pattern_create(
- config_get(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "disable by default interfaces matching", ""),
- NULL,
- SIMPLE_PATTERN_EXACT, true);
-
- dt_to_refresh_ports =
- config_get_number(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "refresh ports state every seconds", 30) *
- USEC_PER_SEC;
- if (dt_to_refresh_ports < 0)
- dt_to_refresh_ports = 0;
- }
-
- // init listing of /sys/class/infiniband/ (or rediscovery)
- if (unlikely(!initialized) || unlikely(last_refresh_ports_usec >= dt_to_refresh_ports)) {
-        // If the folder does not exist, return 1 to disable
- DIR *devices_dir = opendir(sys_class_infiniband_dirname);
- if (unlikely(!devices_dir))
- return 1;
-
-        // Work on all available devices
- struct dirent *dev_dent;
- while ((dev_dent = readdir(devices_dir))) {
- // Skip special folders
- if (!strcmp(dev_dent->d_name, "..") || !strcmp(dev_dent->d_name, "."))
- continue;
-
- // /sys/class/infiniband/<dev>/ports
- char ports_dirname[FILENAME_MAX + 1];
- snprintfz(ports_dirname, FILENAME_MAX, "%s/%s/%s", sys_class_infiniband_dirname, dev_dent->d_name, "ports");
-
- DIR *ports_dir = opendir(ports_dirname);
- if (unlikely(!ports_dir))
- continue;
-
- struct dirent *port_dent;
- while ((port_dent = readdir(ports_dir))) {
- // Skip special folders
- if (!strcmp(port_dent->d_name, "..") || !strcmp(port_dent->d_name, "."))
- continue;
-
- char buffer[FILENAME_MAX + 1];
-
- // Check if counters are available (mandatory)
- // /sys/class/infiniband/<device>/ports/<port>/counters
- char counters_dirname[FILENAME_MAX + 1];
- snprintfz(counters_dirname, FILENAME_MAX, "%s/%s/%s", ports_dirname, port_dent->d_name, "counters");
- DIR *counters_dir = opendir(counters_dirname);
- // Standard counters are mandatory
- if (!counters_dir)
- continue;
- closedir(counters_dir);
-
- // Hardware Counters are optional, used later
- char hwcounters_dirname[FILENAME_MAX + 1];
- snprintfz(
- hwcounters_dirname, FILENAME_MAX, "%s/%s/%s", ports_dirname, port_dent->d_name, "hw_counters");
-
- // Get new or existing ibport
- struct ibport *p = get_ibport(dev_dent->d_name, port_dent->d_name);
- if (!p)
- continue;
-
- // Prepare configuration
- if (!p->configured) {
- p->configured = 1;
-
- // Enable by default, will be filtered out later
- p->enabled = 1;
-
- p->counters_path = strdupz(counters_dirname);
- p->hwcounters_path = strdupz(hwcounters_dirname);
-
- snprintfz(buffer, FILENAME_MAX, "plugin:proc:/sys/class/infiniband:%s", p->name);
-
- // Standard counters
- p->do_bytes = config_get_boolean_ondemand(buffer, "bytes", do_bytes);
- p->do_packets = config_get_boolean_ondemand(buffer, "packets", do_packets);
- p->do_errors = config_get_boolean_ondemand(buffer, "errors", do_errors);
-
-// Generate each counter filename: allocate room for path + '/' + #NAME + NUL, then concatenate
-#define GEN_DO_COUNTER_NAME(NAME, GRP, DESC, DIR, PORT, ...) \
- PORT->file_##NAME = callocz(1, strlen(PORT->counters_path) + sizeof(#NAME) + 3); \
- strcat(PORT->file_##NAME, PORT->counters_path); \
- strcat(PORT->file_##NAME, "/" #NAME);
- FOREACH_COUNTER(GEN_DO_COUNTER_NAME, p)
-
-                    // Check vendor-dependent HW counters
- DIR *hwcounters_dir = opendir(hwcounters_dirname);
- if (hwcounters_dir) {
- // By default set standard
- p->do_hwpackets = config_get_boolean_ondemand(buffer, "hwpackets", do_hwpackets);
- p->do_hwerrors = config_get_boolean_ondemand(buffer, "hwerrors", do_hwerrors);
-
-// VENDORS: Set your own
-
-// Allocate the strings for the filenames
-#define GEN_DO_HWCOUNTER_NAME(NAME, GRP, DESC, DIR, PORT, HW, ...) \
- HW->file_##NAME = callocz(1, strlen(PORT->hwcounters_path) + sizeof(#NAME) + 3); \
- strcat(HW->file_##NAME, PORT->hwcounters_path); \
- strcat(HW->file_##NAME, "/" #NAME);
-
- // VENDOR-MLX: Mellanox
- if (strncmp(dev_dent->d_name, "mlx", 3) == 0) {
- // Allocate the vendor specific struct
- p->hwcounters_mlx = callocz(1, sizeof(struct ibporthw_mlx));
-
- FOREACH_HWCOUNTER_MLX(GEN_DO_HWCOUNTER_NAME, p, p->hwcounters_mlx)
-
- // Set the function pointer for hwcounter parsing
- p->hwcounters_parse = &infiniband_hwcounters_parse_mlx;
- p->hwcounters_dorrd = &infiniband_hwcounters_dorrd_mlx;
- }
-
- // VENDOR: Unknown
- else {
- p->do_hwpackets = CONFIG_BOOLEAN_NO;
- p->do_hwerrors = CONFIG_BOOLEAN_NO;
- }
- closedir(hwcounters_dir);
- }
- }
-
-                // Check port state to decide whether the port stays enabled
- if (enable_only_active) {
- snprintfz(buffer, FILENAME_MAX, "%s/%s/%s", ports_dirname, port_dent->d_name, "state");
- unsigned long long active;
- // File is "1: DOWN" or "4: ACTIVE", but str2ull will stop on first non-decimal char
- read_single_number_file(buffer, &active);
-
- // Want "IB_PORT_ACTIVE" == "4", as defined by drivers/infiniband/core/sysfs.c::state_show()
- if (active == 4)
- p->enabled = 1;
- else
- p->enabled = 0;
- }
-
- if (p->enabled)
- p->enabled = !simple_pattern_matches(disabled_list, p->name);
-
-                // Check / update the link speed & width from the "rate" file
- // Sample output: "100 Gb/sec (4X EDR)"
- snprintfz(buffer, FILENAME_MAX, "%s/%s/%s", ports_dirname, port_dent->d_name, "rate");
- char buffer_rate[65];
- p->width = 4;
- if (read_file(buffer, buffer_rate, 64)) {
- collector_error("Unable to read '%s'", buffer);
- } else {
- char *buffer_width = strstr(buffer_rate, "(");
- if (buffer_width) {
- buffer_width++;
- // str2ull will stop on first non-decimal value
- p->speed = str2ull(buffer_rate, NULL);
- p->width = str2ull(buffer_width, NULL);
- }
- }
-
- if (!p->discovered)
- collector_info("Infiniband card %s port %s at speed %" PRIu64 " width %" PRIu64 "",
- dev_dent->d_name,
- port_dent->d_name,
- p->speed,
- p->width);
-
- p->discovered = 1;
- }
- closedir(ports_dir);
- }
- closedir(devices_dir);
-
- initialized = 1;
- last_refresh_ports_usec = 0;
- }
- last_refresh_ports_usec += dt;
-
- // Update all ports values
- struct ibport *port;
- for (port = ibport_root; port; port = port->next) {
- if (!port->enabled)
- continue;
- //
- // Read values from system to struct
- //
-
-// Read a counter from its file and place it in the ibport struct
-#define GEN_DO_COUNTER_READ(NAME, GRP, DESC, DIR, PORT, ...) \
- if (PORT->file_##NAME) { \
- if (read_single_number_file(PORT->file_##NAME, (unsigned long long *)&PORT->NAME)) { \
- collector_error("cannot read iface '%s' counter '" #NAME "'", PORT->name); \
- PORT->file_##NAME = NULL; \
- } \
- }
-
- // Update charts
- if (port->do_bytes != CONFIG_BOOLEAN_NO) {
- // Read values from sysfs
- FOREACH_COUNTER_BYTES(GEN_DO_COUNTER_READ, port)
-
- // First creation of RRD Set (charts)
- if (unlikely(!port->st_bytes)) {
- port->st_bytes = rrdset_create_localhost(
- "Infiniband",
- port->chart_id_bytes,
- NULL,
- port->chart_family,
- "ib.bytes",
- "Bandwidth usage",
- "kilobits/s",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_INFINIBAND_NAME,
- port->priority + 1,
- update_every,
- RRDSET_TYPE_AREA);
- // Create Dimensions
- rrdset_flag_set(port->st_bytes, RRDSET_FLAG_DETAIL);
- // On this chart, we want to have a KB/s so the dashboard will autoscale it
- // The reported values are also per-lane, so we must multiply it by the width
- // x4 lanes multiplier as per Documentation/ABI/stable/sysfs-class-infiniband
- FOREACH_COUNTER_BYTES(GEN_RRD_DIM_ADD_CUSTOM, port, port->width * 8, 1000, RRD_ALGORITHM_INCREMENTAL)
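-                // e.g. on a 4X link (width = 4) a raw counter delta of 1000 is
-                // scaled by (4 * 8) / 1000 and emitted as 32 kilobits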
-
- port->stv_speed = rrdsetvar_custom_chart_variable_add_and_acquire(port->st_bytes, "link_speed");
- }
-
- // Link read values to dimensions
- FOREACH_COUNTER_BYTES(GEN_RRD_DIM_SETP, port)
-
- // For link speed set only variable
- rrdsetvar_custom_chart_variable_set(port->st_bytes, port->stv_speed, port->speed);
-
- rrdset_done(port->st_bytes);
- }
-
- if (port->do_packets != CONFIG_BOOLEAN_NO) {
- // Read values from sysfs
- FOREACH_COUNTER_PACKETS(GEN_DO_COUNTER_READ, port)
-
- // First creation of RRD Set (charts)
- if (unlikely(!port->st_packets)) {
- port->st_packets = rrdset_create_localhost(
- "Infiniband",
- port->chart_id_packets,
- NULL,
- port->chart_family,
- "ib.packets",
- "Packets Statistics",
- "packets/s",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_INFINIBAND_NAME,
- port->priority + 2,
- update_every,
- RRDSET_TYPE_AREA);
- // Create Dimensions
- rrdset_flag_set(port->st_packets, RRDSET_FLAG_DETAIL);
- FOREACH_COUNTER_PACKETS(GEN_RRD_DIM_ADD, port)
- }
-
- // Link read values to dimensions
- FOREACH_COUNTER_PACKETS(GEN_RRD_DIM_SETP, port)
- rrdset_done(port->st_packets);
- }
-
- if (port->do_errors != CONFIG_BOOLEAN_NO) {
- // Read values from sysfs
- FOREACH_COUNTER_ERRORS(GEN_DO_COUNTER_READ, port)
-
- // First creation of RRD Set (charts)
- if (unlikely(!port->st_errors)) {
- port->st_errors = rrdset_create_localhost(
- "Infiniband",
- port->chart_id_errors,
- NULL,
- port->chart_family,
- "ib.errors",
- "Error Counters",
- "errors/s",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_INFINIBAND_NAME,
- port->priority + 3,
- update_every,
- RRDSET_TYPE_LINE);
- // Create Dimensions
- rrdset_flag_set(port->st_errors, RRDSET_FLAG_DETAIL);
- FOREACH_COUNTER_ERRORS(GEN_RRD_DIM_ADD, port)
- }
-
- // Link read values to dimensions
- FOREACH_COUNTER_ERRORS(GEN_RRD_DIM_SETP, port)
- rrdset_done(port->st_errors);
- }
-
- //
- // HW Counters
- //
-
- // Call the function for parsing and setting hwcounters
- if (port->hwcounters_parse && port->hwcounters_dorrd) {
- // Read all values (done by vendor-specific function)
- (*port->hwcounters_parse)(port);
-
- if (port->do_hwerrors != CONFIG_BOOLEAN_NO) {
- // First creation of RRD Set (charts)
- if (unlikely(!port->st_hwerrors)) {
- port->st_hwerrors = rrdset_create_localhost(
- "Infiniband",
- port->chart_id_hwerrors,
- NULL,
- port->chart_family,
- "ib.hwerrors",
- "Hardware Errors",
- "errors/s",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_INFINIBAND_NAME,
- port->priority + 4,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_flag_set(port->st_hwerrors, RRDSET_FLAG_DETAIL);
-
- // VENDORS: Set your selection
-
- // VENDOR: Mellanox
- if (strncmp(port->name, "mlx", 3) == 0) {
- FOREACH_HWCOUNTER_MLX_ERRORS(GEN_RRD_DIM_ADD_HW, port, port->hwcounters_mlx)
- }
-
- // Unknown vendor, should not happen
- else {
- collector_error(
- "Unmanaged vendor for '%s', do_hwerrors should have been set to no. Please report this bug",
- port->name);
- port->do_hwerrors = CONFIG_BOOLEAN_NO;
- }
- }
- }
-
- if (port->do_hwpackets != CONFIG_BOOLEAN_NO) {
- // First creation of RRD Set (charts)
- if (unlikely(!port->st_hwpackets)) {
- port->st_hwpackets = rrdset_create_localhost(
- "Infiniband",
- port->chart_id_hwpackets,
- NULL,
- port->chart_family,
- "ib.hwpackets",
- "Hardware Packets Statistics",
- "packets/s",
- PLUGIN_PROC_NAME,
- PLUGIN_PROC_MODULE_INFINIBAND_NAME,
- port->priority + 5,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_flag_set(port->st_hwpackets, RRDSET_FLAG_DETAIL);
-
- // VENDORS: Set your selection
-
- // VENDOR: Mellanox
- if (strncmp(port->name, "mlx", 3) == 0) {
- FOREACH_HWCOUNTER_MLX_PACKETS(GEN_RRD_DIM_ADD_HW, port, port->hwcounters_mlx)
- }
-
- // Unknown vendor, should not happen
- else {
- collector_error(
- "Unmanaged vendor for '%s', do_hwpackets should have been set to no. Please report this bug",
- port->name);
- port->do_hwpackets = CONFIG_BOOLEAN_NO;
- }
- }
- }
-
- // Update values to rrd (done by vendor-specific function)
- (*port->hwcounters_dorrd)(port);
- }
- }
-
- return 0;
-}
diff --git a/collectors/proc.plugin/sys_class_power_supply.c b/collectors/proc.plugin/sys_class_power_supply.c
deleted file mode 100644
index 3f793b9c6..000000000
--- a/collectors/proc.plugin/sys_class_power_supply.c
+++ /dev/null
@@ -1,414 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_POWER_SUPPLY_NAME "/sys/class/power_supply"
-
-const char *ps_property_names[] = { "charge", "energy", "voltage"};
-const char *ps_property_titles[] = {"Battery charge", "Battery energy", "Power supply voltage"};
-const char *ps_property_units[] = { "Ah", "Wh", "V"};
-
-const char *ps_property_dim_names[] = {"empty_design", "empty", "now", "full", "full_design",
- "empty_design", "empty", "now", "full", "full_design",
- "min_design", "min", "now", "max", "max_design"};
-
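-// Each row above pairs with the same-index entry of ps_property_names: property
-// pr_idx uses ps_property_dim_names[pr_idx * 5] .. [pr_idx * 5 + 4], producing
-// sysfs attribute names such as "charge_now", "energy_full" or "voltage_min_design".
-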
-struct ps_property_dim {
- char *name;
- char *filename;
- int fd;
-
- RRDDIM *rd;
- unsigned long long value;
- int always_zero;
-
- struct ps_property_dim *next;
-};
-
-struct ps_property {
- char *name;
- char *title;
- char *units;
-
- RRDSET *st;
-
- struct ps_property_dim *property_dim_root;
-
- struct ps_property *next;
-};
-
-struct capacity {
- char *filename;
- int fd;
-
- RRDSET *st;
- RRDDIM *rd;
- unsigned long long value;
-};
-
-struct power_supply {
- char *name;
- uint32_t hash;
- int found;
-
- struct capacity *capacity;
-
- struct ps_property *property_root;
-
- struct power_supply *next;
-};
-
-static struct power_supply *power_supply_root = NULL;
-static int files_num = 0;
-
-void power_supply_free(struct power_supply *ps) {
- if(likely(ps)) {
-
- // free capacity structure
- if(likely(ps->capacity)) {
- if(likely(ps->capacity->st)) rrdset_is_obsolete___safe_from_collector_thread(ps->capacity->st);
- freez(ps->capacity->filename);
- if(likely(ps->capacity->fd != -1)) close(ps->capacity->fd);
- files_num--;
- freez(ps->capacity);
- }
- freez(ps->name);
-
- struct ps_property *pr = ps->property_root;
- while(likely(pr)) {
-
- // free dimensions
- struct ps_property_dim *pd = pr->property_dim_root;
- while(likely(pd)) {
- freez(pd->name);
- freez(pd->filename);
- if(likely(pd->fd != -1)) close(pd->fd);
- files_num--;
- struct ps_property_dim *d = pd;
- pd = pd->next;
- freez(d);
- }
-
- // free properties
- if(likely(pr->st)) rrdset_is_obsolete___safe_from_collector_thread(pr->st);
- freez(pr->name);
- freez(pr->title);
- freez(pr->units);
- struct ps_property *p = pr;
- pr = pr->next;
- freez(p);
- }
-
- // remove power supply from linked list
- if(likely(ps == power_supply_root)) {
- power_supply_root = ps->next;
- }
- else {
- struct power_supply *last;
- for(last = power_supply_root; last && last->next != ps; last = last->next);
- if(likely(last)) last->next = ps->next;
- }
-
- freez(ps);
- }
-}
-
-static void add_labels_to_power_supply(struct power_supply *ps, RRDSET *st) {
- rrdlabels_add(st->rrdlabels, "device", ps->name, RRDLABEL_SRC_AUTO);
-}
-
-int do_sys_class_power_supply(int update_every, usec_t dt) {
- (void)dt;
- static int do_capacity = -1, do_property[3] = {-1};
- static int keep_fds_open = CONFIG_BOOLEAN_NO, keep_fds_open_config = -1;
- static char *dirname = NULL;
-
- if(unlikely(do_capacity == -1)) {
- do_capacity = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery capacity", CONFIG_BOOLEAN_YES);
- do_property[0] = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery charge", CONFIG_BOOLEAN_NO);
- do_property[1] = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery energy", CONFIG_BOOLEAN_NO);
- do_property[2] = config_get_boolean("plugin:proc:/sys/class/power_supply", "power supply voltage", CONFIG_BOOLEAN_NO);
-
- keep_fds_open_config = config_get_boolean_ondemand("plugin:proc:/sys/class/power_supply", "keep files open", CONFIG_BOOLEAN_AUTO);
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/power_supply");
- dirname = config_get("plugin:proc:/sys/class/power_supply", "directory to monitor", filename);
- }
-
- DIR *dir = opendir(dirname);
- if(unlikely(!dir)) {
- collector_error("Cannot read directory '%s'", dirname);
- return 1;
- }
-
- struct dirent *de = NULL;
- while(likely(de = readdir(dir))) {
- if(likely(de->d_type == DT_DIR
- && (
- (de->d_name[0] == '.' && de->d_name[1] == '\0')
- || (de->d_name[0] == '.' && de->d_name[1] == '.' && de->d_name[2] == '\0')
- )))
- continue;
-
- if(likely(de->d_type == DT_LNK || de->d_type == DT_DIR)) {
- uint32_t hash = simple_hash(de->d_name);
-
- struct power_supply *ps;
- for(ps = power_supply_root; ps; ps = ps->next) {
- if(unlikely(ps->hash == hash && !strcmp(ps->name, de->d_name))) {
- ps->found = 1;
- break;
- }
- }
-
- // allocate memory for power supply and initialize it
- if(unlikely(!ps)) {
- ps = callocz(sizeof(struct power_supply), 1);
- ps->name = strdupz(de->d_name);
- ps->hash = simple_hash(de->d_name);
- ps->found = 1;
- ps->next = power_supply_root;
- power_supply_root = ps;
-
- struct stat stbuf;
- if(likely(do_capacity != CONFIG_BOOLEAN_NO)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/%s/%s", dirname, de->d_name, "capacity");
- if (stat(filename, &stbuf) == 0) {
- ps->capacity = callocz(sizeof(struct capacity), 1);
- ps->capacity->filename = strdupz(filename);
- ps->capacity->fd = -1;
- files_num++;
- }
- }
-
- // allocate memory and initialize structures for every property and file found
- size_t pr_idx, pd_idx;
- size_t prev_idx = 3; // there is no property with this index
-
- for(pr_idx = 0; pr_idx < 3; pr_idx++) {
- if(unlikely(do_property[pr_idx] != CONFIG_BOOLEAN_NO)) {
- struct ps_property *pr = NULL;
- int min_value_found = 0, max_value_found = 0;
-
- for(pd_idx = pr_idx * 5; pd_idx < pr_idx * 5 + 5; pd_idx++) {
-
- // check if file exists
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/%s/%s_%s", dirname, de->d_name,
- ps_property_names[pr_idx], ps_property_dim_names[pd_idx]);
- if (stat(filename, &stbuf) == 0) {
-
- if(unlikely(pd_idx == pr_idx * 5 + 1))
- min_value_found = 1;
- if(unlikely(pd_idx == pr_idx * 5 + 3))
- max_value_found = 1;
-
- // add chart
- if(unlikely(prev_idx != pr_idx)) {
- pr = callocz(sizeof(struct ps_property), 1);
- pr->name = strdupz(ps_property_names[pr_idx]);
- pr->title = strdupz(ps_property_titles[pr_idx]);
- pr->units = strdupz(ps_property_units[pr_idx]);
- prev_idx = pr_idx;
- pr->next = ps->property_root;
- ps->property_root = pr;
- }
-
- // add dimension
- struct ps_property_dim *pd;
-                            pd = callocz(sizeof(struct ps_property_dim), 1);
- pd->name = strdupz(ps_property_dim_names[pd_idx]);
- pd->filename = strdupz(filename);
- pd->fd = -1;
- files_num++;
- pd->next = pr->property_dim_root;
- pr->property_dim_root = pd;
- }
- }
-
- // create a zero empty/min dimension
- if(unlikely(max_value_found && !min_value_found)) {
- struct ps_property_dim *pd;
-                        pd = callocz(sizeof(struct ps_property_dim), 1);
- pd->name = strdupz(ps_property_dim_names[pr_idx * 5 + 1]);
- pd->always_zero = 1;
- pd->next = pr->property_dim_root;
- pr->property_dim_root = pd;
- }
- }
- }
- }
-
- // read capacity file
- if(likely(ps->capacity)) {
- char buffer[30 + 1];
-
- if(unlikely(ps->capacity->fd == -1)) {
- ps->capacity->fd = open(ps->capacity->filename, O_RDONLY, 0666);
- if(unlikely(ps->capacity->fd == -1)) {
- collector_error("Cannot open file '%s'", ps->capacity->filename);
- power_supply_free(ps);
- ps = NULL;
- }
- }
-
- if (ps)
- {
- ssize_t r = read(ps->capacity->fd, buffer, 30);
- if(unlikely(r < 1)) {
- collector_error("Cannot read file '%s'", ps->capacity->filename);
- power_supply_free(ps);
- ps = NULL;
- }
- else {
- buffer[r] = '\0';
- ps->capacity->value = str2ull(buffer, NULL);
-
- if(unlikely(!keep_fds_open)) {
- close(ps->capacity->fd);
- ps->capacity->fd = -1;
- }
- else if(unlikely(lseek(ps->capacity->fd, 0, SEEK_SET) == -1)) {
- collector_error("Cannot seek in file '%s'", ps->capacity->filename);
- close(ps->capacity->fd);
- ps->capacity->fd = -1;
- }
- }
- }
- }
-
- // read property files
- int read_error = 0;
- struct ps_property *pr;
- if (ps)
- {
- for(pr = ps->property_root; pr && !read_error; pr = pr->next) {
- struct ps_property_dim *pd;
- for(pd = pr->property_dim_root; pd; pd = pd->next) {
- if(likely(!pd->always_zero)) {
- char buffer[30 + 1];
-
- if(unlikely(pd->fd == -1)) {
- pd->fd = open(pd->filename, O_RDONLY, 0666);
- if(unlikely(pd->fd == -1)) {
- collector_error("Cannot open file '%s'", pd->filename);
- read_error = 1;
- power_supply_free(ps);
- break;
- }
- }
-
- ssize_t r = read(pd->fd, buffer, 30);
- if(unlikely(r < 1)) {
- collector_error("Cannot read file '%s'", pd->filename);
- read_error = 1;
- power_supply_free(ps);
- break;
- }
- buffer[r] = '\0';
- pd->value = str2ull(buffer, NULL);
-
- if(unlikely(!keep_fds_open)) {
- close(pd->fd);
- pd->fd = -1;
- }
- else if(unlikely(lseek(pd->fd, 0, SEEK_SET) == -1)) {
- collector_error("Cannot seek in file '%s'", pd->filename);
- close(pd->fd);
- pd->fd = -1;
- }
- }
- }
- }
- }
- }
- }
-
- closedir(dir);
-
- keep_fds_open = keep_fds_open_config;
- if(likely(keep_fds_open_config == CONFIG_BOOLEAN_AUTO)) {
- if(unlikely(files_num > 32))
- keep_fds_open = CONFIG_BOOLEAN_NO;
- else
- keep_fds_open = CONFIG_BOOLEAN_YES;
- }
-
- // --------------------------------------------------------------------
-
- struct power_supply *ps = power_supply_root;
- while(unlikely(ps)) {
- if(unlikely(!ps->found)) {
- struct power_supply *f = ps;
- ps = ps->next;
- power_supply_free(f);
- continue;
- }
-
- if(likely(ps->capacity)) {
- if(unlikely(!ps->capacity->st)) {
- ps->capacity->st = rrdset_create_localhost(
- "powersupply_capacity"
- , ps->name
- , NULL
- , ps->name
- , "powersupply.capacity"
- , "Battery capacity"
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_POWER_SUPPLY_NAME
- , NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- add_labels_to_power_supply(ps, ps->capacity->st);
- }
-
- if(unlikely(!ps->capacity->rd)) ps->capacity->rd = rrddim_add(ps->capacity->st, "capacity", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rrddim_set_by_pointer(ps->capacity->st, ps->capacity->rd, ps->capacity->value);
-
- rrdset_done(ps->capacity->st);
- }
-
- struct ps_property *pr;
- for(pr = ps->property_root; pr; pr = pr->next) {
- if(unlikely(!pr->st)) {
- char id[RRD_ID_LENGTH_MAX + 1], context[RRD_ID_LENGTH_MAX + 1];
- snprintfz(id, RRD_ID_LENGTH_MAX, "powersupply_%s", pr->name);
- snprintfz(context, RRD_ID_LENGTH_MAX, "powersupply.%s", pr->name);
-
- pr->st = rrdset_create_localhost(
- id
- , ps->name
- , NULL
- , ps->name
- , context
- , pr->title
- , pr->units
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_POWER_SUPPLY_NAME
- , NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- add_labels_to_power_supply(ps, pr->st);
- }
-
- struct ps_property_dim *pd;
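-            // sysfs power_supply attributes are reported in micro-units (uAh, uWh, uV);
-            // the divisor of 1000000 below converts them to the chart units Ah, Wh and V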
- for(pd = pr->property_dim_root; pd; pd = pd->next) {
- if(unlikely(!pd->rd)) pd->rd = rrddim_add(pr->st, pd->name, NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
- rrddim_set_by_pointer(pr->st, pd->rd, pd->value);
- }
-
- rrdset_done(pr->st);
- }
-
- ps->found = 0;
- ps = ps->next;
- }
-
- return 0;
-}
diff --git a/collectors/proc.plugin/sys_devices_pci_aer.c b/collectors/proc.plugin/sys_devices_pci_aer.c
deleted file mode 100644
index 563ebf051..000000000
--- a/collectors/proc.plugin/sys_devices_pci_aer.c
+++ /dev/null
@@ -1,340 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-static char *pci_aer_dirname = NULL;
-
-typedef enum __attribute__((packed)) {
- AER_DEV_NONFATAL = (1 << 0),
- AER_DEV_CORRECTABLE = (1 << 1),
- AER_DEV_FATAL = (1 << 2),
- AER_ROOTPORT_TOTAL_ERR_COR = (1 << 3),
- AER_ROOTPORT_TOTAL_ERR_FATAL = (1 << 4),
-} AER_TYPE;
-
-struct aer_value {
- kernel_uint_t count;
- RRDDIM *rd;
-};
-
-struct aer_entry {
- bool updated;
-
- STRING *name;
- AER_TYPE type;
-
- procfile *ff;
- DICTIONARY *values;
-
- RRDSET *st;
-};
-
-DICTIONARY *aer_root = NULL;
-
-static bool aer_value_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) {
- struct aer_value *v = old_value;
- struct aer_value *nv = new_value;
-
- v->count = nv->count;
-
- return false;
-}
-
-static void aer_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
- struct aer_entry *a = value;
- a->values = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
- dictionary_register_conflict_callback(a->values, aer_value_conflict_callback, NULL);
-}
-
-static void add_pci_aer(const char *base_dir, const char *d_name, AER_TYPE type) {
- char buffer[FILENAME_MAX + 1];
- snprintfz(buffer, FILENAME_MAX, "%s/%s", base_dir, d_name);
- struct aer_entry *a = dictionary_set(aer_root, buffer, NULL, sizeof(struct aer_entry));
-
- if(!a->name)
- a->name = string_strdupz(d_name);
-
- a->type = type;
-}
-
-static bool recursively_find_pci_aer(AER_TYPE types, const char *base_dir, const char *d_name, int depth) {
- if(depth > 100)
- return false;
-
- char buffer[FILENAME_MAX + 1];
- snprintfz(buffer, FILENAME_MAX, "%s/%s", base_dir, d_name);
- DIR *dir = opendir(buffer);
- if(unlikely(!dir)) {
- collector_error("Cannot read PCI_AER directory '%s'", buffer);
- return true;
- }
-
- struct dirent *de = NULL;
- while((de = readdir(dir))) {
- if(de->d_type == DT_DIR) {
- if(de->d_name[0] == '.')
- continue;
-
- recursively_find_pci_aer(types, buffer, de->d_name, depth + 1);
- }
- else if(de->d_type == DT_REG) {
- if((types & AER_DEV_NONFATAL) && strcmp(de->d_name, "aer_dev_nonfatal") == 0) {
- add_pci_aer(buffer, de->d_name, AER_DEV_NONFATAL);
- }
- else if((types & AER_DEV_CORRECTABLE) && strcmp(de->d_name, "aer_dev_correctable") == 0) {
- add_pci_aer(buffer, de->d_name, AER_DEV_CORRECTABLE);
- }
- else if((types & AER_DEV_FATAL) && strcmp(de->d_name, "aer_dev_fatal") == 0) {
- add_pci_aer(buffer, de->d_name, AER_DEV_FATAL);
- }
- else if((types & AER_ROOTPORT_TOTAL_ERR_COR) && strcmp(de->d_name, "aer_rootport_total_err_cor") == 0) {
- add_pci_aer(buffer, de->d_name, AER_ROOTPORT_TOTAL_ERR_COR);
- }
- else if((types & AER_ROOTPORT_TOTAL_ERR_FATAL) && strcmp(de->d_name, "aer_rootport_total_err_fatal") == 0) {
- add_pci_aer(buffer, de->d_name, AER_ROOTPORT_TOTAL_ERR_FATAL);
- }
- }
- }
- closedir(dir);
- return true;
-}
-
-static void find_all_pci_aer(AER_TYPE types) {
- char name[FILENAME_MAX + 1];
- snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices");
- pci_aer_dirname = config_get("plugin:proc:/sys/devices/pci/aer", "directory to monitor", name);
-
- DIR *dir = opendir(pci_aer_dirname);
- if(unlikely(!dir)) {
- collector_error("Cannot read PCI_AER directory '%s'", pci_aer_dirname);
- return;
- }
-
- struct dirent *de = NULL;
- while((de = readdir(dir))) {
- if(de->d_type == DT_DIR && de->d_name[0] == 'p' && de->d_name[1] == 'c' && de->d_name[2] == 'i' && isdigit(de->d_name[3]))
- recursively_find_pci_aer(types, pci_aer_dirname, de->d_name, 1);
- }
- closedir(dir);
-}
-
-static void read_pci_aer_values(const char *filename, struct aer_entry *t) {
- t->updated = false;
-
- if(unlikely(!t->ff)) {
- t->ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!t->ff))
- return;
- }
-
- t->ff = procfile_readall(t->ff);
- if(unlikely(!t->ff || procfile_lines(t->ff) < 1 || procfile_linewords(t->ff, 0) < 1))
- return;
-
- size_t lines = procfile_lines(t->ff);
- for(size_t l = 0; l < lines ; l++) {
- if(procfile_linewords(t->ff, l) != 2)
- continue;
-
- struct aer_value v = {
- .count = str2ull(procfile_lineword(t->ff, l, 1), NULL)
- };
-
- char *key = procfile_lineword(t->ff, l, 0);
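-        // skip empty keys and the aggregate "TOTAL_*" rows; per-error rows are charted individually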
- if(!key || !*key || (key[0] == 'T' && key[1] == 'O' && key[2] == 'T' && key[3] == 'A' && key[4] == 'L' && key[5] == '_'))
- continue;
-
- dictionary_set(t->values, key, &v, sizeof(v));
- }
-
- t->updated = true;
-}
-
-static void read_pci_aer_count(const char *filename, struct aer_entry *t) {
- t->updated = false;
-
- if(unlikely(!t->ff)) {
- t->ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!t->ff))
- return;
- }
-
- t->ff = procfile_readall(t->ff);
- if(unlikely(!t->ff || procfile_lines(t->ff) < 1 || procfile_linewords(t->ff, 0) < 1))
- return;
-
- struct aer_value v = {
- .count = str2ull(procfile_lineword(t->ff, 0, 0), NULL)
- };
- dictionary_set(t->values, "count", &v, sizeof(v));
- t->updated = true;
-}
-
-static void add_label_from_link(struct aer_entry *a, const char *path, const char *link) {
- char name[FILENAME_MAX + 1];
- strncpyz(name, path, FILENAME_MAX);
- char *slash = strrchr(name, '/');
- if(slash)
- *slash = '\0';
-
- char name2[FILENAME_MAX + 1];
- snprintfz(name2, FILENAME_MAX, "%s/%s", name, link);
-
- ssize_t len = readlink(name2, name, FILENAME_MAX);
- if(len != -1) {
- name[len] = '\0'; // Null-terminate the string
- slash = strrchr(name, '/');
- if(slash) slash++;
- else slash = name;
- rrdlabels_add(a->st->rrdlabels, link, slash, RRDLABEL_SRC_AUTO);
- }
-}
-
-int do_proc_sys_devices_pci_aer(int update_every, usec_t dt __maybe_unused) {
- if(unlikely(!aer_root)) {
- int do_root_ports = CONFIG_BOOLEAN_AUTO;
- int do_pci_slots = CONFIG_BOOLEAN_NO;
-
- char buffer[100 + 1] = "";
- rrdlabels_get_value_strcpyz(localhost->rrdlabels, buffer, 100, "_virtualization");
- if(strcmp(buffer, "none") != 0) {
- // no need to run on virtualized environments
- do_root_ports = CONFIG_BOOLEAN_NO;
- do_pci_slots = CONFIG_BOOLEAN_NO;
- }
-
- do_root_ports = config_get_boolean("plugin:proc:/sys/class/pci/aer", "enable root ports", do_root_ports);
- do_pci_slots = config_get_boolean("plugin:proc:/sys/class/pci/aer", "enable pci slots", do_pci_slots);
-
- if(!do_root_ports && !do_pci_slots)
- return 1;
-
- aer_root = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
- dictionary_register_insert_callback(aer_root, aer_insert_callback, NULL);
-
- AER_TYPE types = ((do_root_ports) ? (AER_ROOTPORT_TOTAL_ERR_COR|AER_ROOTPORT_TOTAL_ERR_FATAL) : 0) |
- ((do_pci_slots) ? (AER_DEV_FATAL|AER_DEV_NONFATAL|AER_DEV_CORRECTABLE) : 0);
-
- find_all_pci_aer(types);
-
- if(!dictionary_entries(aer_root))
- return 1;
- }
-
- struct aer_entry *a;
- dfe_start_read(aer_root, a) {
- switch(a->type) {
- case AER_DEV_NONFATAL:
- case AER_DEV_FATAL:
- case AER_DEV_CORRECTABLE:
- read_pci_aer_values(a_dfe.name, a);
- break;
-
- case AER_ROOTPORT_TOTAL_ERR_COR:
- case AER_ROOTPORT_TOTAL_ERR_FATAL:
- read_pci_aer_count(a_dfe.name, a);
- break;
- }
-
- if(!a->updated)
- continue;
-
- if(!a->st) {
- const char *title = "";
- const char *context = "";
-
- switch(a->type) {
- case AER_DEV_NONFATAL:
- title = "PCI Advanced Error Reporting (AER) Non-Fatal Errors";
- context = "pci.aer_nonfatal";
- break;
-
- case AER_DEV_FATAL:
- title = "PCI Advanced Error Reporting (AER) Fatal Errors";
- context = "pci.aer_fatal";
- break;
-
- case AER_DEV_CORRECTABLE:
- title = "PCI Advanced Error Reporting (AER) Correctable Errors";
- context = "pci.aer_correctable";
- break;
-
- case AER_ROOTPORT_TOTAL_ERR_COR:
- title = "PCI Root-Port Advanced Error Reporting (AER) Correctable Errors";
- context = "pci.rootport_aer_correctable";
- break;
-
- case AER_ROOTPORT_TOTAL_ERR_FATAL:
- title = "PCI Root-Port Advanced Error Reporting (AER) Fatal Errors";
- context = "pci.rootport_aer_fatal";
- break;
-
- default:
- title = "Unknown PCI Advanced Error Reporting";
- context = "pci.unknown_aer";
- break;
- }
-
- char id[RRD_ID_LENGTH_MAX + 1];
- char nm[RRD_ID_LENGTH_MAX + 1];
- size_t len = strlen(pci_aer_dirname);
-
- const char *fname = a_dfe.name;
- if(strncmp(a_dfe.name, pci_aer_dirname, len) == 0)
- fname = &a_dfe.name[len];
-
- if(*fname == '/')
- fname++;
-
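-        // &context[4] skips the "pci." prefix, so ids become e.g. "aer_nonfatal_<device-path>"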
- snprintfz(id, RRD_ID_LENGTH_MAX, "%s_%s", &context[4], fname);
- char *slash = strrchr(id, '/');
- if(slash)
- *slash = '\0';
-
- netdata_fix_chart_id(id);
-
- snprintfz(nm, RRD_ID_LENGTH_MAX, "%s", fname);
- slash = strrchr(nm, '/');
- if(slash)
- *slash = '\0';
-
- a->st = rrdset_create_localhost(
- "pci"
- , id
- , NULL
- , "aer"
- , context
- , title
- , "errors/s"
- , PLUGIN_PROC_NAME
- , "/sys/devices/pci/aer"
- , NETDATA_CHART_PRIO_PCI_AER
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdlabels_add(a->st->rrdlabels, "device", nm, RRDLABEL_SRC_AUTO);
- add_label_from_link(a, a_dfe.name, "driver");
-
- struct aer_value *v;
- dfe_start_read(a->values, v) {
- v->rd = rrddim_add(a->st, v_dfe.name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
- dfe_done(v);
- }
-
- struct aer_value *v;
- dfe_start_read(a->values, v) {
- if(unlikely(!v->rd))
- v->rd = rrddim_add(a->st, v_dfe.name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- rrddim_set_by_pointer(a->st, v->rd, (collected_number)v->count);
- }
- dfe_done(v);
-
- rrdset_done(a->st);
- }
- dfe_done(a);
-
- return 0;
-}
diff --git a/collectors/proc.plugin/sys_devices_system_edac_mc.c b/collectors/proc.plugin/sys_devices_system_edac_mc.c
deleted file mode 100644
index fdaa22cb7..000000000
--- a/collectors/proc.plugin/sys_devices_system_edac_mc.c
+++ /dev/null
@@ -1,298 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-struct edac_count {
- bool updated;
- char *filename;
- procfile *ff;
- kernel_uint_t count;
- RRDDIM *rd;
-};
-
-struct edac_dimm {
- char *name;
-
- struct edac_count ce;
- struct edac_count ue;
-
- RRDSET *st;
-
- struct edac_dimm *prev, *next;
-};
-
-struct mc {
- char *name;
-
- struct edac_count ce;
- struct edac_count ue;
- struct edac_count ce_noinfo;
- struct edac_count ue_noinfo;
-
- RRDSET *st;
-
- struct edac_dimm *dimms;
-
- struct mc *prev, *next;
-};
-
-static struct mc *mc_root = NULL;
-static char *mc_dirname = NULL;
-
-static void find_all_mc() {
- char name[FILENAME_MAX + 1];
- snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/edac/mc");
- mc_dirname = config_get("plugin:proc:/sys/devices/system/edac/mc", "directory to monitor", name);
-
- DIR *dir = opendir(mc_dirname);
- if(unlikely(!dir)) {
- collector_error("Cannot read EDAC memory errors directory '%s'", mc_dirname);
- return;
- }
-
- struct dirent *de = NULL;
- while((de = readdir(dir))) {
- if(de->d_type == DT_DIR && de->d_name[0] == 'm' && de->d_name[1] == 'c' && isdigit(de->d_name[2])) {
- struct mc *m = callocz(1, sizeof(struct mc));
- m->name = strdupz(de->d_name);
-
- struct stat st;
-
- snprintfz(name, FILENAME_MAX, "%s/%s/ce_count", mc_dirname, de->d_name);
- if(stat(name, &st) != -1)
- m->ce.filename = strdupz(name);
-
- snprintfz(name, FILENAME_MAX, "%s/%s/ue_count", mc_dirname, de->d_name);
- if(stat(name, &st) != -1)
- m->ue.filename = strdupz(name);
-
- snprintfz(name, FILENAME_MAX, "%s/%s/ce_noinfo_count", mc_dirname, de->d_name);
- if(stat(name, &st) != -1)
- m->ce_noinfo.filename = strdupz(name);
-
- snprintfz(name, FILENAME_MAX, "%s/%s/ue_noinfo_count", mc_dirname, de->d_name);
- if(stat(name, &st) != -1)
- m->ue_noinfo.filename = strdupz(name);
-
- if(!m->ce.filename && !m->ue.filename && !m->ce_noinfo.filename && !m->ue_noinfo.filename) {
- freez(m->name);
- freez(m);
- }
- else
- DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(mc_root, m, prev, next);
- }
- }
- closedir(dir);
-
- for(struct mc *m = mc_root; m ;m = m->next) {
- snprintfz(name, FILENAME_MAX, "%s/%s", mc_dirname, m->name);
- dir = opendir(name);
- if(!dir) {
- collector_error("Cannot read EDAC memory errors directory '%s'", name);
- continue;
- }
-
- while((de = readdir(dir))) {
- // it can be dimmX or rankX directory
- // https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#f5
-
- if (de->d_type == DT_DIR &&
-                (strncmp(de->d_name, "rank", 4) == 0 || strncmp(de->d_name, "dimm", 4) == 0) &&
- isdigit(de->d_name[4])) {
-
- struct edac_dimm *d = callocz(1, sizeof(struct edac_dimm));
- d->name = strdupz(de->d_name);
-
- struct stat st;
-
- snprintfz(name, FILENAME_MAX, "%s/%s/%s/dimm_ce_count", mc_dirname, m->name, de->d_name);
- if(stat(name, &st) != -1)
- d->ce.filename = strdupz(name);
-
- snprintfz(name, FILENAME_MAX, "%s/%s/%s/dimm_ue_count", mc_dirname, m->name, de->d_name);
- if(stat(name, &st) != -1)
- d->ue.filename = strdupz(name);
-
- if(!d->ce.filename && !d->ue.filename) {
- freez(d->name);
- freez(d);
- }
- else
- DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(m->dimms, d, prev, next);
- }
- }
- closedir(dir);
- }
-}
-
-static kernel_uint_t read_edac_count(struct edac_count *t) {
- t->updated = false;
- t->count = 0;
-
- if(t->filename) {
- if(unlikely(!t->ff)) {
- t->ff = procfile_open(t->filename, " \t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!t->ff))
- return 0;
- }
-
- t->ff = procfile_readall(t->ff);
- if(unlikely(!t->ff || procfile_lines(t->ff) < 1 || procfile_linewords(t->ff, 0) < 1))
- return 0;
-
- t->count = str2ull(procfile_lineword(t->ff, 0, 0), NULL);
- t->updated = true;
- }
-
- return t->count;
-}
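-
-// A minimal stdio-only sketch (illustration, not used by the collector) of
-// reading one of the *_count files located above: each file holds a single
-// decimal counter. Assumes <stdio.h> is available through plugin_proc.h.
-static inline unsigned long long edac_count_read_once(const char *path) {
-    unsigned long long v = 0;
-    FILE *fp = fopen(path, "r");
-    if(fp) {
-        if(fscanf(fp, "%llu", &v) != 1)
-            v = 0;
-        fclose(fp);
-    }
-    return v;
-}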
-
-static bool read_edac_mc_file(const char *mc, const char *filename, char *out, size_t out_size) {
- char f[FILENAME_MAX + 1];
- snprintfz(f, FILENAME_MAX, "%s/%s/%s", mc_dirname, mc, filename);
- if(read_file(f, out, out_size) != 0) {
- collector_error("EDAC: cannot read file '%s'", f);
- return false;
- }
- return true;
-}
-
-static bool read_edac_mc_rank_file(const char *mc, const char *rank, const char *filename, char *out, size_t out_size) {
- char f[FILENAME_MAX + 1];
- snprintfz(f, FILENAME_MAX, "%s/%s/%s/%s", mc_dirname, mc, rank, filename);
- if(read_file(f, out, out_size) != 0) {
- collector_error("EDAC: cannot read file '%s'", f);
- return false;
- }
- return true;
-}
-
-int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt __maybe_unused) {
- if(unlikely(!mc_root)) {
- find_all_mc();
-
- if(!mc_root)
- // don't call this again
- return 1;
- }
-
- for(struct mc *m = mc_root; m; m = m->next) {
- read_edac_count(&m->ce);
- read_edac_count(&m->ce_noinfo);
- read_edac_count(&m->ue);
- read_edac_count(&m->ue_noinfo);
-
- for(struct edac_dimm *d = m->dimms; d ;d = d->next) {
- read_edac_count(&d->ce);
- read_edac_count(&d->ue);
- }
- }
-
- // --------------------------------------------------------------------
-
- for(struct mc *m = mc_root; m ; m = m->next) {
- if(unlikely(!m->ce.updated && !m->ue.updated && !m->ce_noinfo.updated && !m->ue_noinfo.updated))
- continue;
-
- if(unlikely(!m->st)) {
- char id[RRD_ID_LENGTH_MAX + 1];
- snprintfz(id, RRD_ID_LENGTH_MAX, "edac_%s", m->name);
- m->st = rrdset_create_localhost(
- "mem"
- , id
- , NULL
- , "edac"
- , "mem.edac_mc"
- , "Memory Controller (MC) Error Detection And Correction (EDAC) Errors"
- , "errors/s"
- , PLUGIN_PROC_NAME
- , "/sys/devices/system/edac/mc"
- , NETDATA_CHART_PRIO_MEM_HW_ECC_CE
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdlabels_add(m->st->rrdlabels, "controller", m->name, RRDLABEL_SRC_AUTO);
-
- char buffer[1024 + 1];
-
- if(read_edac_mc_file(m->name, "mc_name", buffer, 1024))
- rrdlabels_add(m->st->rrdlabels, "mc_name", buffer, RRDLABEL_SRC_AUTO);
-
- if(read_edac_mc_file(m->name, "size_mb", buffer, 1024))
- rrdlabels_add(m->st->rrdlabels, "size_mb", buffer, RRDLABEL_SRC_AUTO);
-
- if(read_edac_mc_file(m->name, "max_location", buffer, 1024))
- rrdlabels_add(m->st->rrdlabels, "max_location", buffer, RRDLABEL_SRC_AUTO);
-
- m->ce.rd = rrddim_add(m->st, "correctable", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- m->ue.rd = rrddim_add(m->st, "uncorrectable", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- m->ce_noinfo.rd = rrddim_add(m->st, "correctable_noinfo", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- m->ue_noinfo.rd = rrddim_add(m->st, "uncorrectable_noinfo", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(m->st, m->ce.rd, (collected_number)m->ce.count);
- rrddim_set_by_pointer(m->st, m->ue.rd, (collected_number)m->ue.count);
- rrddim_set_by_pointer(m->st, m->ce_noinfo.rd, (collected_number)m->ce_noinfo.count);
- rrddim_set_by_pointer(m->st, m->ue_noinfo.rd, (collected_number)m->ue_noinfo.count);
-
- rrdset_done(m->st);
-
- for(struct edac_dimm *d = m->dimms; d ;d = d->next) {
- if(unlikely(!d->ce.updated && !d->ue.updated))
- continue;
-
- if(unlikely(!d->st)) {
- char id[RRD_ID_LENGTH_MAX + 1];
- snprintfz(id, RRD_ID_LENGTH_MAX, "edac_%s_%s", m->name, d->name);
- d->st = rrdset_create_localhost(
- "mem"
- , id
- , NULL
- , "edac"
- , "mem.edac_mc_dimm"
- , "DIMM Error Detection And Correction (EDAC) Errors"
- , "errors/s"
- , PLUGIN_PROC_NAME
- , "/sys/devices/system/edac/mc"
- , NETDATA_CHART_PRIO_MEM_HW_ECC_CE + 1
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdlabels_add(d->st->rrdlabels, "controller", m->name, RRDLABEL_SRC_AUTO);
- rrdlabels_add(d->st->rrdlabels, "dimm", d->name, RRDLABEL_SRC_AUTO);
-
- char buffer[1024 + 1];
-
- if (read_edac_mc_rank_file(m->name, d->name, "dimm_dev_type", buffer, 1024))
- rrdlabels_add(d->st->rrdlabels, "dimm_dev_type", buffer, RRDLABEL_SRC_AUTO);
-
- if (read_edac_mc_rank_file(m->name, d->name, "dimm_edac_mode", buffer, 1024))
- rrdlabels_add(d->st->rrdlabels, "dimm_edac_mode", buffer, RRDLABEL_SRC_AUTO);
-
- if (read_edac_mc_rank_file(m->name, d->name, "dimm_label", buffer, 1024))
- rrdlabels_add(d->st->rrdlabels, "dimm_label", buffer, RRDLABEL_SRC_AUTO);
-
- if (read_edac_mc_rank_file(m->name, d->name, "dimm_location", buffer, 1024))
- rrdlabels_add(d->st->rrdlabels, "dimm_location", buffer, RRDLABEL_SRC_AUTO);
-
- if (read_edac_mc_rank_file(m->name, d->name, "dimm_mem_type", buffer, 1024))
- rrdlabels_add(d->st->rrdlabels, "dimm_mem_type", buffer, RRDLABEL_SRC_AUTO);
-
- if (read_edac_mc_rank_file(m->name, d->name, "size", buffer, 1024))
- rrdlabels_add(d->st->rrdlabels, "size", buffer, RRDLABEL_SRC_AUTO);
-
- d->ce.rd = rrddim_add(d->st, "correctable", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->ue.rd = rrddim_add(d->st, "uncorrectable", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(d->st, d->ce.rd, (collected_number)d->ce.count);
- rrddim_set_by_pointer(d->st, d->ue.rd, (collected_number)d->ue.count);
-
- rrdset_done(d->st);
- }
- }
-
- return 0;
-}
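-
-// Worked example (illustration): both charts above use
-// RRD_ALGORITHM_INCREMENTAL, so the raw monotonic counters are collected and
-// Netdata renders the per-second rate; ce_count going 10 -> 13 across one
-// 1-second update is charted as 3 errors/s. A minimal equivalent:
-static inline double counter_rate(unsigned long long prev, unsigned long long cur, int seconds) {
-    // guard against counter resets and a zero interval
-    return (seconds > 0 && cur >= prev) ? (double)(cur - prev) / (double)seconds : 0.0;
-}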
diff --git a/collectors/proc.plugin/sys_devices_system_node.c b/collectors/proc.plugin/sys_devices_system_node.c
deleted file mode 100644
index d6db94a27..000000000
--- a/collectors/proc.plugin/sys_devices_system_node.c
+++ /dev/null
@@ -1,165 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-struct node {
- char *name;
- char *numastat_filename;
- procfile *numastat_ff;
- RRDSET *numastat_st;
- struct node *next;
-};
-static struct node *numa_root = NULL;
-
-static int find_all_nodes() {
- int numa_node_count = 0;
- char name[FILENAME_MAX + 1];
- snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node");
- char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
-
- DIR *dir = opendir(dirname);
- if(!dir) {
- collector_error("Cannot read NUMA node directory '%s'", dirname);
- return 0;
- }
-
- struct dirent *de = NULL;
- while((de = readdir(dir))) {
- if(de->d_type != DT_DIR)
- continue;
-
- if(strncmp(de->d_name, "node", 4) != 0)
- continue;
-
- if(!isdigit(de->d_name[4]))
- continue;
-
- numa_node_count++;
-
- struct node *m = callocz(1, sizeof(struct node));
- m->name = strdupz(de->d_name);
-
- struct stat st;
-
- snprintfz(name, FILENAME_MAX, "%s/%s/numastat", dirname, de->d_name);
- if(stat(name, &st) == -1) {
- freez(m->name);
- freez(m);
- continue;
- }
-
- m->numastat_filename = strdupz(name);
-
- m->next = numa_root;
- numa_root = m;
- }
-
- closedir(dir);
-
- return numa_node_count;
-}
-
-int do_proc_sys_devices_system_node(int update_every, usec_t dt) {
- (void)dt;
-
- static uint32_t hash_local_node = 0, hash_numa_foreign = 0, hash_interleave_hit = 0, hash_other_node = 0, hash_numa_hit = 0, hash_numa_miss = 0;
- static int do_numastat = -1, numa_node_count = 0;
- struct node *m;
-
- if(unlikely(numa_root == NULL)) {
- numa_node_count = find_all_nodes();
- if(unlikely(numa_root == NULL))
- return 1;
- }
-
- if(unlikely(do_numastat == -1)) {
- do_numastat = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/node", "enable per-node numa metrics", CONFIG_BOOLEAN_AUTO);
-
- hash_local_node = simple_hash("local_node");
- hash_numa_foreign = simple_hash("numa_foreign");
- hash_interleave_hit = simple_hash("interleave_hit");
- hash_other_node = simple_hash("other_node");
- hash_numa_hit = simple_hash("numa_hit");
- hash_numa_miss = simple_hash("numa_miss");
- }
-
- if(do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO &&
- (numa_node_count >= 2 || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- for(m = numa_root; m; m = m->next) {
- if(m->numastat_filename) {
-
- if(unlikely(!m->numastat_ff)) {
- m->numastat_ff = procfile_open(m->numastat_filename, " ", PROCFILE_FLAG_DEFAULT);
-
- if(unlikely(!m->numastat_ff))
- continue;
- }
-
- m->numastat_ff = procfile_readall(m->numastat_ff);
- if(unlikely(!m->numastat_ff || procfile_lines(m->numastat_ff) < 1 || procfile_linewords(m->numastat_ff, 0) < 1))
- continue;
-
- if(unlikely(!m->numastat_st)) {
- m->numastat_st = rrdset_create_localhost(
- "mem"
- , m->name
- , NULL
- , "numa"
- , "mem.numa_nodes"
- , "NUMA events"
- , "events/s"
- , PLUGIN_PROC_NAME
- , "/sys/devices/system/node"
- , NETDATA_CHART_PRIO_MEM_NUMA_NODES
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdlabels_add(m->numastat_st->rrdlabels, "numa_node", m->name, RRDLABEL_SRC_AUTO);
-
- rrdset_flag_set(m->numastat_st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(m->numastat_st, "numa_hit", "hit", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(m->numastat_st, "numa_miss", "miss", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(m->numastat_st, "local_node", "local", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(m->numastat_st, "numa_foreign", "foreign", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(m->numastat_st, "interleave_hit", "interleave", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(m->numastat_st, "other_node", "other", 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- }
-
- size_t lines = procfile_lines(m->numastat_ff), l;
- for(l = 0; l < lines; l++) {
- size_t words = procfile_linewords(m->numastat_ff, l);
-
- if(unlikely(words < 2)) {
- if(unlikely(words))
- collector_error("Cannot read %s numastat line %zu. Expected 2 params, read %zu.", m->name, l, words);
- continue;
- }
-
- char *name = procfile_lineword(m->numastat_ff, l, 0);
- char *value = procfile_lineword(m->numastat_ff, l, 1);
-
- if (unlikely(!name || !*name || !value || !*value))
- continue;
-
- uint32_t hash = simple_hash(name);
- if(likely(
- (hash == hash_numa_hit && !strcmp(name, "numa_hit"))
- || (hash == hash_numa_miss && !strcmp(name, "numa_miss"))
- || (hash == hash_local_node && !strcmp(name, "local_node"))
- || (hash == hash_numa_foreign && !strcmp(name, "numa_foreign"))
- || (hash == hash_interleave_hit && !strcmp(name, "interleave_hit"))
- || (hash == hash_other_node && !strcmp(name, "other_node"))
- ))
- rrddim_set(m->numastat_st, name, (collected_number)str2kernel_uint_t(value));
- }
-
- rrdset_done(m->numastat_st);
- }
- }
- }
-
- return 0;
-}
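-
-// Sketch of the hash fast-path used above (illustration): simple_hash() is
-// computed once per keyword, so strcmp() only runs on probable matches. The
-// helper below is hypothetical, not part of the collector:
-static inline int is_numa_hit(const char *name) {
-    static uint32_t hash = 0;
-    if(unlikely(!hash))
-        hash = simple_hash("numa_hit");
-    return simple_hash(name) == hash && strcmp(name, "numa_hit") == 0;
-}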
diff --git a/collectors/proc.plugin/sys_fs_btrfs.c b/collectors/proc.plugin/sys_fs_btrfs.c
deleted file mode 100644
index f1d6fe720..000000000
--- a/collectors/proc.plugin/sys_fs_btrfs.c
+++ /dev/null
@@ -1,1155 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_BTRFS_NAME "/sys/fs/btrfs"
-
-typedef struct btrfs_disk {
- char *name;
- uint32_t hash;
- int exists;
-
- char *size_filename;
- unsigned long long size;
-
- struct btrfs_disk *next;
-} BTRFS_DISK;
-
-typedef struct btrfs_device {
- int id;
- int exists;
-
- char *error_stats_filename;
- RRDSET *st_error_stats;
- RRDDIM *rd_write_errs;
- RRDDIM *rd_read_errs;
- RRDDIM *rd_flush_errs;
- RRDDIM *rd_corruption_errs;
- RRDDIM *rd_generation_errs;
- collected_number write_errs;
- collected_number read_errs;
- collected_number flush_errs;
- collected_number corruption_errs;
- collected_number generation_errs;
-
- struct btrfs_device *next;
-} BTRFS_DEVICE;
-
-typedef struct btrfs_node {
- int exists;
- int logged_error;
-
- char *id;
- uint32_t hash;
-
- char *label;
-
- #define declare_btrfs_allocation_section_field(SECTION, FIELD) \
- char *allocation_ ## SECTION ## _ ## FIELD ## _filename; \
- unsigned long long int allocation_ ## SECTION ## _ ## FIELD;
-
- #define declare_btrfs_allocation_field(FIELD) \
- char *allocation_ ## FIELD ## _filename; \
- unsigned long long int allocation_ ## FIELD;
-
- RRDSET *st_allocation_disks;
- RRDDIM *rd_allocation_disks_unallocated;
- RRDDIM *rd_allocation_disks_data_used;
- RRDDIM *rd_allocation_disks_data_free;
- RRDDIM *rd_allocation_disks_metadata_used;
- RRDDIM *rd_allocation_disks_metadata_free;
- RRDDIM *rd_allocation_disks_system_used;
- RRDDIM *rd_allocation_disks_system_free;
- unsigned long long all_disks_total;
-
- RRDSET *st_allocation_data;
- RRDDIM *rd_allocation_data_free;
- RRDDIM *rd_allocation_data_used;
- declare_btrfs_allocation_section_field(data, total_bytes)
- declare_btrfs_allocation_section_field(data, bytes_used)
- declare_btrfs_allocation_section_field(data, disk_total)
- declare_btrfs_allocation_section_field(data, disk_used)
-
- RRDSET *st_allocation_metadata;
- RRDDIM *rd_allocation_metadata_free;
- RRDDIM *rd_allocation_metadata_used;
- RRDDIM *rd_allocation_metadata_reserved;
- declare_btrfs_allocation_section_field(metadata, total_bytes)
- declare_btrfs_allocation_section_field(metadata, bytes_used)
- declare_btrfs_allocation_section_field(metadata, disk_total)
- declare_btrfs_allocation_section_field(metadata, disk_used)
- //declare_btrfs_allocation_field(global_rsv_reserved)
- declare_btrfs_allocation_field(global_rsv_size)
-
- RRDSET *st_allocation_system;
- RRDDIM *rd_allocation_system_free;
- RRDDIM *rd_allocation_system_used;
- declare_btrfs_allocation_section_field(system, total_bytes)
- declare_btrfs_allocation_section_field(system, bytes_used)
- declare_btrfs_allocation_section_field(system, disk_total)
- declare_btrfs_allocation_section_field(system, disk_used)
-
- // --------------------------------------------------------------------
- // commit stats
-
- char *commit_stats_filename;
-
- RRDSET *st_commits;
- RRDDIM *rd_commits;
- long long commits_total;
- collected_number commits_new;
-
- RRDSET *st_commits_percentage_time;
- RRDDIM *rd_commits_percentage_time;
- long long commit_timings_total;
- long long commits_percentage_time;
-
- RRDSET *st_commit_timings;
- RRDDIM *rd_commit_timings_last;
- RRDDIM *rd_commit_timings_max;
- collected_number commit_timings_last;
- collected_number commit_timings_max;
-
- BTRFS_DISK *disks;
-
- BTRFS_DEVICE *devices;
-
- struct btrfs_node *next;
-} BTRFS_NODE;
-
-static BTRFS_NODE *nodes = NULL;
-
-static inline int collect_btrfs_error_stats(BTRFS_DEVICE *device){
- char buffer[120 + 1];
-
- int ret = read_file(device->error_stats_filename, buffer, 120);
- if(unlikely(ret)) {
- collector_error("BTRFS: failed to read '%s'", device->error_stats_filename);
- device->write_errs = 0;
- device->read_errs = 0;
- device->flush_errs = 0;
- device->corruption_errs = 0;
- device->generation_errs = 0;
- return ret;
- }
-
- char *p = buffer;
- while(p){
- char *val = strsep_skip_consecutive_separators(&p, "\n");
- if(unlikely(!val || !*val)) break;
- char *key = strsep_skip_consecutive_separators(&val, " ");
-
- if(!strcmp(key, "write_errs")) device->write_errs = str2ull(val, NULL);
- else if(!strcmp(key, "read_errs")) device->read_errs = str2ull(val, NULL);
- else if(!strcmp(key, "flush_errs")) device->flush_errs = str2ull(val, NULL);
- else if(!strcmp(key, "corruption_errs")) device->corruption_errs = str2ull(val, NULL);
- else if(!strcmp(key, "generation_errs")) device->generation_errs = str2ull(val, NULL);
- }
- return 0;
-}
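-
-// Format note (illustration): a device's error_stats file is "key value" per
-// line, e.g. "write_errs 0\nread_errs 0\n...". A minimal standard-C parse of
-// one such line, assuming only <stdio.h>, would be:
-//
-//   char key[32]; unsigned long long v;
-//   if(sscanf(line, "%31s %llu", key, &v) == 2) { /* dispatch on key */ }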
-
-static inline int collect_btrfs_commits_stats(BTRFS_NODE *node, int update_every){
- char buffer[120 + 1];
-
- int ret = read_file(node->commit_stats_filename, buffer, 120);
- if(unlikely(ret)) {
- collector_error("BTRFS: failed to read '%s'", node->commit_stats_filename);
- node->commits_total = 0;
- node->commits_new = 0;
- node->commit_timings_last = 0;
- node->commit_timings_max = 0;
- node->commit_timings_total = 0;
- node->commits_percentage_time = 0;
-
- return ret;
- }
-
- char *p = buffer;
- while(p){
- char *val = strsep_skip_consecutive_separators(&p, "\n");
- if(unlikely(!val || !*val)) break;
- char *key = strsep_skip_consecutive_separators(&val, " ");
-
- if(!strcmp(key, "commits")){
- long long commits_total_new = str2ull(val, NULL);
- if(likely(node->commits_total)){
- if((node->commits_new = commits_total_new - node->commits_total))
- node->commits_total = commits_total_new;
- } else node->commits_total = commits_total_new;
- }
- else if(!strcmp(key, "last_commit_ms")) node->commit_timings_last = str2ull(val, NULL);
- else if(!strcmp(key, "max_commit_ms")) node->commit_timings_max = str2ull(val, NULL);
- else if(!strcmp(key, "total_commit_ms")) {
- long long commit_timings_total_new = str2ull(val, NULL);
- if(likely(node->commit_timings_total)){
-                    long long time_delta = commit_timings_total_new - node->commit_timings_total;
- if(time_delta){
- node->commits_percentage_time = time_delta * 10 / update_every;
- node->commit_timings_total = commit_timings_total_new;
- } else node->commits_percentage_time = 0;
-
- } else node->commit_timings_total = commit_timings_total_new;
- }
- }
- return 0;
-}
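-
-// Worked example for commits_percentage_time (illustration): if
-// total_commit_ms grows by 250 ms across a 5 s update interval, commits used
-// 250 / 5000 = 5% of wall time; the code stores 250 * 10 / 5 = 500 and the
-// chart dimension's divisor of 100 renders it as 5.00%.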
-
-static inline void btrfs_free_commits_stats(BTRFS_NODE *node){
- if(node->st_commits){
- rrdset_is_obsolete___safe_from_collector_thread(node->st_commits);
- rrdset_is_obsolete___safe_from_collector_thread(node->st_commit_timings);
- }
- freez(node->commit_stats_filename);
- node->commit_stats_filename = NULL;
-}
-
-static inline void btrfs_free_disk(BTRFS_DISK *d) {
- freez(d->name);
- freez(d->size_filename);
- freez(d);
-}
-
-static inline void btrfs_free_device(BTRFS_DEVICE *d) {
- if(d->st_error_stats)
- rrdset_is_obsolete___safe_from_collector_thread(d->st_error_stats);
- freez(d->error_stats_filename);
- freez(d);
-}
-
-static inline void btrfs_free_node(BTRFS_NODE *node) {
- // collector_info("BTRFS: destroying '%s'", node->id);
-
- if(node->st_allocation_disks)
- rrdset_is_obsolete___safe_from_collector_thread(node->st_allocation_disks);
-
- if(node->st_allocation_data)
- rrdset_is_obsolete___safe_from_collector_thread(node->st_allocation_data);
-
- if(node->st_allocation_metadata)
- rrdset_is_obsolete___safe_from_collector_thread(node->st_allocation_metadata);
-
- if(node->st_allocation_system)
- rrdset_is_obsolete___safe_from_collector_thread(node->st_allocation_system);
-
- freez(node->allocation_data_bytes_used_filename);
- freez(node->allocation_data_total_bytes_filename);
-
- freez(node->allocation_metadata_bytes_used_filename);
- freez(node->allocation_metadata_total_bytes_filename);
-
- freez(node->allocation_system_bytes_used_filename);
- freez(node->allocation_system_total_bytes_filename);
-
- btrfs_free_commits_stats(node);
-
- while(node->disks) {
- BTRFS_DISK *d = node->disks;
- node->disks = node->disks->next;
- btrfs_free_disk(d);
- }
-
- while(node->devices) {
- BTRFS_DEVICE *d = node->devices;
- node->devices = node->devices->next;
- btrfs_free_device(d);
- }
-
- freez(node->label);
- freez(node->id);
- freez(node);
-}
-
-static inline int find_btrfs_disks(BTRFS_NODE *node, const char *path) {
- char filename[FILENAME_MAX + 1];
-
- node->all_disks_total = 0;
-
- BTRFS_DISK *d;
- for(d = node->disks ; d ; d = d->next)
- d->exists = 0;
-
- DIR *dir = opendir(path);
- if (!dir) {
- if(!node->logged_error) {
- collector_error("BTRFS: Cannot open directory '%s'.", path);
- node->logged_error = 1;
- }
- return 1;
- }
- node->logged_error = 0;
-
- struct dirent *de = NULL;
- while ((de = readdir(dir))) {
- if (de->d_type != DT_LNK
- || !strcmp(de->d_name, ".")
- || !strcmp(de->d_name, "..")
- ) {
- // collector_info("BTRFS: ignoring '%s'", de->d_name);
- continue;
- }
-
- uint32_t hash = simple_hash(de->d_name);
-
- // --------------------------------------------------------------------
- // search for it
-
- for(d = node->disks ; d ; d = d->next) {
- if(hash == d->hash && !strcmp(de->d_name, d->name))
- break;
- }
-
- // --------------------------------------------------------------------
- // did we find it?
-
- if(!d) {
-            d = callocz(1, sizeof(BTRFS_DISK));
-
- d->name = strdupz(de->d_name);
- d->hash = simple_hash(d->name);
-
- snprintfz(filename, FILENAME_MAX, "%s/%s/size", path, de->d_name);
- d->size_filename = strdupz(filename);
-
- // link it
- d->next = node->disks;
- node->disks = d;
- }
-
- d->exists = 1;
-
-
- // --------------------------------------------------------------------
- // update the values
-
- if(read_single_number_file(d->size_filename, &d->size) != 0) {
- collector_error("BTRFS: failed to read '%s'", d->size_filename);
- d->exists = 0;
- continue;
- }
-
- // /sys/block/<name>/size is in fixed-size sectors of 512 bytes
- // https://github.com/torvalds/linux/blob/v6.2/block/genhd.c#L946-L950
- // https://github.com/torvalds/linux/blob/v6.2/include/linux/types.h#L120-L121
- // (also see #3481, #3483)
- node->all_disks_total += d->size * 512;
- }
- closedir(dir);
-
- // ------------------------------------------------------------------------
- // cleanup
-
- BTRFS_DISK *last = NULL;
- d = node->disks;
-
- while(d) {
- if(unlikely(!d->exists)) {
- if(unlikely(node->disks == d)) {
- node->disks = d->next;
- btrfs_free_disk(d);
- d = node->disks;
- last = NULL;
- }
- else {
- last->next = d->next;
- btrfs_free_disk(d);
- d = last->next;
- }
-
- continue;
- }
-
- last = d;
- d = d->next;
- }
-
- return 0;
-}
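-
-// The mark-and-sweep pattern above (clear every 'exists' flag, re-mark what
-// readdir() still reports, unlink the rest) repeats for devices and pools
-// below. A generic pointer-to-pointer sketch with a hypothetical node type
-// avoids the explicit 'last' bookkeeping:
-//
-//   struct item { int exists; struct item *next; };
-//   for(struct item **pp = &head; *pp; ) {
-//       if(!(*pp)->exists) { struct item *dead = *pp; *pp = dead->next; freez(dead); }
-//       else pp = &(*pp)->next;
-//   }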
-
-static inline int find_btrfs_devices(BTRFS_NODE *node, const char *path) {
- char filename[FILENAME_MAX + 1];
-
- BTRFS_DEVICE *d;
- for(d = node->devices ; d ; d = d->next)
- d->exists = 0;
-
- DIR *dir = opendir(path);
- if (!dir) {
- if(!node->logged_error) {
- collector_error("BTRFS: Cannot open directory '%s'.", path);
- node->logged_error = 1;
- }
- return 1;
- }
- node->logged_error = 0;
-
- struct dirent *de = NULL;
- while ((de = readdir(dir))) {
- if (de->d_type != DT_DIR
- || !strcmp(de->d_name, ".")
- || !strcmp(de->d_name, "..")
- ) {
- // collector_info("BTRFS: ignoring '%s'", de->d_name);
- continue;
- }
-
- // internal_error("BTRFS: device found '%s'", de->d_name);
-
- // --------------------------------------------------------------------
- // search for it
-
- for(d = node->devices ; d ; d = d->next) {
- if(str2ll(de->d_name, NULL) == d->id){
- // collector_info("BTRFS: existing device id '%d'", d->id);
- break;
- }
- }
-
- // --------------------------------------------------------------------
- // did we find it?
-
- if(!d) {
-            d = callocz(1, sizeof(BTRFS_DEVICE));
-
- d->id = str2ll(de->d_name, NULL);
- // collector_info("BTRFS: new device with id '%d'", d->id);
-
- snprintfz(filename, FILENAME_MAX, "%s/%d/error_stats", path, d->id);
- d->error_stats_filename = strdupz(filename);
- // collector_info("BTRFS: error_stats_filename '%s'", filename);
-
- // link it
- d->next = node->devices;
- node->devices = d;
- }
-
- d->exists = 1;
-
-
- // --------------------------------------------------------------------
- // update the values
-
- if(unlikely(collect_btrfs_error_stats(d)))
-            d->exists = 0; // 'd' will be garbage collected in the loop below
- }
- closedir(dir);
-
- // ------------------------------------------------------------------------
- // cleanup
-
- BTRFS_DEVICE *last = NULL;
- d = node->devices;
-
- while(d) {
- if(unlikely(!d->exists)) {
- if(unlikely(node->devices == d)) {
- node->devices = d->next;
- btrfs_free_device(d);
- d = node->devices;
- last = NULL;
- }
- else {
- last->next = d->next;
- btrfs_free_device(d);
- d = last->next;
- }
-
- continue;
- }
-
- last = d;
- d = d->next;
- }
-
- return 0;
-}
-
-
-static inline int find_all_btrfs_pools(const char *path, int update_every) {
- static int logged_error = 0;
- char filename[FILENAME_MAX + 1];
-
- BTRFS_NODE *node;
- for(node = nodes ; node ; node = node->next)
- node->exists = 0;
-
- DIR *dir = opendir(path);
- if (!dir) {
- if(!logged_error) {
- collector_error("BTRFS: Cannot open directory '%s'.", path);
- logged_error = 1;
- }
- return 1;
- }
- logged_error = 0;
-
- struct dirent *de = NULL;
- while ((de = readdir(dir))) {
- if(de->d_type != DT_DIR
- || !strcmp(de->d_name, ".")
- || !strcmp(de->d_name, "..")
- || !strcmp(de->d_name, "features")
- ) {
- // collector_info("BTRFS: ignoring '%s'", de->d_name);
- continue;
- }
-
- uint32_t hash = simple_hash(de->d_name);
-
- // search for it
- for(node = nodes ; node ; node = node->next) {
- if(hash == node->hash && !strcmp(de->d_name, node->id))
- break;
- }
-
- // did we find it?
- if(node) {
- // collector_info("BTRFS: already exists '%s'", de->d_name);
- node->exists = 1;
-
- // update the disk sizes
- snprintfz(filename, FILENAME_MAX, "%s/%s/devices", path, de->d_name);
- find_btrfs_disks(node, filename);
-
- // update devices
- snprintfz(filename, FILENAME_MAX, "%s/%s/devinfo", path, de->d_name);
- find_btrfs_devices(node, filename);
-
- continue;
- }
-
- // collector_info("BTRFS: adding '%s'", de->d_name);
-
- // not found, create it
-        node = callocz(1, sizeof(BTRFS_NODE));
-
- node->id = strdupz(de->d_name);
- node->hash = simple_hash(node->id);
- node->exists = 1;
-
- {
- char label[FILENAME_MAX + 1] = "";
-
- snprintfz(filename, FILENAME_MAX, "%s/%s/label", path, de->d_name);
- if(read_file(filename, label, FILENAME_MAX) != 0) {
- collector_error("BTRFS: failed to read '%s'", filename);
- btrfs_free_node(node);
- continue;
- }
-
- char *s = label;
- if (s[0])
- s = trim(label);
-
- if(s && s[0])
- node->label = strdupz(s);
- else
- node->label = strdupz(node->id);
- }
-
- // --------------------------------------------------------------------
- // macros to simplify our life
-
- #define init_btrfs_allocation_field(FIELD) {\
- snprintfz(filename, FILENAME_MAX, "%s/%s/allocation/" #FIELD, path, de->d_name); \
- if(read_single_number_file(filename, &node->allocation_ ## FIELD) != 0) {\
- collector_error("BTRFS: failed to read '%s'", filename);\
- btrfs_free_node(node);\
- continue;\
- }\
- if(!node->allocation_ ## FIELD ## _filename)\
- node->allocation_ ## FIELD ## _filename = strdupz(filename);\
- }
-
- #define init_btrfs_allocation_section_field(SECTION, FIELD) {\
- snprintfz(filename, FILENAME_MAX, "%s/%s/allocation/" #SECTION "/" #FIELD, path, de->d_name); \
- if(read_single_number_file(filename, &node->allocation_ ## SECTION ## _ ## FIELD) != 0) {\
- collector_error("BTRFS: failed to read '%s'", filename);\
- btrfs_free_node(node);\
- continue;\
- }\
- if(!node->allocation_ ## SECTION ## _ ## FIELD ## _filename)\
- node->allocation_ ## SECTION ## _ ## FIELD ## _filename = strdupz(filename);\
- }
-
- // --------------------------------------------------------------------
- // allocation/data
-
- init_btrfs_allocation_section_field(data, total_bytes);
- init_btrfs_allocation_section_field(data, bytes_used);
- init_btrfs_allocation_section_field(data, disk_total);
- init_btrfs_allocation_section_field(data, disk_used);
-
-
- // --------------------------------------------------------------------
- // allocation/metadata
-
- init_btrfs_allocation_section_field(metadata, total_bytes);
- init_btrfs_allocation_section_field(metadata, bytes_used);
- init_btrfs_allocation_section_field(metadata, disk_total);
- init_btrfs_allocation_section_field(metadata, disk_used);
-
- init_btrfs_allocation_field(global_rsv_size);
- // init_btrfs_allocation_field(global_rsv_reserved);
-
-
- // --------------------------------------------------------------------
- // allocation/system
-
- init_btrfs_allocation_section_field(system, total_bytes);
- init_btrfs_allocation_section_field(system, bytes_used);
- init_btrfs_allocation_section_field(system, disk_total);
- init_btrfs_allocation_section_field(system, disk_used);
-
- // --------------------------------------------------------------------
- // commit stats
-
- snprintfz(filename, FILENAME_MAX, "%s/%s/commit_stats", path, de->d_name);
- if(!node->commit_stats_filename) node->commit_stats_filename = strdupz(filename);
- if(unlikely(collect_btrfs_commits_stats(node, update_every))){
- collector_error("BTRFS: failed to collect commit stats for '%s'", node->id);
- btrfs_free_commits_stats(node);
- }
-
- // --------------------------------------------------------------------
- // find all disks related to this node
- // and collect their sizes
-
- snprintfz(filename, FILENAME_MAX, "%s/%s/devices", path, de->d_name);
- find_btrfs_disks(node, filename);
-
- // --------------------------------------------------------------------
- // find all devices related to this node
-
- snprintfz(filename, FILENAME_MAX, "%s/%s/devinfo", path, de->d_name);
- find_btrfs_devices(node, filename);
-
- // --------------------------------------------------------------------
- // link it
-
- // collector_info("BTRFS: linking '%s'", node->id);
- node->next = nodes;
- nodes = node;
- }
- closedir(dir);
-
-
- // ------------------------------------------------------------------------
- // cleanup
-
- BTRFS_NODE *last = NULL;
- node = nodes;
-
- while(node) {
- if(unlikely(!node->exists)) {
- if(unlikely(nodes == node)) {
- nodes = node->next;
- btrfs_free_node(node);
- node = nodes;
- last = NULL;
- }
- else {
- last->next = node->next;
- btrfs_free_node(node);
- node = last->next;
- }
-
- continue;
- }
-
- last = node;
- node = node->next;
- }
-
- return 0;
-}
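-
-// Note on the init_btrfs_allocation_*() macros above (illustration): they
-// expand in place inside the readdir() loop, so the 'continue' embedded in
-// each macro abandons the whole pool when one sysfs file cannot be read,
-// e.g. init_btrfs_allocation_section_field(data, total_bytes) reads
-// ".../allocation/data/total_bytes", frees the node on failure and resumes
-// the directory scan with the next entry.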
-
-static void add_labels_to_btrfs(BTRFS_NODE *n, RRDSET *st) {
- rrdlabels_add(st->rrdlabels, "filesystem_uuid", n->id, RRDLABEL_SRC_AUTO);
- rrdlabels_add(st->rrdlabels, "filesystem_label", n->label, RRDLABEL_SRC_AUTO);
-}
-
-int do_sys_fs_btrfs(int update_every, usec_t dt) {
- static int initialized = 0
- , do_allocation_disks = CONFIG_BOOLEAN_AUTO
- , do_allocation_system = CONFIG_BOOLEAN_AUTO
- , do_allocation_data = CONFIG_BOOLEAN_AUTO
- , do_allocation_metadata = CONFIG_BOOLEAN_AUTO
- , do_commit_stats = CONFIG_BOOLEAN_AUTO
- , do_error_stats = CONFIG_BOOLEAN_AUTO;
-
- static usec_t refresh_delta = 0, refresh_every = 60 * USEC_PER_SEC;
- static char *btrfs_path = NULL;
-
- if(unlikely(!initialized)) {
- initialized = 1;
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/fs/btrfs");
- btrfs_path = config_get("plugin:proc:/sys/fs/btrfs", "path to monitor", filename);
-
- refresh_every = config_get_number("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC;
- refresh_delta = refresh_every;
-
- do_allocation_disks = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "physical disks allocation", do_allocation_disks);
- do_allocation_data = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "data allocation", do_allocation_data);
- do_allocation_metadata = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "metadata allocation", do_allocation_metadata);
- do_allocation_system = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "system allocation", do_allocation_system);
- do_commit_stats = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "commit stats", do_commit_stats);
- do_error_stats = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "error stats", do_error_stats);
- }
-
- refresh_delta += dt;
- if(refresh_delta >= refresh_every) {
- refresh_delta = 0;
- find_all_btrfs_pools(btrfs_path, update_every);
- }
-
- BTRFS_NODE *node;
- for(node = nodes; node ; node = node->next) {
- // --------------------------------------------------------------------
-        // collect the metrics for all enabled sections
-
- #define collect_btrfs_allocation_field(FIELD) \
- read_single_number_file(node->allocation_ ## FIELD ## _filename, &node->allocation_ ## FIELD)
-
- #define collect_btrfs_allocation_section_field(SECTION, FIELD) \
- read_single_number_file(node->allocation_ ## SECTION ## _ ## FIELD ## _filename, &node->allocation_ ## SECTION ## _ ## FIELD)
-
- if(do_allocation_disks != CONFIG_BOOLEAN_NO) {
- if( collect_btrfs_allocation_section_field(data, disk_total) != 0
- || collect_btrfs_allocation_section_field(data, disk_used) != 0
- || collect_btrfs_allocation_section_field(metadata, disk_total) != 0
- || collect_btrfs_allocation_section_field(metadata, disk_used) != 0
- || collect_btrfs_allocation_section_field(system, disk_total) != 0
- || collect_btrfs_allocation_section_field(system, disk_used) != 0) {
- collector_error("BTRFS: failed to collect physical disks allocation for '%s'", node->id);
- // make it refresh btrfs at the next iteration
- refresh_delta = refresh_every;
- continue;
- }
- }
-
- if(do_allocation_data != CONFIG_BOOLEAN_NO) {
- if (collect_btrfs_allocation_section_field(data, total_bytes) != 0
- || collect_btrfs_allocation_section_field(data, bytes_used) != 0) {
- collector_error("BTRFS: failed to collect allocation/data for '%s'", node->id);
- // make it refresh btrfs at the next iteration
- refresh_delta = refresh_every;
- continue;
- }
- }
-
- if(do_allocation_metadata != CONFIG_BOOLEAN_NO) {
- if (collect_btrfs_allocation_section_field(metadata, total_bytes) != 0
- || collect_btrfs_allocation_section_field(metadata, bytes_used) != 0
- || collect_btrfs_allocation_field(global_rsv_size) != 0
- ) {
- collector_error("BTRFS: failed to collect allocation/metadata for '%s'", node->id);
- // make it refresh btrfs at the next iteration
- refresh_delta = refresh_every;
- continue;
- }
- }
-
- if(do_allocation_system != CONFIG_BOOLEAN_NO) {
- if (collect_btrfs_allocation_section_field(system, total_bytes) != 0
- || collect_btrfs_allocation_section_field(system, bytes_used) != 0) {
- collector_error("BTRFS: failed to collect allocation/system for '%s'", node->id);
- // make it refresh btrfs at the next iteration
- refresh_delta = refresh_every;
- continue;
- }
- }
-
- if(do_commit_stats != CONFIG_BOOLEAN_NO && node->commit_stats_filename) {
- if (unlikely(collect_btrfs_commits_stats(node, update_every))) {
- collector_error("BTRFS: failed to collect commit stats for '%s'", node->id);
- btrfs_free_commits_stats(node);
- }
- }
-
- if(do_error_stats != CONFIG_BOOLEAN_NO) {
- for(BTRFS_DEVICE *d = node->devices ; d ; d = d->next) {
- if(unlikely(collect_btrfs_error_stats(d))){
- collector_error("BTRFS: failed to collect error stats for '%s', devid:'%d'", node->id, d->id);
- /* make it refresh btrfs at the next iteration,
- * btrfs_free_device(d) will be called in
- * find_btrfs_devices() as part of the garbage collection */
- refresh_delta = refresh_every;
- }
- }
- }
-
- // --------------------------------------------------------------------
- // allocation/disks
-
- if(do_allocation_disks == CONFIG_BOOLEAN_YES || (do_allocation_disks == CONFIG_BOOLEAN_AUTO &&
- ((node->all_disks_total && node->allocation_data_disk_total) ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_allocation_disks = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!node->st_allocation_disks)) {
- char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
-
- snprintfz(id, RRD_ID_LENGTH_MAX, "disk_%s", node->id);
- snprintfz(name, RRD_ID_LENGTH_MAX, "disk_%s", node->label);
- snprintfz(title, sizeof(title) - 1, "BTRFS Physical Disk Allocation");
-
- netdata_fix_chart_id(id);
- netdata_fix_chart_name(name);
-
- node->st_allocation_disks = rrdset_create_localhost(
- "btrfs"
- , id
- , name
- , node->label
- , "btrfs.disk"
- , title
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_BTRFS_NAME
- , NETDATA_CHART_PRIO_BTRFS_DISK
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- node->rd_allocation_disks_unallocated = rrddim_add(node->st_allocation_disks, "unallocated", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- node->rd_allocation_disks_data_free = rrddim_add(node->st_allocation_disks, "data_free", "data free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- node->rd_allocation_disks_data_used = rrddim_add(node->st_allocation_disks, "data_used", "data used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- node->rd_allocation_disks_metadata_free = rrddim_add(node->st_allocation_disks, "meta_free", "meta free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- node->rd_allocation_disks_metadata_used = rrddim_add(node->st_allocation_disks, "meta_used", "meta used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- node->rd_allocation_disks_system_free = rrddim_add(node->st_allocation_disks, "sys_free", "sys free", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- node->rd_allocation_disks_system_used = rrddim_add(node->st_allocation_disks, "sys_used", "sys used", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_btrfs(node, node->st_allocation_disks);
- }
-
- // unsigned long long disk_used = node->allocation_data_disk_used + node->allocation_metadata_disk_used + node->allocation_system_disk_used;
- unsigned long long disk_total = node->allocation_data_disk_total + node->allocation_metadata_disk_total + node->allocation_system_disk_total;
- unsigned long long disk_unallocated = node->all_disks_total - disk_total;
-
- rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_unallocated, disk_unallocated);
- rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_data_used, node->allocation_data_disk_used);
- rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_data_free, node->allocation_data_disk_total - node->allocation_data_disk_used);
- rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_metadata_used, node->allocation_metadata_disk_used);
- rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_metadata_free, node->allocation_metadata_disk_total - node->allocation_metadata_disk_used);
- rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_system_used, node->allocation_system_disk_used);
- rrddim_set_by_pointer(node->st_allocation_disks, node->rd_allocation_disks_system_free, node->allocation_system_disk_total - node->allocation_system_disk_used);
- rrdset_done(node->st_allocation_disks);
- }
-
-
- // --------------------------------------------------------------------
- // allocation/data
-
- if(do_allocation_data == CONFIG_BOOLEAN_YES || (do_allocation_data == CONFIG_BOOLEAN_AUTO &&
- (node->allocation_data_total_bytes ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_allocation_data = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!node->st_allocation_data)) {
- char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
-
- snprintfz(id, RRD_ID_LENGTH_MAX, "data_%s", node->id);
- snprintfz(name, RRD_ID_LENGTH_MAX, "data_%s", node->label);
- snprintfz(title, sizeof(title) - 1, "BTRFS Data Allocation");
-
- netdata_fix_chart_id(id);
- netdata_fix_chart_name(name);
-
- node->st_allocation_data = rrdset_create_localhost(
- "btrfs"
- , id
- , name
- , node->label
- , "btrfs.data"
- , title
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_BTRFS_NAME
- , NETDATA_CHART_PRIO_BTRFS_DATA
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- node->rd_allocation_data_free = rrddim_add(node->st_allocation_data, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- node->rd_allocation_data_used = rrddim_add(node->st_allocation_data, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_btrfs(node, node->st_allocation_data);
- }
-
- rrddim_set_by_pointer(node->st_allocation_data, node->rd_allocation_data_free, node->allocation_data_total_bytes - node->allocation_data_bytes_used);
- rrddim_set_by_pointer(node->st_allocation_data, node->rd_allocation_data_used, node->allocation_data_bytes_used);
- rrdset_done(node->st_allocation_data);
- }
-
- // --------------------------------------------------------------------
- // allocation/metadata
-
- if(do_allocation_metadata == CONFIG_BOOLEAN_YES || (do_allocation_metadata == CONFIG_BOOLEAN_AUTO &&
- (node->allocation_metadata_total_bytes ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_allocation_metadata = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!node->st_allocation_metadata)) {
- char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
-
- snprintfz(id, RRD_ID_LENGTH_MAX, "metadata_%s", node->id);
- snprintfz(name, RRD_ID_LENGTH_MAX, "metadata_%s", node->label);
- snprintfz(title, sizeof(title) - 1, "BTRFS Metadata Allocation");
-
- netdata_fix_chart_id(id);
- netdata_fix_chart_name(name);
-
- node->st_allocation_metadata = rrdset_create_localhost(
- "btrfs"
- , id
- , name
- , node->label
- , "btrfs.metadata"
- , title
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_BTRFS_NAME
- , NETDATA_CHART_PRIO_BTRFS_METADATA
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- node->rd_allocation_metadata_free = rrddim_add(node->st_allocation_metadata, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- node->rd_allocation_metadata_used = rrddim_add(node->st_allocation_metadata, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- node->rd_allocation_metadata_reserved = rrddim_add(node->st_allocation_metadata, "reserved", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_btrfs(node, node->st_allocation_metadata);
- }
-
- rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_free, node->allocation_metadata_total_bytes - node->allocation_metadata_bytes_used - node->allocation_global_rsv_size);
- rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_used, node->allocation_metadata_bytes_used);
- rrddim_set_by_pointer(node->st_allocation_metadata, node->rd_allocation_metadata_reserved, node->allocation_global_rsv_size);
- rrdset_done(node->st_allocation_metadata);
- }
-
- // --------------------------------------------------------------------
- // allocation/system
-
- if(do_allocation_system == CONFIG_BOOLEAN_YES || (do_allocation_system == CONFIG_BOOLEAN_AUTO &&
- (node->allocation_system_total_bytes ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_allocation_system = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!node->st_allocation_system)) {
- char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
-
- snprintfz(id, RRD_ID_LENGTH_MAX, "system_%s", node->id);
- snprintfz(name, RRD_ID_LENGTH_MAX, "system_%s", node->label);
- snprintfz(title, sizeof(title) - 1, "BTRFS System Allocation");
-
- netdata_fix_chart_id(id);
- netdata_fix_chart_name(name);
-
- node->st_allocation_system = rrdset_create_localhost(
- "btrfs"
- , id
- , name
- , node->label
- , "btrfs.system"
- , title
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_BTRFS_NAME
- , NETDATA_CHART_PRIO_BTRFS_SYSTEM
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- node->rd_allocation_system_free = rrddim_add(node->st_allocation_system, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- node->rd_allocation_system_used = rrddim_add(node->st_allocation_system, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_btrfs(node, node->st_allocation_system);
- }
-
- rrddim_set_by_pointer(node->st_allocation_system, node->rd_allocation_system_free, node->allocation_system_total_bytes - node->allocation_system_bytes_used);
- rrddim_set_by_pointer(node->st_allocation_system, node->rd_allocation_system_used, node->allocation_system_bytes_used);
- rrdset_done(node->st_allocation_system);
- }
-
- // --------------------------------------------------------------------
- // commit_stats
-
- if(do_commit_stats == CONFIG_BOOLEAN_YES || (do_commit_stats == CONFIG_BOOLEAN_AUTO &&
- (node->commits_total ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_commit_stats = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!node->st_commits)) {
- char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
-
- snprintfz(id, RRD_ID_LENGTH_MAX, "commits_%s", node->id);
- snprintfz(name, RRD_ID_LENGTH_MAX, "commits_%s", node->label);
- snprintfz(title, sizeof(title) - 1, "BTRFS Commits");
-
- netdata_fix_chart_id(id);
- netdata_fix_chart_name(name);
-
- node->st_commits = rrdset_create_localhost(
- "btrfs"
- , id
- , name
- , node->label
- , "btrfs.commits"
- , title
- , "commits"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_BTRFS_NAME
- , NETDATA_CHART_PRIO_BTRFS_COMMITS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- node->rd_commits = rrddim_add(node->st_commits, "commits", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_btrfs(node, node->st_commits);
- }
-
- rrddim_set_by_pointer(node->st_commits, node->rd_commits, node->commits_new);
- rrdset_done(node->st_commits);
-
- if(unlikely(!node->st_commits_percentage_time)) {
- char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
-
- snprintfz(id, RRD_ID_LENGTH_MAX, "commits_perc_time_%s", node->id);
- snprintfz(name, RRD_ID_LENGTH_MAX, "commits_perc_time_%s", node->label);
- snprintfz(title, sizeof(title) - 1, "BTRFS Commits Time Share");
-
- netdata_fix_chart_id(id);
- netdata_fix_chart_name(name);
-
- node->st_commits_percentage_time = rrdset_create_localhost(
- "btrfs"
- , id
- , name
- , node->label
- , "btrfs.commits_perc_time"
- , title
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_BTRFS_NAME
- , NETDATA_CHART_PRIO_BTRFS_COMMITS_PERC_TIME
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- node->rd_commits_percentage_time = rrddim_add(node->st_commits_percentage_time, "commits", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_btrfs(node, node->st_commits_percentage_time);
- }
-
- rrddim_set_by_pointer(node->st_commits_percentage_time, node->rd_commits_percentage_time, node->commits_percentage_time);
- rrdset_done(node->st_commits_percentage_time);
-
-
- if(unlikely(!node->st_commit_timings)) {
- char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
-
- snprintfz(id, RRD_ID_LENGTH_MAX, "commit_timings_%s", node->id);
- snprintfz(name, RRD_ID_LENGTH_MAX, "commit_timings_%s", node->label);
- snprintfz(title, sizeof(title) - 1, "BTRFS Commit Timings");
-
- netdata_fix_chart_id(id);
- netdata_fix_chart_name(name);
-
- node->st_commit_timings = rrdset_create_localhost(
- "btrfs"
- , id
- , name
- , node->label
- , "btrfs.commit_timings"
- , title
- , "ms"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_BTRFS_NAME
- , NETDATA_CHART_PRIO_BTRFS_COMMIT_TIMINGS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- node->rd_commit_timings_last = rrddim_add(node->st_commit_timings, "last", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- node->rd_commit_timings_max = rrddim_add(node->st_commit_timings, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_btrfs(node, node->st_commit_timings);
- }
-
- rrddim_set_by_pointer(node->st_commit_timings, node->rd_commit_timings_last, node->commit_timings_last);
- rrddim_set_by_pointer(node->st_commit_timings, node->rd_commit_timings_max, node->commit_timings_max);
- rrdset_done(node->st_commit_timings);
- }
-
- // --------------------------------------------------------------------
- // error_stats per device
-
- if(do_error_stats == CONFIG_BOOLEAN_YES || (do_error_stats == CONFIG_BOOLEAN_AUTO &&
- (node->devices ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- do_error_stats = CONFIG_BOOLEAN_YES;
-
- for(BTRFS_DEVICE *d = node->devices ; d ; d = d->next) {
-
- if(unlikely(!d->st_error_stats)) {
- char id[RRD_ID_LENGTH_MAX + 1], name[RRD_ID_LENGTH_MAX + 1], title[200 + 1];
-
- snprintfz(id, RRD_ID_LENGTH_MAX, "device_errors_dev%d_%s", d->id, node->id);
- snprintfz(name, RRD_ID_LENGTH_MAX, "device_errors_dev%d_%s", d->id, node->label);
- snprintfz(title, sizeof(title) - 1, "BTRFS Device Errors");
-
- netdata_fix_chart_id(id);
- netdata_fix_chart_name(name);
-
- d->st_error_stats = rrdset_create_localhost(
- "btrfs"
- , id
- , name
- , node->label
- , "btrfs.device_errors"
- , title
- , "errors"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_BTRFS_NAME
- , NETDATA_CHART_PRIO_BTRFS_ERRORS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
-                    d->rd_write_errs      = rrddim_add(d->st_error_stats, "write_errs",      NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-                    d->rd_read_errs       = rrddim_add(d->st_error_stats, "read_errs",       NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-                    d->rd_flush_errs      = rrddim_add(d->st_error_stats, "flush_errs",      NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-                    d->rd_corruption_errs = rrddim_add(d->st_error_stats, "corruption_errs", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-                    d->rd_generation_errs = rrddim_add(d->st_error_stats, "generation_errs", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- char dev_id[5];
- snprintfz(dev_id, 4, "%d", d->id);
- rrdlabels_add(d->st_error_stats->rrdlabels, "device_id", dev_id, RRDLABEL_SRC_AUTO);
- add_labels_to_btrfs(node, d->st_error_stats);
- }
-
- rrddim_set_by_pointer(d->st_error_stats, d->rd_write_errs, d->write_errs);
- rrddim_set_by_pointer(d->st_error_stats, d->rd_read_errs, d->read_errs);
- rrddim_set_by_pointer(d->st_error_stats, d->rd_flush_errs, d->flush_errs);
- rrddim_set_by_pointer(d->st_error_stats, d->rd_corruption_errs, d->corruption_errs);
- rrddim_set_by_pointer(d->st_error_stats, d->rd_generation_errs, d->generation_errs);
-
- rrdset_done(d->st_error_stats);
- }
- }
- }
-
- return 0;
-}
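-
-// Divisor note (illustration): the allocation files report bytes and every
-// MiB dimension above uses divisor 1024 * 1024, so e.g. 3221225472 bytes of
-// data chunks are charted as 3072 MiB with no conversion in the collector.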
-
diff --git a/collectors/proc.plugin/sys_kernel_mm_ksm.c b/collectors/proc.plugin/sys_kernel_mm_ksm.c
deleted file mode 100644
index 45f1ac330..000000000
--- a/collectors/proc.plugin/sys_kernel_mm_ksm.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugin_proc.h"
-
-#define PLUGIN_PROC_MODULE_KSM_NAME "/sys/kernel/mm/ksm"
-
-typedef struct ksm_name_value {
- char filename[FILENAME_MAX + 1];
- unsigned long long value;
-} KSM_NAME_VALUE;
-
-#define PAGES_SHARED 0
-#define PAGES_SHARING 1
-#define PAGES_UNSHARED 2
-#define PAGES_VOLATILE 3
-#define PAGES_TO_SCAN 4
-
-KSM_NAME_VALUE values[] = {
- [PAGES_SHARED] = { "/sys/kernel/mm/ksm/pages_shared", 0ULL },
- [PAGES_SHARING] = { "/sys/kernel/mm/ksm/pages_sharing", 0ULL },
- [PAGES_UNSHARED] = { "/sys/kernel/mm/ksm/pages_unshared", 0ULL },
- [PAGES_VOLATILE] = { "/sys/kernel/mm/ksm/pages_volatile", 0ULL },
- // [PAGES_TO_SCAN] = { "/sys/kernel/mm/ksm/pages_to_scan", 0ULL },
-};
-
-int do_sys_kernel_mm_ksm(int update_every, usec_t dt) {
- (void)dt;
- static procfile *ff_pages_shared = NULL, *ff_pages_sharing = NULL, *ff_pages_unshared = NULL, *ff_pages_volatile = NULL/*, *ff_pages_to_scan = NULL*/;
- static unsigned long page_size = 0;
-
- if(unlikely(page_size == 0))
- page_size = (unsigned long)sysconf(_SC_PAGESIZE);
-
- if(unlikely(!ff_pages_shared)) {
- snprintfz(values[PAGES_SHARED].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_shared");
- snprintfz(values[PAGES_SHARED].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_shared", values[PAGES_SHARED].filename));
- ff_pages_shared = procfile_open(values[PAGES_SHARED].filename, " \t:", PROCFILE_FLAG_DEFAULT);
- }
-
- if(unlikely(!ff_pages_sharing)) {
- snprintfz(values[PAGES_SHARING].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_sharing");
- snprintfz(values[PAGES_SHARING].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_sharing", values[PAGES_SHARING].filename));
- ff_pages_sharing = procfile_open(values[PAGES_SHARING].filename, " \t:", PROCFILE_FLAG_DEFAULT);
- }
-
- if(unlikely(!ff_pages_unshared)) {
- snprintfz(values[PAGES_UNSHARED].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_unshared");
- snprintfz(values[PAGES_UNSHARED].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_unshared", values[PAGES_UNSHARED].filename));
- ff_pages_unshared = procfile_open(values[PAGES_UNSHARED].filename, " \t:", PROCFILE_FLAG_DEFAULT);
- }
-
- if(unlikely(!ff_pages_volatile)) {
- snprintfz(values[PAGES_VOLATILE].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_volatile");
- snprintfz(values[PAGES_VOLATILE].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_volatile", values[PAGES_VOLATILE].filename));
- ff_pages_volatile = procfile_open(values[PAGES_VOLATILE].filename, " \t:", PROCFILE_FLAG_DEFAULT);
- }
-
- //if(unlikely(!ff_pages_to_scan)) {
- // snprintfz(values[PAGES_TO_SCAN].filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/mm/ksm/pages_to_scan");
- // snprintfz(values[PAGES_TO_SCAN].filename, FILENAME_MAX, "%s", config_get("plugin:proc:/sys/kernel/mm/ksm", "/sys/kernel/mm/ksm/pages_to_scan", values[PAGES_TO_SCAN].filename));
- // ff_pages_to_scan = procfile_open(values[PAGES_TO_SCAN].filename, " \t:", PROCFILE_FLAG_DEFAULT);
- //}
-
- if(unlikely(!ff_pages_shared || !ff_pages_sharing || !ff_pages_unshared || !ff_pages_volatile /*|| !ff_pages_to_scan */))
- return 1;
-
- unsigned long long pages_shared = 0, pages_sharing = 0, pages_unshared = 0, pages_volatile = 0, /*pages_to_scan = 0,*/ offered = 0, saved = 0;
-
- ff_pages_shared = procfile_readall(ff_pages_shared);
-    if(unlikely(!ff_pages_shared)) return 0; // return 0 so that we retry opening it next time
- pages_shared = str2ull(procfile_lineword(ff_pages_shared, 0, 0), NULL);
-
- ff_pages_sharing = procfile_readall(ff_pages_sharing);
-    if(unlikely(!ff_pages_sharing)) return 0; // return 0 so that we retry opening it next time
- pages_sharing = str2ull(procfile_lineword(ff_pages_sharing, 0, 0), NULL);
-
- ff_pages_unshared = procfile_readall(ff_pages_unshared);
-    if(unlikely(!ff_pages_unshared)) return 0; // return 0, so that we retry opening it next time
- pages_unshared = str2ull(procfile_lineword(ff_pages_unshared, 0, 0), NULL);
-
- ff_pages_volatile = procfile_readall(ff_pages_volatile);
-    if(unlikely(!ff_pages_volatile)) return 0; // return 0, so that we retry opening it next time
- pages_volatile = str2ull(procfile_lineword(ff_pages_volatile, 0, 0), NULL);
-
- //ff_pages_to_scan = procfile_readall(ff_pages_to_scan);
- //if(unlikely(!ff_pages_to_scan)) return 0; // we return 0, so that we will retry to open it next time
- //pages_to_scan = str2ull(procfile_lineword(ff_pages_to_scan, 0, 0));
-
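-    // the sum of all four counters approximates every page offered to KSM,
-    // while pages_sharing is the saving: each sharing page avoids one copy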
- offered = pages_sharing + pages_shared + pages_unshared + pages_volatile;
- saved = pages_sharing;
-
- if(unlikely(!offered /*|| !pages_to_scan*/ && netdata_zero_metrics_enabled == CONFIG_BOOLEAN_NO)) return 0;
-
- // --------------------------------------------------------------------
-
- {
- static RRDSET *st_mem_ksm = NULL;
- static RRDDIM *rd_shared = NULL, *rd_unshared = NULL, *rd_sharing = NULL, *rd_volatile = NULL/*, *rd_to_scan = NULL*/;
-
- if (unlikely(!st_mem_ksm)) {
- st_mem_ksm = rrdset_create_localhost(
- "mem"
- , "ksm"
- , NULL
- , "ksm"
- , NULL
- , "Kernel Same Page Merging"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_KSM_NAME
- , NETDATA_CHART_PRIO_MEM_KSM
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_shared = rrddim_add(st_mem_ksm, "shared", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_unshared = rrddim_add(st_mem_ksm, "unshared", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_sharing = rrddim_add(st_mem_ksm, "sharing", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_volatile = rrddim_add(st_mem_ksm, "volatile", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- //rd_to_scan = rrddim_add(st_mem_ksm, "to_scan", "to scan", -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
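-        // the /sys counters are in pages; multiply by the system page size to
-        // get bytes, which the 1 MiB dimension divisor renders as MiB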
- rrddim_set_by_pointer(st_mem_ksm, rd_shared, pages_shared * page_size);
- rrddim_set_by_pointer(st_mem_ksm, rd_unshared, pages_unshared * page_size);
- rrddim_set_by_pointer(st_mem_ksm, rd_sharing, pages_sharing * page_size);
- rrddim_set_by_pointer(st_mem_ksm, rd_volatile, pages_volatile * page_size);
- //rrddim_set_by_pointer(st_mem_ksm, rd_to_scan, pages_to_scan * page_size);
-
- rrdset_done(st_mem_ksm);
- }
-
- // --------------------------------------------------------------------
-
- {
- static RRDSET *st_mem_ksm_savings = NULL;
- static RRDDIM *rd_savings = NULL, *rd_offered = NULL;
-
- if (unlikely(!st_mem_ksm_savings)) {
- st_mem_ksm_savings = rrdset_create_localhost(
- "mem"
- , "ksm_savings"
- , NULL
- , "ksm"
- , NULL
- , "Kernel Same Page Merging Savings"
- , "MiB"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_KSM_NAME
- , NETDATA_CHART_PRIO_MEM_KSM_SAVINGS
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_savings = rrddim_add(st_mem_ksm_savings, "savings", NULL, -1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_offered = rrddim_add(st_mem_ksm_savings, "offered", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_mem_ksm_savings, rd_savings, saved * page_size);
- rrddim_set_by_pointer(st_mem_ksm_savings, rd_offered, offered * page_size);
-
- rrdset_done(st_mem_ksm_savings);
- }
-
- // --------------------------------------------------------------------
-
- {
- static RRDSET *st_mem_ksm_ratios = NULL;
- static RRDDIM *rd_savings = NULL;
-
- if (unlikely(!st_mem_ksm_ratios)) {
- st_mem_ksm_ratios = rrdset_create_localhost(
- "mem"
- , "ksm_ratios"
- , NULL
- , "ksm"
- , NULL
- , "Kernel Same Page Merging Effectiveness"
- , "percentage"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_KSM_NAME
- , NETDATA_CHART_PRIO_MEM_KSM_RATIOS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_savings = rrddim_add(st_mem_ksm_ratios, "savings", NULL, 1, 10000, RRD_ALGORITHM_ABSOLUTE);
- }
-
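-        // the ratio is scaled by 10^6 and the dimension divisor is 10000, so
-        // the chart shows saved/offered as a percentage with two decimals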
- rrddim_set_by_pointer(st_mem_ksm_ratios, rd_savings, offered ? (saved * 1000000) / offered : 0);
- rrdset_done(st_mem_ksm_ratios);
- }
-
- return 0;
-}
diff --git a/collectors/proc.plugin/zfs_common.c b/collectors/proc.plugin/zfs_common.c
deleted file mode 100644
index cca0ae0e6..000000000
--- a/collectors/proc.plugin/zfs_common.c
+++ /dev/null
@@ -1,960 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "zfs_common.h"
-
-struct arcstats arcstats = { 0 };
-
-void generate_charts_arcstats(const char *plugin, const char *module, int show_zero_charts, int update_every) {
- static int do_arc_size = -1, do_l2_size = -1, do_reads = -1, do_l2bytes = -1, do_ahits = -1, do_dhits = -1, \
- do_phits = -1, do_mhits = -1, do_l2hits = -1, do_list_hits = -1;
-
- if(unlikely(do_arc_size == -1))
- do_arc_size = do_l2_size = do_reads = do_l2bytes = do_ahits = do_dhits = do_phits = do_mhits \
- = do_l2hits = do_list_hits = show_zero_charts;
-
- // ARC reads
- unsigned long long aread = arcstats.hits + arcstats.misses;
-
- // Demand reads
- unsigned long long dhit = arcstats.demand_data_hits + arcstats.demand_metadata_hits;
- unsigned long long dmiss = arcstats.demand_data_misses + arcstats.demand_metadata_misses;
- unsigned long long dread = dhit + dmiss;
-
- // Prefetch reads
- unsigned long long phit = arcstats.prefetch_data_hits + arcstats.prefetch_metadata_hits;
- unsigned long long pmiss = arcstats.prefetch_data_misses + arcstats.prefetch_metadata_misses;
- unsigned long long pread = phit + pmiss;
-
- // Metadata reads
- unsigned long long mhit = arcstats.prefetch_metadata_hits + arcstats.demand_metadata_hits;
- unsigned long long mmiss = arcstats.prefetch_metadata_misses + arcstats.demand_metadata_misses;
- unsigned long long mread = mhit + mmiss;
-
- // l2 reads
- unsigned long long l2hit = arcstats.l2_hits;
- unsigned long long l2miss = arcstats.l2_misses;
- unsigned long long l2read = l2hit + l2miss;
-
- // --------------------------------------------------------------------
-
- if(do_arc_size == CONFIG_BOOLEAN_YES || arcstats.size || arcstats.c || arcstats.c_min || arcstats.c_max) {
- do_arc_size = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_arc_size = NULL;
- static RRDDIM *rd_arc_size = NULL;
- static RRDDIM *rd_arc_target_size = NULL;
- static RRDDIM *rd_arc_target_min_size = NULL;
- static RRDDIM *rd_arc_target_max_size = NULL;
-
- if (unlikely(!st_arc_size)) {
- st_arc_size = rrdset_create_localhost(
- "zfs"
- , "arc_size"
- , NULL
- , ZFS_FAMILY_SIZE
- , NULL
- , "ZFS ARC Size"
- , "MiB"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_ARC_SIZE
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_arc_size = rrddim_add(st_arc_size, "size", "arcsz", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_arc_target_size = rrddim_add(st_arc_size, "target", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_arc_target_min_size = rrddim_add(st_arc_size, "min", "min (hard limit)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_arc_target_max_size = rrddim_add(st_arc_size, "max", "max (high water)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_arc_size, rd_arc_size, arcstats.size);
- rrddim_set_by_pointer(st_arc_size, rd_arc_target_size, arcstats.c);
- rrddim_set_by_pointer(st_arc_size, rd_arc_target_min_size, arcstats.c_min);
- rrddim_set_by_pointer(st_arc_size, rd_arc_target_max_size, arcstats.c_max);
- rrdset_done(st_arc_size);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(arcstats.l2exist) && (do_l2_size == CONFIG_BOOLEAN_YES || arcstats.l2_size || arcstats.l2_asize)) {
- do_l2_size = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_l2_size = NULL;
- static RRDDIM *rd_l2_size = NULL;
- static RRDDIM *rd_l2_asize = NULL;
-
- if (unlikely(!st_l2_size)) {
- st_l2_size = rrdset_create_localhost(
- "zfs"
- , "l2_size"
- , NULL
- , ZFS_FAMILY_SIZE
- , NULL
- , "ZFS L2 ARC Size"
- , "MiB"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_L2_SIZE
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_l2_asize = rrddim_add(st_l2_size, "actual", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- rd_l2_size = rrddim_add(st_l2_size, "size", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_l2_size, rd_l2_size, arcstats.l2_size);
- rrddim_set_by_pointer(st_l2_size, rd_l2_asize, arcstats.l2_asize);
- rrdset_done(st_l2_size);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_reads == CONFIG_BOOLEAN_YES || aread || dread || pread || mread || l2read)) {
- do_reads = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_reads = NULL;
- static RRDDIM *rd_aread = NULL;
- static RRDDIM *rd_dread = NULL;
- static RRDDIM *rd_pread = NULL;
- static RRDDIM *rd_mread = NULL;
- static RRDDIM *rd_l2read = NULL;
-
- if (unlikely(!st_reads)) {
- st_reads = rrdset_create_localhost(
- "zfs"
- , "reads"
- , NULL
- , ZFS_FAMILY_ACCESSES
- , NULL
- , "ZFS Reads"
- , "reads/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_READS
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_aread = rrddim_add(st_reads, "areads", "arc", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_dread = rrddim_add(st_reads, "dreads", "demand", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_pread = rrddim_add(st_reads, "preads", "prefetch", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_mread = rrddim_add(st_reads, "mreads", "metadata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- if(arcstats.l2exist)
- rd_l2read = rrddim_add(st_reads, "l2reads", "l2", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_reads, rd_aread, aread);
- rrddim_set_by_pointer(st_reads, rd_dread, dread);
- rrddim_set_by_pointer(st_reads, rd_pread, pread);
- rrddim_set_by_pointer(st_reads, rd_mread, mread);
-
- if(arcstats.l2exist)
- rrddim_set_by_pointer(st_reads, rd_l2read, l2read);
-
- rrdset_done(st_reads);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(arcstats.l2exist && (do_l2bytes == CONFIG_BOOLEAN_YES || arcstats.l2_read_bytes || arcstats.l2_write_bytes))) {
- do_l2bytes = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_l2bytes = NULL;
- static RRDDIM *rd_l2_read_bytes = NULL;
- static RRDDIM *rd_l2_write_bytes = NULL;
-
- if (unlikely(!st_l2bytes)) {
- st_l2bytes = rrdset_create_localhost(
- "zfs"
- , "bytes"
- , NULL
- , ZFS_FAMILY_ACCESSES
- , NULL
- , "ZFS ARC L2 Read/Write Rate"
- , "KiB/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_IO
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_l2_read_bytes = rrddim_add(st_l2bytes, "read", NULL, 1, 1024, RRD_ALGORITHM_INCREMENTAL);
- rd_l2_write_bytes = rrddim_add(st_l2bytes, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_l2bytes, rd_l2_read_bytes, arcstats.l2_read_bytes);
- rrddim_set_by_pointer(st_l2bytes, rd_l2_write_bytes, arcstats.l2_write_bytes);
- rrdset_done(st_l2bytes);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_ahits == CONFIG_BOOLEAN_YES || arcstats.hits || arcstats.misses)) {
- do_ahits = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_ahits = NULL;
- static RRDDIM *rd_ahits = NULL;
- static RRDDIM *rd_amisses = NULL;
-
- if (unlikely(!st_ahits)) {
- st_ahits = rrdset_create_localhost(
- "zfs"
- , "hits"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS ARC Hits"
- , "percentage"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_HITS
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
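-            // PCENT_OVER_DIFF_TOTAL turns the per-interval deltas of these
-            // monotonic counters into a hit/miss percentage; the "hits_rate"
-            // chart below feeds the same counters through INCREMENTAL to get
-            // raw events/s instead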
- rd_ahits = rrddim_add(st_ahits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rd_amisses = rrddim_add(st_ahits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- }
-
- rrddim_set_by_pointer(st_ahits, rd_ahits, arcstats.hits);
- rrddim_set_by_pointer(st_ahits, rd_amisses, arcstats.misses);
- rrdset_done(st_ahits);
-
- static RRDSET *st_ahits_rate = NULL;
- static RRDDIM *rd_ahits_rate = NULL;
- static RRDDIM *rd_amisses_rate = NULL;
-
- if (unlikely(!st_ahits_rate)) {
- st_ahits_rate = rrdset_create_localhost(
- "zfs"
- , "hits_rate"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS ARC Hits Rate"
- , "events/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_HITS + 1
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_ahits_rate = rrddim_add(st_ahits_rate, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_amisses_rate = rrddim_add(st_ahits_rate, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_ahits_rate, rd_ahits_rate, arcstats.hits);
- rrddim_set_by_pointer(st_ahits_rate, rd_amisses_rate, arcstats.misses);
- rrdset_done(st_ahits_rate);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_dhits == CONFIG_BOOLEAN_YES || dhit || dmiss)) {
- do_dhits = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_dhits = NULL;
- static RRDDIM *rd_dhits = NULL;
- static RRDDIM *rd_dmisses = NULL;
-
- if (unlikely(!st_dhits)) {
- st_dhits = rrdset_create_localhost(
- "zfs"
- , "dhits"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS Demand Hits"
- , "percentage"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_DHITS
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_dhits = rrddim_add(st_dhits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rd_dmisses = rrddim_add(st_dhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- }
-
- rrddim_set_by_pointer(st_dhits, rd_dhits, dhit);
- rrddim_set_by_pointer(st_dhits, rd_dmisses, dmiss);
- rrdset_done(st_dhits);
-
- static RRDSET *st_dhits_rate = NULL;
- static RRDDIM *rd_dhits_rate = NULL;
- static RRDDIM *rd_dmisses_rate = NULL;
-
- if (unlikely(!st_dhits_rate)) {
- st_dhits_rate = rrdset_create_localhost(
- "zfs"
- , "dhits_rate"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS Demand Hits Rate"
- , "events/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_DHITS + 1
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_dhits_rate = rrddim_add(st_dhits_rate, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_dmisses_rate = rrddim_add(st_dhits_rate, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_dhits_rate, rd_dhits_rate, dhit);
- rrddim_set_by_pointer(st_dhits_rate, rd_dmisses_rate, dmiss);
- rrdset_done(st_dhits_rate);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_phits == CONFIG_BOOLEAN_YES || phit || pmiss)) {
- do_phits = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_phits = NULL;
- static RRDDIM *rd_phits = NULL;
- static RRDDIM *rd_pmisses = NULL;
-
- if (unlikely(!st_phits)) {
- st_phits = rrdset_create_localhost(
- "zfs"
- , "phits"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS Prefetch Hits"
- , "percentage"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_PHITS
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_phits = rrddim_add(st_phits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rd_pmisses = rrddim_add(st_phits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- }
-
- rrddim_set_by_pointer(st_phits, rd_phits, phit);
- rrddim_set_by_pointer(st_phits, rd_pmisses, pmiss);
- rrdset_done(st_phits);
-
- static RRDSET *st_phits_rate = NULL;
- static RRDDIM *rd_phits_rate = NULL;
- static RRDDIM *rd_pmisses_rate = NULL;
-
- if (unlikely(!st_phits_rate)) {
- st_phits_rate = rrdset_create_localhost(
- "zfs"
- , "phits_rate"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS Prefetch Hits Rate"
- , "events/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_PHITS + 1
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_phits_rate = rrddim_add(st_phits_rate, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_pmisses_rate = rrddim_add(st_phits_rate, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_phits_rate, rd_phits_rate, phit);
- rrddim_set_by_pointer(st_phits_rate, rd_pmisses_rate, pmiss);
- rrdset_done(st_phits_rate);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_mhits == CONFIG_BOOLEAN_YES || mhit || mmiss)) {
- do_mhits = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_mhits = NULL;
- static RRDDIM *rd_mhits = NULL;
- static RRDDIM *rd_mmisses = NULL;
-
- if (unlikely(!st_mhits)) {
- st_mhits = rrdset_create_localhost(
- "zfs"
- , "mhits"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS Metadata Hits"
- , "percentage"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_MHITS
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_mhits = rrddim_add(st_mhits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rd_mmisses = rrddim_add(st_mhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- }
-
- rrddim_set_by_pointer(st_mhits, rd_mhits, mhit);
- rrddim_set_by_pointer(st_mhits, rd_mmisses, mmiss);
- rrdset_done(st_mhits);
-
- static RRDSET *st_mhits_rate = NULL;
- static RRDDIM *rd_mhits_rate = NULL;
- static RRDDIM *rd_mmisses_rate = NULL;
-
- if (unlikely(!st_mhits_rate)) {
- st_mhits_rate = rrdset_create_localhost(
- "zfs"
- , "mhits_rate"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS Metadata Hits Rate"
- , "events/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_MHITS + 1
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_mhits_rate = rrddim_add(st_mhits_rate, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_mmisses_rate = rrddim_add(st_mhits_rate, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_mhits_rate, rd_mhits_rate, mhit);
- rrddim_set_by_pointer(st_mhits_rate, rd_mmisses_rate, mmiss);
- rrdset_done(st_mhits_rate);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(arcstats.l2exist && (do_l2hits == CONFIG_BOOLEAN_YES || l2hit || l2miss))) {
- do_l2hits = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_l2hits = NULL;
- static RRDDIM *rd_l2hits = NULL;
- static RRDDIM *rd_l2misses = NULL;
-
- if (unlikely(!st_l2hits)) {
- st_l2hits = rrdset_create_localhost(
- "zfs"
- , "l2hits"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS L2 Hits"
- , "percentage"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_L2HITS
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_l2hits = rrddim_add(st_l2hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rd_l2misses = rrddim_add(st_l2hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- }
-
- rrddim_set_by_pointer(st_l2hits, rd_l2hits, l2hit);
- rrddim_set_by_pointer(st_l2hits, rd_l2misses, l2miss);
- rrdset_done(st_l2hits);
-
- static RRDSET *st_l2hits_rate = NULL;
- static RRDDIM *rd_l2hits_rate = NULL;
- static RRDDIM *rd_l2misses_rate = NULL;
-
- if (unlikely(!st_l2hits_rate)) {
- st_l2hits_rate = rrdset_create_localhost(
- "zfs"
- , "l2hits_rate"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS L2 Hits Rate"
- , "events/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_L2HITS + 1
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_l2hits_rate = rrddim_add(st_l2hits_rate, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_l2misses_rate = rrddim_add(st_l2hits_rate, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_l2hits_rate, rd_l2hits_rate, l2hit);
- rrddim_set_by_pointer(st_l2hits_rate, rd_l2misses_rate, l2miss);
- rrdset_done(st_l2hits_rate);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_list_hits == CONFIG_BOOLEAN_YES || arcstats.mfu_hits \
- || arcstats.mru_hits \
- || arcstats.mfu_ghost_hits \
- || arcstats.mru_ghost_hits)) {
- do_list_hits = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_list_hits = NULL;
- static RRDDIM *rd_mfu = NULL;
- static RRDDIM *rd_mru = NULL;
- static RRDDIM *rd_mfug = NULL;
- static RRDDIM *rd_mrug = NULL;
-
- if (unlikely(!st_list_hits)) {
- st_list_hits = rrdset_create_localhost(
- "zfs"
- , "list_hits"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS List Hits"
- , "hits/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_LIST_HITS
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rd_mfu = rrddim_add(st_list_hits, "mfu", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_mfug = rrddim_add(st_list_hits, "mfug", "mfu ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_mru = rrddim_add(st_list_hits, "mru", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_mrug = rrddim_add(st_list_hits, "mrug", "mru ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_list_hits, rd_mfu, arcstats.mfu_hits);
- rrddim_set_by_pointer(st_list_hits, rd_mru, arcstats.mru_hits);
- rrddim_set_by_pointer(st_list_hits, rd_mfug, arcstats.mfu_ghost_hits);
- rrddim_set_by_pointer(st_list_hits, rd_mrug, arcstats.mru_ghost_hits);
- rrdset_done(st_list_hits);
- }
-}
-
-void generate_charts_arc_summary(const char *plugin, const char *module, int show_zero_charts, int update_every) {
- static int do_arc_size_breakdown = -1, do_memory = -1, do_important_ops = -1, do_actual_hits = -1, \
- do_demand_data_hits = -1, do_prefetch_data_hits = -1, do_hash_elements = -1, do_hash_chains = -1;
-
- if(unlikely(do_arc_size_breakdown == -1))
- do_arc_size_breakdown = do_memory = do_important_ops = do_actual_hits = do_demand_data_hits \
- = do_prefetch_data_hits = do_hash_elements = do_hash_chains = show_zero_charts;
-
- unsigned long long arc_accesses_total = arcstats.hits + arcstats.misses;
- unsigned long long real_hits = arcstats.mfu_hits + arcstats.mru_hits;
- unsigned long long real_misses = arc_accesses_total - real_hits;
-
- //unsigned long long anon_hits = arcstats.hits - (arcstats.mfu_hits + arcstats.mru_hits + arcstats.mfu_ghost_hits + arcstats.mru_ghost_hits);
-
- unsigned long long arc_size = arcstats.size;
- unsigned long long mru_size = arcstats.p;
- //unsigned long long target_min_size = arcstats.c_min;
- //unsigned long long target_max_size = arcstats.c_max;
- unsigned long long target_size = arcstats.c;
- //unsigned long long target_size_ratio = (target_max_size / target_min_size);
-
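-    // the MFU size is not reported directly: estimate it by subtracting the
-    // MRU target (p) from whichever is larger, the actual ARC size or the
-    // ARC target size (c)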
- unsigned long long mfu_size;
- if(arc_size > target_size)
- mfu_size = arc_size - mru_size;
- else
- mfu_size = target_size - mru_size;
-
- // --------------------------------------------------------------------
-
- if(likely(do_arc_size_breakdown == CONFIG_BOOLEAN_YES || mru_size || mfu_size)) {
- do_arc_size_breakdown = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_arc_size_breakdown = NULL;
- static RRDDIM *rd_most_recent = NULL;
- static RRDDIM *rd_most_frequent = NULL;
-
- if (unlikely(!st_arc_size_breakdown)) {
- st_arc_size_breakdown = rrdset_create_localhost(
- "zfs"
- , "arc_size_breakdown"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS ARC Size Breakdown"
- , "percentage"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_ARC_SIZE_BREAKDOWN
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_most_recent = rrddim_add(st_arc_size_breakdown, "recent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL);
- rd_most_frequent = rrddim_add(st_arc_size_breakdown, "frequent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL);
- }
-
- rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_recent, mru_size);
- rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_frequent, mfu_size);
- rrdset_done(st_arc_size_breakdown);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_memory == CONFIG_BOOLEAN_YES || arcstats.memory_direct_count \
- || arcstats.memory_throttle_count \
- || arcstats.memory_indirect_count)) {
- do_memory = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_memory = NULL;
-#ifndef __FreeBSD__
- static RRDDIM *rd_direct = NULL;
-#endif
- static RRDDIM *rd_throttled = NULL;
-#ifndef __FreeBSD__
- static RRDDIM *rd_indirect = NULL;
-#endif
-
- if (unlikely(!st_memory)) {
- st_memory = rrdset_create_localhost(
- "zfs"
- , "memory_ops"
- , NULL
- , ZFS_FAMILY_OPERATIONS
- , NULL
- , "ZFS Memory Operations"
- , "operations/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_MEMORY_OPS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
-#ifndef __FreeBSD__
- rd_direct = rrddim_add(st_memory, "direct", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#endif
- rd_throttled = rrddim_add(st_memory, "throttled", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#ifndef __FreeBSD__
- rd_indirect = rrddim_add(st_memory, "indirect", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-#endif
- }
-
-#ifndef __FreeBSD__
- rrddim_set_by_pointer(st_memory, rd_direct, arcstats.memory_direct_count);
-#endif
- rrddim_set_by_pointer(st_memory, rd_throttled, arcstats.memory_throttle_count);
-#ifndef __FreeBSD__
- rrddim_set_by_pointer(st_memory, rd_indirect, arcstats.memory_indirect_count);
-#endif
- rrdset_done(st_memory);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_important_ops == CONFIG_BOOLEAN_YES || arcstats.deleted \
- || arcstats.evict_skip \
- || arcstats.mutex_miss \
- || arcstats.hash_collisions)) {
- do_important_ops = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_important_ops = NULL;
- static RRDDIM *rd_deleted = NULL;
- static RRDDIM *rd_mutex_misses = NULL;
- static RRDDIM *rd_evict_skips = NULL;
- static RRDDIM *rd_hash_collisions = NULL;
-
- if (unlikely(!st_important_ops)) {
- st_important_ops = rrdset_create_localhost(
- "zfs"
- , "important_ops"
- , NULL
- , ZFS_FAMILY_OPERATIONS
- , NULL
- , "ZFS Important Operations"
- , "operations/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_IMPORTANT_OPS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_evict_skips = rrddim_add(st_important_ops, "eskip", "evict skip", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_deleted = rrddim_add(st_important_ops, "deleted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_mutex_misses = rrddim_add(st_important_ops, "mtxmis", "mutex miss", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_hash_collisions = rrddim_add(st_important_ops, "hash_collisions", "hash collisions", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_important_ops, rd_deleted, arcstats.deleted);
- rrddim_set_by_pointer(st_important_ops, rd_evict_skips, arcstats.evict_skip);
- rrddim_set_by_pointer(st_important_ops, rd_mutex_misses, arcstats.mutex_miss);
- rrddim_set_by_pointer(st_important_ops, rd_hash_collisions, arcstats.hash_collisions);
- rrdset_done(st_important_ops);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_actual_hits == CONFIG_BOOLEAN_YES || real_hits || real_misses)) {
- do_actual_hits = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_actual_hits = NULL;
- static RRDDIM *rd_actual_hits = NULL;
- static RRDDIM *rd_actual_misses = NULL;
-
- if (unlikely(!st_actual_hits)) {
- st_actual_hits = rrdset_create_localhost(
- "zfs"
- , "actual_hits"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS Actual Cache Hits"
- , "percentage"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_ACTUAL_HITS
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_actual_hits = rrddim_add(st_actual_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rd_actual_misses = rrddim_add(st_actual_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- }
-
- rrddim_set_by_pointer(st_actual_hits, rd_actual_hits, real_hits);
- rrddim_set_by_pointer(st_actual_hits, rd_actual_misses, real_misses);
- rrdset_done(st_actual_hits);
-
- static RRDSET *st_actual_hits_rate = NULL;
- static RRDDIM *rd_actual_hits_rate = NULL;
- static RRDDIM *rd_actual_misses_rate = NULL;
-
- if (unlikely(!st_actual_hits_rate)) {
- st_actual_hits_rate = rrdset_create_localhost(
- "zfs"
- , "actual_hits_rate"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS Actual Cache Hits Rate"
- , "events/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_ACTUAL_HITS + 1
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_actual_hits_rate = rrddim_add(st_actual_hits_rate, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_actual_misses_rate = rrddim_add(st_actual_hits_rate, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_actual_hits_rate, rd_actual_hits_rate, real_hits);
- rrddim_set_by_pointer(st_actual_hits_rate, rd_actual_misses_rate, real_misses);
- rrdset_done(st_actual_hits_rate);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_demand_data_hits == CONFIG_BOOLEAN_YES || arcstats.demand_data_hits || arcstats.demand_data_misses)) {
- do_demand_data_hits = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_demand_data_hits = NULL;
- static RRDDIM *rd_demand_data_hits = NULL;
- static RRDDIM *rd_demand_data_misses = NULL;
-
- if (unlikely(!st_demand_data_hits)) {
- st_demand_data_hits = rrdset_create_localhost(
- "zfs"
- , "demand_data_hits"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS Data Demand Efficiency"
- , "percentage"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_DEMAND_DATA_HITS
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_demand_data_hits = rrddim_add(st_demand_data_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rd_demand_data_misses = rrddim_add(st_demand_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- }
-
- rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_hits, arcstats.demand_data_hits);
- rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_misses, arcstats.demand_data_misses);
- rrdset_done(st_demand_data_hits);
-
- static RRDSET *st_demand_data_hits_rate = NULL;
- static RRDDIM *rd_demand_data_hits_rate = NULL;
- static RRDDIM *rd_demand_data_misses_rate = NULL;
-
- if (unlikely(!st_demand_data_hits_rate)) {
- st_demand_data_hits_rate = rrdset_create_localhost(
- "zfs"
- , "demand_data_hits_rate"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS Data Demand Efficiency Rate"
- , "events/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_DEMAND_DATA_HITS + 1
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_demand_data_hits_rate = rrddim_add(st_demand_data_hits_rate, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_demand_data_misses_rate = rrddim_add(st_demand_data_hits_rate, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_demand_data_hits_rate, rd_demand_data_hits_rate, arcstats.demand_data_hits);
- rrddim_set_by_pointer(st_demand_data_hits_rate, rd_demand_data_misses_rate, arcstats.demand_data_misses);
- rrdset_done(st_demand_data_hits_rate);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_prefetch_data_hits == CONFIG_BOOLEAN_YES || arcstats.prefetch_data_hits \
- || arcstats.prefetch_data_misses)) {
- do_prefetch_data_hits = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_prefetch_data_hits = NULL;
- static RRDDIM *rd_prefetch_data_hits = NULL;
- static RRDDIM *rd_prefetch_data_misses = NULL;
-
- if (unlikely(!st_prefetch_data_hits)) {
- st_prefetch_data_hits = rrdset_create_localhost(
- "zfs"
- , "prefetch_data_hits"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS Data Prefetch Efficiency"
- , "percentage"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_PREFETCH_DATA_HITS
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_prefetch_data_hits = rrddim_add(st_prefetch_data_hits, "hits", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rd_prefetch_data_misses = rrddim_add(st_prefetch_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- }
-
- rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_hits, arcstats.prefetch_data_hits);
- rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_misses, arcstats.prefetch_data_misses);
- rrdset_done(st_prefetch_data_hits);
-
- static RRDSET *st_prefetch_data_hits_rate = NULL;
- static RRDDIM *rd_prefetch_data_hits_rate = NULL;
- static RRDDIM *rd_prefetch_data_misses_rate = NULL;
-
- if (unlikely(!st_prefetch_data_hits_rate)) {
- st_prefetch_data_hits_rate = rrdset_create_localhost(
- "zfs"
- , "prefetch_data_hits_rate"
- , NULL
- , ZFS_FAMILY_EFFICIENCY
- , NULL
- , "ZFS Data Prefetch Efficiency Rate"
- , "events/s"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_PREFETCH_DATA_HITS + 1
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rd_prefetch_data_hits_rate = rrddim_add(st_prefetch_data_hits_rate, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_prefetch_data_misses_rate = rrddim_add(st_prefetch_data_hits_rate, "misses", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st_prefetch_data_hits_rate, rd_prefetch_data_hits_rate, arcstats.prefetch_data_hits);
- rrddim_set_by_pointer(st_prefetch_data_hits_rate, rd_prefetch_data_misses_rate, arcstats.prefetch_data_misses);
- rrdset_done(st_prefetch_data_hits_rate);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_hash_elements == CONFIG_BOOLEAN_YES || arcstats.hash_elements || arcstats.hash_elements_max)) {
- do_hash_elements = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_hash_elements = NULL;
- static RRDDIM *rd_hash_elements_current = NULL;
- static RRDDIM *rd_hash_elements_max = NULL;
-
- if (unlikely(!st_hash_elements)) {
- st_hash_elements = rrdset_create_localhost(
- "zfs"
- , "hash_elements"
- , NULL
- , ZFS_FAMILY_HASH
- , NULL
- , "ZFS ARC Hash Elements"
- , "elements"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_HASH_ELEMENTS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_hash_elements_current = rrddim_add(st_hash_elements, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_hash_elements_max = rrddim_add(st_hash_elements, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_current, arcstats.hash_elements);
- rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_max, arcstats.hash_elements_max);
- rrdset_done(st_hash_elements);
- }
-
- // --------------------------------------------------------------------
-
- if(likely(do_hash_chains == CONFIG_BOOLEAN_YES || arcstats.hash_chains || arcstats.hash_chain_max)) {
- do_hash_chains = CONFIG_BOOLEAN_YES;
-
- static RRDSET *st_hash_chains = NULL;
- static RRDDIM *rd_hash_chains_current = NULL;
- static RRDDIM *rd_hash_chains_max = NULL;
-
- if (unlikely(!st_hash_chains)) {
- st_hash_chains = rrdset_create_localhost(
- "zfs"
- , "hash_chains"
- , NULL
- , ZFS_FAMILY_HASH
- , NULL
- , "ZFS ARC Hash Chains"
- , "chains"
- , plugin
- , module
- , NETDATA_CHART_PRIO_ZFS_HASH_CHAINS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_hash_chains_current = rrddim_add(st_hash_chains, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_hash_chains_max = rrddim_add(st_hash_chains, "max", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_current, arcstats.hash_chains);
- rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_max, arcstats.hash_chain_max);
- rrdset_done(st_hash_chains);
- }
-
- // --------------------------------------------------------------------
-
-}
diff --git a/collectors/proc.plugin/zfs_common.h b/collectors/proc.plugin/zfs_common.h
deleted file mode 100644
index 9d61de2f3..000000000
--- a/collectors/proc.plugin/zfs_common.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_ZFS_COMMON_H
-#define NETDATA_ZFS_COMMON_H 1
-
-#include "daemon/common.h"
-
-#define ZFS_FAMILY_SIZE "size"
-#define ZFS_FAMILY_EFFICIENCY "efficiency"
-#define ZFS_FAMILY_ACCESSES "accesses"
-#define ZFS_FAMILY_OPERATIONS "operations"
-#define ZFS_FAMILY_HASH "hashes"
-
-struct arcstats {
- // values
- unsigned long long hits;
- unsigned long long misses;
- unsigned long long demand_data_hits;
- unsigned long long demand_data_misses;
- unsigned long long demand_metadata_hits;
- unsigned long long demand_metadata_misses;
- unsigned long long prefetch_data_hits;
- unsigned long long prefetch_data_misses;
- unsigned long long prefetch_metadata_hits;
- unsigned long long prefetch_metadata_misses;
- unsigned long long mru_hits;
- unsigned long long mru_ghost_hits;
- unsigned long long mfu_hits;
- unsigned long long mfu_ghost_hits;
- unsigned long long deleted;
- unsigned long long mutex_miss;
- unsigned long long evict_skip;
- unsigned long long evict_not_enough;
- unsigned long long evict_l2_cached;
- unsigned long long evict_l2_eligible;
- unsigned long long evict_l2_ineligible;
- unsigned long long evict_l2_skip;
- unsigned long long hash_elements;
- unsigned long long hash_elements_max;
- unsigned long long hash_collisions;
- unsigned long long hash_chains;
- unsigned long long hash_chain_max;
- unsigned long long p;
- unsigned long long c;
- unsigned long long c_min;
- unsigned long long c_max;
- unsigned long long size;
- unsigned long long hdr_size;
- unsigned long long data_size;
- unsigned long long metadata_size;
- unsigned long long other_size;
- unsigned long long anon_size;
- unsigned long long anon_evictable_data;
- unsigned long long anon_evictable_metadata;
- unsigned long long mru_size;
- unsigned long long mru_evictable_data;
- unsigned long long mru_evictable_metadata;
- unsigned long long mru_ghost_size;
- unsigned long long mru_ghost_evictable_data;
- unsigned long long mru_ghost_evictable_metadata;
- unsigned long long mfu_size;
- unsigned long long mfu_evictable_data;
- unsigned long long mfu_evictable_metadata;
- unsigned long long mfu_ghost_size;
- unsigned long long mfu_ghost_evictable_data;
- unsigned long long mfu_ghost_evictable_metadata;
- unsigned long long l2_hits;
- unsigned long long l2_misses;
- unsigned long long l2_feeds;
- unsigned long long l2_rw_clash;
- unsigned long long l2_read_bytes;
- unsigned long long l2_write_bytes;
- unsigned long long l2_writes_sent;
- unsigned long long l2_writes_done;
- unsigned long long l2_writes_error;
- unsigned long long l2_writes_lock_retry;
- unsigned long long l2_evict_lock_retry;
- unsigned long long l2_evict_reading;
- unsigned long long l2_evict_l1cached;
- unsigned long long l2_free_on_write;
- unsigned long long l2_cdata_free_on_write;
- unsigned long long l2_abort_lowmem;
- unsigned long long l2_cksum_bad;
- unsigned long long l2_io_error;
- unsigned long long l2_size;
- unsigned long long l2_asize;
- unsigned long long l2_hdr_size;
- unsigned long long l2_compress_successes;
- unsigned long long l2_compress_zeros;
- unsigned long long l2_compress_failures;
- unsigned long long memory_throttle_count;
- unsigned long long duplicate_buffers;
- unsigned long long duplicate_buffers_size;
- unsigned long long duplicate_reads;
- unsigned long long memory_direct_count;
- unsigned long long memory_indirect_count;
- unsigned long long arc_no_grow;
- unsigned long long arc_tempreserve;
- unsigned long long arc_loaned_bytes;
- unsigned long long arc_prune;
- unsigned long long arc_meta_used;
- unsigned long long arc_meta_limit;
- unsigned long long arc_meta_max;
- unsigned long long arc_meta_min;
- unsigned long long arc_need_free;
- unsigned long long arc_sys_free;
-
- // flags
- int l2exist;
-};
-
-void generate_charts_arcstats(const char *plugin, const char *module, int show_zero_charts, int update_every);
-void generate_charts_arc_summary(const char *plugin, const char *module, int show_zero_charts, int update_every);
-
-#endif //NETDATA_ZFS_COMMON_H
diff --git a/collectors/profile.plugin/Makefile.am b/collectors/profile.plugin/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/collectors/profile.plugin/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/profile.plugin/README.md b/collectors/profile.plugin/README.md
deleted file mode 100644
index 1f200fc31..000000000
--- a/collectors/profile.plugin/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# profile.plugin
-
-This plugin allows you to backfill an agent with randomly generated data.
-
-A user can specify:
-
- - the number of charts they want,
- - the number of dimensions per chart,
- - the desired `update every` collection frequency,
- - the number of seconds to backfill,
- - the number of collection threads.
-
-## Configuration
-
-Edit the `netdata.conf` configuration file using [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) from the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory), which is typically at `/etc/netdata`.
-
-Scroll down to the `[plugin:profile]` section to find the available options:
-
-```
-[plugin:profile]
- update every = 5
- number of charts = 200
- number of dimensions per chart = 5
- seconds to backfill = 86400
- number of threads = 16
-```
-
-The `number of threads` option creates the specified number of collection
-threads. The rest of the options apply to each thread individually, e.g. the
-above configuration will create 3,200 charts with 16,000 dimensions in total,
-backfilled for the duration of one day.
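-(Per thread, that is 200 charts × 5 dimensions = 1,000 dimensions; across 16
-threads, 3,200 charts and 16,000 dimensions. With `update every = 5`, one day
-of backfill is 86,400 / 5 = 17,280 points per dimension, or roughly 276
-million points in total.)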
-
-Note that all but the first chart created in each thread will be marked as
-hidden to ease the load on the dashboard's UI.
diff --git a/collectors/profile.plugin/plugin_profile.cc b/collectors/profile.plugin/plugin_profile.cc
deleted file mode 100644
index 5f7b22d25..000000000
--- a/collectors/profile.plugin/plugin_profile.cc
+++ /dev/null
@@ -1,228 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "daemon/common.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <random>
-#include <thread>
-#include <vector>
-
-#define PLUGIN_PROFILE_NAME "profile.plugin"
-
-#define CONFIG_SECTION_PROFILE "plugin:profile"
-
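-// Generates a fixed pool of pseudo-random values up front and cycles through
-// it, so backfilling does not pay the RNG cost for every collected point.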
-class Generator {
-public:
- Generator(size_t N) : Offset(0) {
- std::random_device RandDev;
- std::mt19937 Gen(RandDev());
- std::uniform_int_distribution<int> D(-16, 16);
-
- V.reserve(N);
- for (size_t Idx = 0; Idx != N; Idx++)
- V.push_back(D(Gen));
- }
-
- double getRandValue() {
- return V[Offset++ % V.size()];
- }
-
-private:
- size_t Offset;
- std::vector<double> V;
-};
-
-class Profiler {
-public:
- Profiler(size_t ID, size_t NumCharts, size_t NumDimsPerChart, time_t SecondsToBackfill, int UpdateEvery) :
- ID(ID),
- NumCharts(NumCharts),
- NumDimsPerChart(NumDimsPerChart),
- SecondsToBackfill(SecondsToBackfill),
- UpdateEvery(UpdateEvery),
- Gen(1024 * 1024)
- {}
-
- void create() {
- char ChartId[1024];
- char DimId[1024];
-
- Charts.reserve(NumCharts);
- for (size_t I = 0; I != NumCharts; I++) {
- size_t CID = ID + Charts.size() + 1;
-
- snprintfz(ChartId, 1024 - 1, "chart_%zu", CID);
-
- RRDSET *RS = rrdset_create_localhost(
- "profile", // type
- ChartId, // id
- nullptr, // name,
- "profile_family", // family
- "profile_context", // context
- "profile_title", // title
- "profile_units", // units
- "profile_plugin", // plugin
- "profile_module", // module
- 12345678 + CID, // priority
- UpdateEvery, // update_every
- RRDSET_TYPE_LINE // chart_type
- );
- if (I != 0)
- rrdset_flag_set(RS, RRDSET_FLAG_HIDDEN);
- Charts.push_back(RS);
-
- Dimensions.reserve(NumDimsPerChart);
- for (size_t J = 0; J != NumDimsPerChart; J++) {
- snprintfz(DimId, 1024 - 1, "dim_%zu", J);
-
- RRDDIM *RD = rrddim_add(
- RS, // st
- DimId, // id
- nullptr, // name
- 1, // multiplier
- 1, // divisor
- RRD_ALGORITHM_ABSOLUTE // algorithm
- );
-
- Dimensions.push_back(RD);
- }
- }
- }
-
- void update(const struct timeval &Now) {
- for (RRDSET *RS: Charts) {
- for (RRDDIM *RD : Dimensions) {
- rrddim_timed_set_by_pointer(RS, RD, Now, Gen.getRandValue());
- }
-
- rrdset_timed_done(RS, Now, RS->counter_done != 0);
- }
- }
-
- void run() {
- #define WORKER_JOB_CREATE_CHARTS 0
- #define WORKER_JOB_UPDATE_CHARTS 1
- #define WORKER_JOB_METRIC_DURATION_TO_BACKFILL 2
- #define WORKER_JOB_METRIC_POINTS_BACKFILLED 3
-
- worker_register("PROFILER");
- worker_register_job_name(WORKER_JOB_CREATE_CHARTS, "create charts");
- worker_register_job_name(WORKER_JOB_UPDATE_CHARTS, "update charts");
- worker_register_job_custom_metric(WORKER_JOB_METRIC_DURATION_TO_BACKFILL, "duration to backfill", "seconds", WORKER_METRIC_ABSOLUTE);
- worker_register_job_custom_metric(WORKER_JOB_METRIC_POINTS_BACKFILLED, "points backfilled", "points", WORKER_METRIC_ABSOLUTE);
-
- heartbeat_t HB;
- heartbeat_init(&HB);
-
- worker_is_busy(WORKER_JOB_CREATE_CHARTS);
- create();
-
- struct timeval CollectionTV;
- now_realtime_timeval(&CollectionTV);
-
- if (SecondsToBackfill) {
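-            // start the collection clock in the past, aligned to an
-            // update-every boundary, so backfilled points land on the
-            // usual collection slots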
- CollectionTV.tv_sec -= SecondsToBackfill;
- CollectionTV.tv_sec -= (CollectionTV.tv_sec % UpdateEvery);
-
- CollectionTV.tv_usec = 0;
- }
-
- size_t BackfilledPoints = 0;
- struct timeval NowTV, PrevTV;
- now_realtime_timeval(&NowTV);
- PrevTV = NowTV;
-
- while (service_running(SERVICE_COLLECTORS)) {
- worker_is_busy(WORKER_JOB_UPDATE_CHARTS);
-
- update(CollectionTV);
- CollectionTV.tv_sec += UpdateEvery;
-
- now_realtime_timeval(&NowTV);
-
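-            // count this pass; once per wall-clock second, publish how many
-            // points were backfilled across all charts and dimensions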
- ++BackfilledPoints;
- if (NowTV.tv_sec > PrevTV.tv_sec) {
- PrevTV = NowTV;
- worker_set_metric(WORKER_JOB_METRIC_POINTS_BACKFILLED, BackfilledPoints * NumCharts * NumDimsPerChart);
- BackfilledPoints = 0;
- }
-
- size_t RemainingSeconds = (CollectionTV.tv_sec >= NowTV.tv_sec) ? 0 : (NowTV.tv_sec - CollectionTV.tv_sec);
- worker_set_metric(WORKER_JOB_METRIC_DURATION_TO_BACKFILL, RemainingSeconds);
-
- if (CollectionTV.tv_sec >= NowTV.tv_sec) {
- worker_is_idle();
- heartbeat_next(&HB, UpdateEvery * USEC_PER_SEC);
- }
- }
- }
-
-private:
- size_t ID;
- size_t NumCharts;
- size_t NumDimsPerChart;
- size_t SecondsToBackfill;
- int UpdateEvery;
-
- Generator Gen;
- std::vector<RRDSET *> Charts;
- std::vector<RRDDIM *> Dimensions;
-};
-
-static void *subprofile_main(void* Arg) {
- Profiler *P = reinterpret_cast<Profiler *>(Arg);
- P->run();
- return nullptr;
-}
-
-static void profile_main_cleanup(void *ptr) {
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *) ptr;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- netdata_log_info("cleaning up...");
-
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-extern "C" void *profile_main(void *ptr) {
- netdata_thread_cleanup_push(profile_main_cleanup, ptr);
-
- int UpdateEvery = (int) config_get_number(CONFIG_SECTION_PROFILE, "update every", 1);
- if (UpdateEvery < localhost->rrd_update_every)
- UpdateEvery = localhost->rrd_update_every;
-
-    // pick low default values, in case this plugin is ever enabled accidentally.
- size_t NumThreads = config_get_number(CONFIG_SECTION_PROFILE, "number of threads", 2);
- size_t NumCharts = config_get_number(CONFIG_SECTION_PROFILE, "number of charts", 2);
- size_t NumDimsPerChart = config_get_number(CONFIG_SECTION_PROFILE, "number of dimensions per chart", 2);
- size_t SecondsToBackfill = config_get_number(CONFIG_SECTION_PROFILE, "seconds to backfill", 10 * 60);
-
- std::vector<Profiler> Profilers;
-
- for (size_t Idx = 0; Idx != NumThreads; Idx++) {
- Profiler P(1e8 + Idx * 1e6, NumCharts, NumDimsPerChart, SecondsToBackfill, UpdateEvery);
- Profilers.push_back(P);
- }
-
- std::vector<netdata_thread_t> Threads(NumThreads);
-
- for (size_t Idx = 0; Idx != NumThreads; Idx++) {
- char Tag[NETDATA_THREAD_TAG_MAX + 1];
-
- snprintfz(Tag, NETDATA_THREAD_TAG_MAX, "PROFILER[%zu]", Idx);
- netdata_thread_create(&Threads[Idx], Tag, NETDATA_THREAD_OPTION_JOINABLE, subprofile_main, static_cast<void *>(&Profilers[Idx]));
- }
-
- for (size_t Idx = 0; Idx != NumThreads; Idx++)
- netdata_thread_join(Threads[Idx], nullptr);
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am
deleted file mode 100644
index ca49c1c02..000000000
--- a/collectors/python.d.plugin/Makefile.am
+++ /dev/null
@@ -1,229 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- python.d.plugin \
- $(NULL)
-
-include $(top_srcdir)/build/subst.inc
-SUFFIXES = .in
-
-dist_libconfig_DATA = \
- python.d.conf \
- $(NULL)
-
-dist_plugins_SCRIPTS = \
- python.d.plugin \
- $(NULL)
-
-dist_noinst_DATA = \
- python.d.plugin.in \
- README.md \
- $(NULL)
-
-dist_python_SCRIPTS = \
- $(NULL)
-
-dist_python_DATA = \
- $(NULL)
-
-userpythonconfigdir=$(configdir)/python.d
-dist_userpythonconfig_DATA = \
- $(NULL)
-
-# Explicitly install directories to avoid permission issues due to umask
-install-exec-local:
- $(INSTALL) -d $(DESTDIR)$(userpythonconfigdir)
-
-pythonconfigdir=$(libconfigdir)/python.d
-dist_pythonconfig_DATA = \
- $(NULL)
-
-include adaptec_raid/Makefile.inc
-include alarms/Makefile.inc
-include am2320/Makefile.inc
-include anomalies/Makefile.inc
-include beanstalk/Makefile.inc
-include bind_rndc/Makefile.inc
-include boinc/Makefile.inc
-include ceph/Makefile.inc
-include changefinder/Makefile.inc
-include dovecot/Makefile.inc
-include example/Makefile.inc
-include exim/Makefile.inc
-include fail2ban/Makefile.inc
-include gearman/Makefile.inc
-include go_expvar/Makefile.inc
-include haproxy/Makefile.inc
-include hddtemp/Makefile.inc
-include hpssa/Makefile.inc
-include icecast/Makefile.inc
-include ipfs/Makefile.inc
-include litespeed/Makefile.inc
-include megacli/Makefile.inc
-include memcached/Makefile.inc
-include monit/Makefile.inc
-include nvidia_smi/Makefile.inc
-include nsd/Makefile.inc
-include openldap/Makefile.inc
-include oracledb/Makefile.inc
-include pandas/Makefile.inc
-include postfix/Makefile.inc
-include puppet/Makefile.inc
-include rethinkdbs/Makefile.inc
-include retroshare/Makefile.inc
-include riakkv/Makefile.inc
-include samba/Makefile.inc
-include sensors/Makefile.inc
-include smartd_log/Makefile.inc
-include spigotmc/Makefile.inc
-include squid/Makefile.inc
-include tomcat/Makefile.inc
-include tor/Makefile.inc
-include traefik/Makefile.inc
-include uwsgi/Makefile.inc
-include varnish/Makefile.inc
-include w1sensor/Makefile.inc
-include zscores/Makefile.inc
-
-pythonmodulesdir=$(pythondir)/python_modules
-dist_pythonmodules_DATA = \
- python_modules/__init__.py \
- $(NULL)
-
-basesdir=$(pythonmodulesdir)/bases
-dist_bases_DATA = \
- python_modules/bases/__init__.py \
- python_modules/bases/charts.py \
- python_modules/bases/collection.py \
- python_modules/bases/loaders.py \
- python_modules/bases/loggers.py \
- $(NULL)
-
-bases_framework_servicesdir=$(basesdir)/FrameworkServices
-dist_bases_framework_services_DATA = \
- python_modules/bases/FrameworkServices/__init__.py \
- python_modules/bases/FrameworkServices/ExecutableService.py \
- python_modules/bases/FrameworkServices/LogService.py \
- python_modules/bases/FrameworkServices/MySQLService.py \
- python_modules/bases/FrameworkServices/SimpleService.py \
- python_modules/bases/FrameworkServices/SocketService.py \
- python_modules/bases/FrameworkServices/UrlService.py \
- $(NULL)
-
-third_partydir=$(pythonmodulesdir)/third_party
-dist_third_party_DATA = \
- python_modules/third_party/__init__.py \
- python_modules/third_party/ordereddict.py \
- python_modules/third_party/lm_sensors.py \
- python_modules/third_party/mcrcon.py \
- python_modules/third_party/boinc_client.py \
- python_modules/third_party/monotonic.py \
- python_modules/third_party/filelock.py \
- $(NULL)
-
-pythonyaml2dir=$(pythonmodulesdir)/pyyaml2
-dist_pythonyaml2_DATA = \
- python_modules/pyyaml2/__init__.py \
- python_modules/pyyaml2/composer.py \
- python_modules/pyyaml2/constructor.py \
- python_modules/pyyaml2/cyaml.py \
- python_modules/pyyaml2/dumper.py \
- python_modules/pyyaml2/emitter.py \
- python_modules/pyyaml2/error.py \
- python_modules/pyyaml2/events.py \
- python_modules/pyyaml2/loader.py \
- python_modules/pyyaml2/nodes.py \
- python_modules/pyyaml2/parser.py \
- python_modules/pyyaml2/reader.py \
- python_modules/pyyaml2/representer.py \
- python_modules/pyyaml2/resolver.py \
- python_modules/pyyaml2/scanner.py \
- python_modules/pyyaml2/serializer.py \
- python_modules/pyyaml2/tokens.py \
- $(NULL)
-
-pythonyaml3dir=$(pythonmodulesdir)/pyyaml3
-dist_pythonyaml3_DATA = \
- python_modules/pyyaml3/__init__.py \
- python_modules/pyyaml3/composer.py \
- python_modules/pyyaml3/constructor.py \
- python_modules/pyyaml3/cyaml.py \
- python_modules/pyyaml3/dumper.py \
- python_modules/pyyaml3/emitter.py \
- python_modules/pyyaml3/error.py \
- python_modules/pyyaml3/events.py \
- python_modules/pyyaml3/loader.py \
- python_modules/pyyaml3/nodes.py \
- python_modules/pyyaml3/parser.py \
- python_modules/pyyaml3/reader.py \
- python_modules/pyyaml3/representer.py \
- python_modules/pyyaml3/resolver.py \
- python_modules/pyyaml3/scanner.py \
- python_modules/pyyaml3/serializer.py \
- python_modules/pyyaml3/tokens.py \
- $(NULL)
-
-python_urllib3dir=$(pythonmodulesdir)/urllib3
-dist_python_urllib3_DATA = \
- python_modules/urllib3/__init__.py \
- python_modules/urllib3/_collections.py \
- python_modules/urllib3/connection.py \
- python_modules/urllib3/connectionpool.py \
- python_modules/urllib3/exceptions.py \
- python_modules/urllib3/fields.py \
- python_modules/urllib3/filepost.py \
- python_modules/urllib3/response.py \
- python_modules/urllib3/poolmanager.py \
- python_modules/urllib3/request.py \
- $(NULL)
-
-python_urllib3_utildir=$(python_urllib3dir)/util
-dist_python_urllib3_util_DATA = \
- python_modules/urllib3/util/__init__.py \
- python_modules/urllib3/util/connection.py \
- python_modules/urllib3/util/request.py \
- python_modules/urllib3/util/response.py \
- python_modules/urllib3/util/retry.py \
- python_modules/urllib3/util/selectors.py \
- python_modules/urllib3/util/ssl_.py \
- python_modules/urllib3/util/timeout.py \
- python_modules/urllib3/util/url.py \
- python_modules/urllib3/util/wait.py \
- $(NULL)
-
-python_urllib3_packagesdir=$(python_urllib3dir)/packages
-dist_python_urllib3_packages_DATA = \
- python_modules/urllib3/packages/__init__.py \
- python_modules/urllib3/packages/ordered_dict.py \
- python_modules/urllib3/packages/six.py \
- $(NULL)
-
-python_urllib3_backportsdir=$(python_urllib3_packagesdir)/backports
-dist_python_urllib3_backports_DATA = \
- python_modules/urllib3/packages/backports/__init__.py \
- python_modules/urllib3/packages/backports/makefile.py \
- $(NULL)
-
-python_urllib3_ssl_match_hostnamedir=$(python_urllib3_packagesdir)/ssl_match_hostname
-dist_python_urllib3_ssl_match_hostname_DATA = \
- python_modules/urllib3/packages/ssl_match_hostname/__init__.py \
- python_modules/urllib3/packages/ssl_match_hostname/_implementation.py \
- $(NULL)
-
-python_urllib3_contribdir=$(python_urllib3dir)/contrib
-dist_python_urllib3_contrib_DATA = \
- python_modules/urllib3/contrib/__init__.py \
- python_modules/urllib3/contrib/appengine.py \
- python_modules/urllib3/contrib/ntlmpool.py \
- python_modules/urllib3/contrib/pyopenssl.py \
- python_modules/urllib3/contrib/securetransport.py \
- python_modules/urllib3/contrib/socks.py \
- $(NULL)
-
-python_urllib3_securetransportdir=$(python_urllib3_contribdir)/_securetransport
-dist_python_urllib3_securetransport_DATA = \
- python_modules/urllib3/contrib/_securetransport/__init__.py \
- python_modules/urllib3/contrib/_securetransport/bindings.py \
- python_modules/urllib3/contrib/_securetransport/low_level.py \
- $(NULL)
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
deleted file mode 100644
index 569543d16..000000000
--- a/collectors/python.d.plugin/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-<!--
-title: "python.d.plugin"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/README.md"
-sidebar_label: "python.d.plugin"
-learn_status: "Published"
-learn_topic_type: "Tasks"
-learn_rel_path: "Developers/External plugins/python.d.plugin"
--->
-
-# python.d.plugin
-
-`python.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
-
-1. It runs as an independent process (`ps fax` shows it)
-2. It is started and stopped automatically by Netdata
-3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
-4. Supports any number of data collection **modules**
-5. Allows each **module** to have one or more data collection **jobs**
-6. Each **job** collects one or more metrics from a single data source
-
-## Disclaimer
-
-All third party libraries should be installed system-wide or in the `python_modules` directory.
-Module configurations are written in YAML and **pyYAML is required**.
-
-Every configuration file must have one of two formats:
-
-- Configuration for only one job:
-
-```yaml
-update_every : 2 # update frequency
-priority : 20000 # where it is shown on dashboard
-
-other_var1 : bla # variables passed to module
-other_var2 : alb
-```
-
-- Configuration for many jobs (e.g. mysql):
-
-```yaml
-# module defaults:
-update_every : 2
-priority : 20000
-
-local: # job name
- update_every : 5 # job update frequency
- other_var1 : some_val # module specific variable
-
-other_job:
- priority : 5 # job position on dashboard
- other_var2 : val # module specific variable
-```
-
-`update_every` and `priority` are always optional.
-
-## How to debug a python module
-
-```
-# become user netdata
-sudo su -s /bin/bash netdata
-```
-
-Depending on where Netdata was installed, execute one of the following commands to trace the execution of a python module:
-
-```
-# execute the plugin in debug mode, for a specific module
-/opt/netdata/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
-/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
-```
-
-Where `<module>` is the directory name under <https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin>
-
-**Note**: If you would like to execute a collector in debug mode while it is still being run by Netdata, you can pass the `nolock` CLI option to the above commands.
-
-## How to write a new module
-
-See [develop a custom collector in Python](https://github.com/netdata/netdata/blob/master/docs/guides/python-collector.md).
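-
-As a quick orientation, each module is a single `<module>.chart.py` file that defines chart metadata plus a `Service` class with `check()` and `get_data()` methods. A minimal sketch (the module and metric names here are hypothetical; the framework classes are the ones bundled with python.d.plugin, as used by the modules in this directory):
-
-```python
-# -*- coding: utf-8 -*-
-# Description: minimal python.d module sketch
-
-from random import randint
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-ORDER = [
-    'random',
-]
-
-CHARTS = {
-    'random': {
-        # options: [name, title, units, family, context, chart_type]
-        'options': [None, 'A Random Number', 'number', 'examples', 'example.random', 'line'],
-        'lines': [
-            ['random']
-        ]
-    }
-}
-
-
-class Service(SimpleService):
-    def __init__(self, configuration=None, name=None):
-        SimpleService.__init__(self, configuration=configuration, name=name)
-        self.order = ORDER
-        self.definitions = CHARTS
-
-    def check(self):
-        # return True if the data source is usable; returning False disables the job
-        return True
-
-    def get_data(self):
-        # return a dict mapping dimension ids to numeric values, or None on failure
-        return {'random': randint(0, 100)}
-```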
diff --git a/collectors/python.d.plugin/adaptec_raid/Makefile.inc b/collectors/python.d.plugin/adaptec_raid/Makefile.inc
deleted file mode 100644
index 716cdb235..000000000
--- a/collectors/python.d.plugin/adaptec_raid/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += adaptec_raid/adaptec_raid.chart.py
-dist_pythonconfig_DATA += adaptec_raid/adaptec_raid.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += adaptec_raid/README.md adaptec_raid/Makefile.inc
-
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
deleted file mode 120000
index 97a103eb9..000000000
--- a/collectors/python.d.plugin/adaptec_raid/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/adaptecraid.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
deleted file mode 100644
index 1995ad681..000000000
--- a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: adaptec_raid netdata python.d module
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-import re
-from copy import deepcopy
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-from bases.collection import find_binary
-
-disabled_by_default = True
-
-update_every = 5
-
-ORDER = [
- 'ld_status',
- 'pd_state',
- 'pd_smart_warnings',
- 'pd_temperature',
-]
-
-CHARTS = {
- 'ld_status': {
- 'options': [None, 'Status of logical devices (1: Failed or Degraded)', 'bool', 'logical devices',
- 'adaptec_raid.ld_status', 'line'],
- 'lines': []
- },
- 'pd_state': {
- 'options': [None, 'State of physical devices (1: not Online)', 'bool', 'physical devices',
- 'adaptec_raid.pd_state', 'line'],
- 'lines': []
- },
- 'pd_smart_warnings': {
- 'options': [None, 'S.M.A.R.T warnings', 'count', 'physical devices',
- 'adaptec_raid.smart_warnings', 'line'],
- 'lines': []
- },
- 'pd_temperature': {
- 'options': [None, 'Temperature', 'celsius', 'physical devices', 'adaptec_raid.temperature', 'line'],
- 'lines': []
- },
-}
-
-SUDO = 'sudo'
-ARCCONF = 'arcconf'
-
-BAD_LD_STATUS = (
- 'Degraded',
- 'Failed',
-)
-
-GOOD_PD_STATUS = (
- 'Online',
-)
-
-RE_LD = re.compile(
- r'Logical [dD]evice number\s+([0-9]+).*?'
- r'Status of [lL]ogical [dD]evice\s+: ([a-zA-Z]+)'
-)
-
-
-def find_lds(d):
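-    # arcconf wraps each logical-device record across several lines; flatten the output to one string so RE_LD can pair every id with its status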
- d = ' '.join(v.strip() for v in d)
- return [LD(*v) for v in RE_LD.findall(d)]
-
-
-def find_pds(d):
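-    # parse the 'GETCONFIG 1 PD' output as a simple state machine: a 'Device #N' header starts a new PD, and subsequent lines fill in its fields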
- pds = list()
- pd = PD()
-
- for row in d:
- row = row.strip()
- if row.startswith('Device #'):
- pd = PD()
- pd.id = row.split('#')[-1]
- elif not pd.id:
- continue
-
- if row.startswith('State'):
- v = row.split()[-1]
- pd.state = v
- elif row.startswith('S.M.A.R.T. warnings'):
- v = row.split()[-1]
- pd.smart_warnings = v
- elif row.startswith('Temperature'):
- v = row.split(':')[-1].split()[0]
- pd.temperature = v
- elif row.startswith(('NCQ status', 'Device Phy')) or not row:
- if pd.id and pd.state and pd.smart_warnings:
- pds.append(pd)
- pd = PD()
-
- return pds
-
-
-class LD:
- def __init__(self, ld_id, status):
- self.id = ld_id
- self.status = status
-
- def data(self):
- return {
- 'ld_{0}_status'.format(self.id): int(self.status in BAD_LD_STATUS)
- }
-
-
-class PD:
- def __init__(self):
- self.id = None
- self.state = None
- self.smart_warnings = None
- self.temperature = None
-
- def data(self):
- data = {
- 'pd_{0}_state'.format(self.id): int(self.state not in GOOD_PD_STATUS),
- 'pd_{0}_smart_warnings'.format(self.id): self.smart_warnings,
- }
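-        # temperature may be reported as a non-numeric placeholder, so it is charted only when the value is numeric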
- if self.temperature and self.temperature.isdigit():
- data['pd_{0}_temperature'.format(self.id)] = self.temperature
-
- return data
-
-
-class Arcconf:
- def __init__(self, arcconf):
- self.arcconf = arcconf
-
- def ld_info(self):
- return [self.arcconf, 'GETCONFIG', '1', 'LD']
-
- def pd_info(self):
- return [self.arcconf, 'GETCONFIG', '1', 'PD']
-
-
-# TODO: hardcoded sudo...
-class SudoArcconf:
- def __init__(self, arcconf, sudo):
- self.arcconf = Arcconf(arcconf)
- self.sudo = sudo
-
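-    # 'sudo -n' runs non-interactively: it fails immediately instead of prompting for a password, so the plugin never blocks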
- def ld_info(self):
- return [self.sudo, '-n'] + self.arcconf.ld_info()
-
- def pd_info(self):
- return [self.sudo, '-n'] + self.arcconf.pd_info()
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = deepcopy(CHARTS)
- self.use_sudo = self.configuration.get('use_sudo', True)
- self.arcconf = None
-
- def execute(self, command, stderr=False):
- return self._get_raw_data(command=command, stderr=stderr)
-
- def check(self):
- arcconf = find_binary(ARCCONF)
- if not arcconf:
- self.error('can\'t locate "{0}" binary'.format(ARCCONF))
- return False
-
- sudo = find_binary(SUDO)
- if self.use_sudo:
- if not sudo:
- self.error('can\'t locate "{0}" binary'.format(SUDO))
- return False
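-            # 'sudo -n -v' verifies that passwordless sudo is configured; any stderr output means it is not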
- err = self.execute([sudo, '-n', '-v'], True)
- if err:
- self.error(' '.join(err))
- return False
-
- if self.use_sudo:
- self.arcconf = SudoArcconf(arcconf, sudo)
- else:
- self.arcconf = Arcconf(arcconf)
-
- lds = self.get_lds()
- if not lds:
- return False
-
- self.debug('discovered logical devices ids: {0}'.format([ld.id for ld in lds]))
-
- pds = self.get_pds()
- if not pds:
- return False
-
- self.debug('discovered physical devices ids: {0}'.format([pd.id for pd in pds]))
-
- self.update_charts(lds, pds)
- return True
-
- def get_data(self):
- data = dict()
-
- for ld in self.get_lds():
- data.update(ld.data())
-
- for pd in self.get_pds():
- data.update(pd.data())
-
- return data
-
- def get_lds(self):
- raw_lds = self.execute(self.arcconf.ld_info())
- if not raw_lds:
- return None
-
- lds = find_lds(raw_lds)
- if not lds:
- self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.ld_info())))
- self.debug('output: {0}'.format(raw_lds))
- return None
- return lds
-
- def get_pds(self):
- raw_pds = self.execute(self.arcconf.pd_info())
- if not raw_pds:
- return None
-
- pds = find_pds(raw_pds)
- if not pds:
- self.error('failed to parse "{0}" output'.format(' '.join(self.arcconf.pd_info())))
- self.debug('output: {0}'.format(raw_pds))
- return None
- return pds
-
- def update_charts(self, lds, pds):
- charts = self.definitions
- for ld in lds:
- dim = ['ld_{0}_status'.format(ld.id), 'ld {0}'.format(ld.id)]
- charts['ld_status']['lines'].append(dim)
-
- for pd in pds:
- dim = ['pd_{0}_state'.format(pd.id), 'pd {0}'.format(pd.id)]
- charts['pd_state']['lines'].append(dim)
-
- dim = ['pd_{0}_smart_warnings'.format(pd.id), 'pd {0}'.format(pd.id)]
- charts['pd_smart_warnings']['lines'].append(dim)
-
- dim = ['pd_{0}_temperature'.format(pd.id), 'pd {0}'.format(pd.id)]
- charts['pd_temperature']['lines'].append(dim)
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
deleted file mode 100644
index fa462ec83..000000000
--- a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.conf
+++ /dev/null
@@ -1,53 +0,0 @@
-# netdata python.d.plugin configuration for adaptec raid
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md b/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md
deleted file mode 100644
index 13d22ba54..000000000
--- a/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md
+++ /dev/null
@@ -1,204 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/adaptec_raid/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/adaptec_raid/metadata.yaml"
-sidebar_label: "AdaptecRAID"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# AdaptecRAID
-
-
-<img src="https://netdata.cloud/img/adaptec.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: adaptec_raid
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Adaptec RAID hardware storage controller metrics about both physical and logical drives.
-
-
-It uses the `arcconf` command line utility (from Adaptec) to monitor your RAID controller.
-
-Executed commands:
- - `sudo -n arcconf GETCONFIG 1 LD`
- - `sudo -n arcconf GETCONFIG 1 PD`
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-The module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.
-
-### Default Behavior
-
-#### Auto-Detection
-
-After all the permissions are satisfied, netdata should be able to execute commands via the arcconf command line utility.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per AdaptecRAID instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| adaptec_raid.ld_status | a dimension per logical device | bool |
-| adaptec_raid.pd_state | a dimension per physical device | bool |
-| adaptec_raid.smart_warnings | a dimension per physical device | count |
-| adaptec_raid.temperature | a dimension per physical device | celsius |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ adaptec_raid_ld_status ](https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf) | adaptec_raid.ld_status | logical device status is failed or degraded |
-| [ adaptec_raid_pd_state ](https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf) | adaptec_raid.pd_state | physical device state is not online |
-
-
-## Setup
-
-### Prerequisites
-
-#### Grant permissions for netdata to run arcconf as sudoer
-
-The module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.
-
-Add the following to your `/etc/sudoers` file (`which arcconf` shows the full path to the binary):
-
-```bash
-netdata ALL=(root) NOPASSWD: /path/to/arcconf
-```
-
-
-#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)
-
-The default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting it is not ideal, but it is the next-best solution, since arcconf cannot otherwise be executed via sudo.
-
-As root user, do the following:
-
-```bash
-mkdir /etc/systemd/system/netdata.service.d
-echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
-systemctl daemon-reload
-systemctl restart netdata.service
-```
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/adaptec_raid.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/adaptec_raid.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally (priority, penalty, autodetection_retry, update_every); they can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration per job
-
-```yaml
-job_name:
- name: my_job_name
- update_every: 1 # the JOB's data collection frequency
- priority: 60000 # the JOB's order on the dashboard
- penalty: yes # the JOB's penalty
- autodetection_retry: 0 # the JOB's re-check interval in seconds
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `adaptec_raid` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin adaptec_raid debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/adaptec_raid/metadata.yaml b/collectors/python.d.plugin/adaptec_raid/metadata.yaml
deleted file mode 100644
index c69baff4a..000000000
--- a/collectors/python.d.plugin/adaptec_raid/metadata.yaml
+++ /dev/null
@@ -1,167 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: adaptec_raid
- monitored_instance:
- name: AdaptecRAID
- link: "https://www.microchip.com/en-us/products/storage"
- categories:
- - data-collection.storage-mount-points-and-filesystems
- icon_filename: "adaptec.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - storage
- - raid-controller
- - manage-disks
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors Adaptec RAID hardware storage controller metrics about both physical and logical drives.
- method_description: |
-        It uses the `arcconf` command line utility (from Adaptec) to monitor your RAID controller.
-
- Executed commands:
- - `sudo -n arcconf GETCONFIG 1 LD`
- - `sudo -n arcconf GETCONFIG 1 PD`
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: "The module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password."
- default_behavior:
- auto_detection:
- description: "After all the permissions are satisfied, netdata should be to execute commands via the arcconf command line utility"
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
-          - title: Grant permissions for netdata to run arcconf as sudoer
- description: |
- The module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.
-
-              Add the following to your `/etc/sudoers` file (`which arcconf` shows the full path to the binary):
-
- ```bash
- netdata ALL=(root) NOPASSWD: /path/to/arcconf
- ```
- - title: Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)
- description: |
-              The default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting it is not ideal, but it is the next-best solution, since arcconf cannot otherwise be executed via sudo.
-
- As root user, do the following:
-
- ```bash
- mkdir /etc/systemd/system/netdata.service.d
- echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
- systemctl daemon-reload
- systemctl restart netdata.service
- ```
- configuration:
- file:
- name: "python.d/adaptec_raid.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
-          The following options can be defined globally (priority, penalty, autodetection_retry, update_every); they can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- folding:
- enabled: false
- description: A basic example configuration per job
- config: |
- job_name:
- name: my_job_name
- update_every: 1 # the JOB's data collection frequency
- priority: 60000 # the JOB's order on the dashboard
- penalty: yes # the JOB's penalty
- autodetection_retry: 0 # the JOB's re-check interval in seconds
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: adaptec_raid_ld_status
- link: https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf
- metric: adaptec_raid.ld_status
- info: logical device status is failed or degraded
- - name: adaptec_raid_pd_state
- link: https://github.com/netdata/netdata/blob/master/health/health.d/adaptec_raid.conf
- metric: adaptec_raid.pd_state
- info: physical device state is not online
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: adaptec_raid.ld_status
- description: "Status of logical devices (1: Failed or Degraded)"
- unit: "bool"
- chart_type: line
- dimensions:
- - name: a dimension per logical device
- - name: adaptec_raid.pd_state
- description: "State of physical devices (1: not Online)"
- unit: "bool"
- chart_type: line
- dimensions:
- - name: a dimension per physical device
- - name: adaptec_raid.smart_warnings
- description: S.M.A.R.T warnings
- unit: "count"
- chart_type: line
- dimensions:
- - name: a dimension per physical device
- - name: adaptec_raid.temperature
- description: Temperature
- unit: "celsius"
- chart_type: line
- dimensions:
- - name: a dimension per physical device
diff --git a/collectors/python.d.plugin/alarms/Makefile.inc b/collectors/python.d.plugin/alarms/Makefile.inc
deleted file mode 100644
index c2de11724..000000000
--- a/collectors/python.d.plugin/alarms/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += alarms/alarms.chart.py
-dist_pythonconfig_DATA += alarms/alarms.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += alarms/README.md alarms/Makefile.inc
-
diff --git a/collectors/python.d.plugin/alarms/README.md b/collectors/python.d.plugin/alarms/README.md
deleted file mode 120000
index 85759ae6c..000000000
--- a/collectors/python.d.plugin/alarms/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/netdata_agent_alarms.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/alarms/alarms.chart.py b/collectors/python.d.plugin/alarms/alarms.chart.py
deleted file mode 100644
index d19427358..000000000
--- a/collectors/python.d.plugin/alarms/alarms.chart.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: alarms netdata python.d module
-# Author: andrewm4894
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from json import loads
-
-from bases.FrameworkServices.UrlService import UrlService
-
-update_every = 10
-disabled_by_default = True
-
-
-def charts_template(sm, alarm_status_chart_type='line'):
- order = [
- 'alarms',
- 'values'
- ]
-
- mappings = ', '.join(['{0}={1}'.format(k, v) for k, v in sm.items()])
- charts = {
- 'alarms': {
- 'options': [None, 'Alarms ({0})'.format(mappings), 'status', 'status', 'alarms.status', alarm_status_chart_type],
- 'lines': [],
- 'variables': [
- ['alarms_num'],
- ]
- },
- 'values': {
- 'options': [None, 'Alarm Values', 'value', 'value', 'alarms.value', 'line'],
- 'lines': [],
- }
- }
- return order, charts
-
-
-DEFAULT_STATUS_MAP = {'CLEAR': 0, 'WARNING': 1, 'CRITICAL': 2}
-DEFAULT_URL = 'http://127.0.0.1:19999/api/v1/alarms?all'
-DEFAULT_COLLECT_ALARM_VALUES = False
-DEFAULT_ALARM_STATUS_CHART_TYPE = 'line'
-DEFAULT_ALARM_CONTAINS_WORDS = ''
-DEFAULT_ALARM_EXCLUDES_WORDS = ''
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.sm = self.configuration.get('status_map', DEFAULT_STATUS_MAP)
- self.alarm_status_chart_type = self.configuration.get('alarm_status_chart_type', DEFAULT_ALARM_STATUS_CHART_TYPE)
- self.order, self.definitions = charts_template(self.sm, self.alarm_status_chart_type)
- self.url = self.configuration.get('url', DEFAULT_URL)
- self.collect_alarm_values = bool(self.configuration.get('collect_alarm_values', DEFAULT_COLLECT_ALARM_VALUES))
- self.collected_dims = {'alarms': set(), 'values': set()}
- self.alarm_contains_words = self.configuration.get('alarm_contains_words', DEFAULT_ALARM_CONTAINS_WORDS)
- self.alarm_contains_words_list = [alarm_contains_word.lstrip(' ').rstrip(' ') for alarm_contains_word in self.alarm_contains_words.split(',')]
- self.alarm_excludes_words = self.configuration.get('alarm_excludes_words', DEFAULT_ALARM_EXCLUDES_WORDS)
- self.alarm_excludes_words_list = [alarm_excludes_word.lstrip(' ').rstrip(' ') for alarm_excludes_word in self.alarm_excludes_words.split(',')]
-
- def _get_data(self):
- raw_data = self._get_raw_data()
- if raw_data is None:
- return None
-
- raw_data = loads(raw_data)
- alarms = raw_data.get('alarms', {})
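-        # optional name-based filtering: keep alarms containing any of the include words, then drop alarms containing any of the exclude words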
- if self.alarm_contains_words != '':
- alarms = {alarm_name: alarms[alarm_name] for alarm_name in alarms for alarm_contains_word in
- self.alarm_contains_words_list if alarm_contains_word in alarm_name}
- if self.alarm_excludes_words != '':
- alarms = {alarm_name: alarms[alarm_name] for alarm_name in alarms for alarm_excludes_word in
- self.alarm_excludes_words_list if alarm_excludes_word not in alarm_name}
-
- data = {a: self.sm[alarms[a]['status']] for a in alarms if alarms[a]['status'] in self.sm}
- self.update_charts('alarms', data)
- data['alarms_num'] = len(data)
-
- if self.collect_alarm_values:
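-            # dimension values are collected as integers, so values are scaled by 100 here and sent with divisor=100 to preserve two decimal places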
- data_values = {'{}_value'.format(a): alarms[a]['value'] * 100 for a in alarms if 'value' in alarms[a] and alarms[a]['value'] is not None}
- self.update_charts('values', data_values, divisor=100)
- data.update(data_values)
-
- return data
-
- def update_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):
- if not self.charts:
- return
-
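-        # keep chart dimensions in sync with the live alarm set: add dimensions for newly seen alarms, remove those that disappeared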
- for dim in data:
- if dim not in self.collected_dims[chart]:
- self.collected_dims[chart].add(dim)
- self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])
-
- for dim in list(self.collected_dims[chart]):
- if dim not in data:
- self.collected_dims[chart].remove(dim)
- self.charts[chart].del_dimension(dim, hide=False)
diff --git a/collectors/python.d.plugin/alarms/alarms.conf b/collectors/python.d.plugin/alarms/alarms.conf
deleted file mode 100644
index 06d76c3b3..000000000
--- a/collectors/python.d.plugin/alarms/alarms.conf
+++ /dev/null
@@ -1,60 +0,0 @@
-# netdata python.d.plugin configuration for alarms
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-
-# what url to pull data from
-local:
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
- # define how to map alarm status to numbers for the chart
- status_map:
- CLEAR: 0
- WARNING: 1
- CRITICAL: 2
- # set to true to include a chart with calculated alarm values over time
- collect_alarm_values: false
- # define the type of chart for plotting status over time e.g. 'line' or 'stacked'
- alarm_status_chart_type: 'line'
- # a "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only
- # alarms with "cpu" or "load" in alarm name. Default includes all.
- alarm_contains_words: ''
- # a "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude
- # all alarms with "cpu" or "load" in alarm name. Default excludes None.
- alarm_excludes_words: ''
diff --git a/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md b/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md
deleted file mode 100644
index 9fb69878a..000000000
--- a/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md
+++ /dev/null
@@ -1,201 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/alarms/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/alarms/metadata.yaml"
-sidebar_label: "Netdata Agent alarms"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Other"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Netdata Agent alarms
-
-Plugin: python.d.plugin
-Module: alarms
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector creates an 'Alarms' menu with one line plot of `alarms.status`.
-
-
-Alarm status is read from the Netdata agent REST API [`/api/v1/alarms?all`](https://learn.netdata.cloud/api#/alerts/alerts1).
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-It discovers instances of Netdata running on localhost, and gathers metrics from `http://127.0.0.1:19999/api/v1/alarms?all`. `CLEAR` status is mapped to `0`, `WARNING` to `1` and `CRITICAL` to `2`. Also, by default all alarms produced will be monitored.
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Netdata Agent alarms instance
-
-These metrics refer to the entire monitored application.
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| alarms.status | a dimension per alarm representing the latest status of the alarm. | status |
-| alarms.values | a dimension per alarm representing the latest collected value of the alarm. | value |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/alarms.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/alarms.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally (priority, penalty, autodetection_retry, update_every); they can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| url | Netdata agent alarms endpoint to collect from. Can be local or remote so long as it is reachable by the agent. | http://127.0.0.1:19999/api/v1/alarms?all | yes |
-| status_map | Mapping of alarm status to integer number that will be the metric value collected. | {"CLEAR": 0, "WARNING": 1, "CRITICAL": 2} | yes |
-| collect_alarm_values | set to true to include a chart with calculated alarm values over time. | no | yes |
-| alarm_status_chart_type | define the type of chart for plotting status over time e.g. 'line' or 'stacked'. | line | yes |
-| alarm_contains_words | A "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with "cpu" or "load" in alarm name. Default includes all. | | yes |
-| alarm_excludes_words | A "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with "cpu" or "load" in alarm name. Default excludes None. | | yes |
-| update_every | Sets the default data collection frequency. | 10 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-```yaml
-jobs:
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
-
-```
-##### Advanced
-
-An advanced example configuration with multiple jobs collecting different subsets of alarms for plotting on different charts.
-"ML" job will collect status and values for all alarms with "ml_" in the name. Default job will collect status for all other alarms.
-
-
-<details><summary>Config</summary>
-
-```yaml
-ML:
- update_every: 5
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
- status_map:
- CLEAR: 0
- WARNING: 1
- CRITICAL: 2
- collect_alarm_values: true
- alarm_status_chart_type: 'stacked'
- alarm_contains_words: 'ml_'
-
-Default:
- update_every: 5
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
- status_map:
- CLEAR: 0
- WARNING: 1
- CRITICAL: 2
- collect_alarm_values: false
- alarm_status_chart_type: 'stacked'
- alarm_excludes_words: 'ml_'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `alarms` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin alarms debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/alarms/metadata.yaml b/collectors/python.d.plugin/alarms/metadata.yaml
deleted file mode 100644
index 30a897787..000000000
--- a/collectors/python.d.plugin/alarms/metadata.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: alarms
- monitored_instance:
- name: Netdata Agent alarms
- link: https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/alarms/README.md
- categories:
- - data-collection.other
- icon_filename: ""
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - alarms
- - netdata
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector creates an 'Alarms' menu with one line plot of `alarms.status`.
- method_description: |
-        Alarm status is read from the Netdata agent REST API [`/api/v1/alarms?all`](https://learn.netdata.cloud/api#/alerts/alerts1).
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: |
- It discovers instances of Netdata running on localhost, and gathers metrics from `http://127.0.0.1:19999/api/v1/alarms?all`. `CLEAR` status is mapped to `0`, `WARNING` to `1` and `CRITICAL` to `2`. Also, by default all alarms produced will be monitored.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: python.d/alarms.conf
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
-          The following options can be defined globally (priority, penalty, autodetection_retry, update_every); they can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: Config options
- enabled: true
- list:
- - name: url
-            description: Netdata agent alarms endpoint to collect from. Can be local or remote so long as it is reachable by the agent.
- default_value: http://127.0.0.1:19999/api/v1/alarms?all
- required: true
- - name: status_map
- description: Mapping of alarm status to integer number that will be the metric value collected.
- default_value: '{"CLEAR": 0, "WARNING": 1, "CRITICAL": 2}'
- required: true
- - name: collect_alarm_values
- description: set to true to include a chart with calculated alarm values over time.
- default_value: false
- required: true
- - name: alarm_status_chart_type
- description: define the type of chart for plotting status over time e.g. 'line' or 'stacked'.
- default_value: "line"
- required: true
- - name: alarm_contains_words
- description: >
- A "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with "cpu" or "load" in alarm name. Default includes all.
- default_value: ""
- required: true
- - name: alarm_excludes_words
- description: >
- A "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with "cpu" or "load" in alarm name. Default excludes None.
- default_value: ""
- required: true
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 10
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
-            description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: Config
- list:
- - name: Basic
- folding:
- enabled: false
- description: A basic example configuration.
- config: |
- jobs:
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
- - name: Advanced
- folding:
- enabled: true
- description: |
- An advanced example configuration with multiple jobs collecting different subsets of alarms for plotting on different charts.
- "ML" job will collect status and values for all alarms with "ml_" in the name. Default job will collect status for all other alarms.
- config: |
- ML:
- update_every: 5
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
- status_map:
- CLEAR: 0
- WARNING: 1
- CRITICAL: 2
- collect_alarm_values: true
- alarm_status_chart_type: 'stacked'
- alarm_contains_words: 'ml_'
-
- Default:
- update_every: 5
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
- status_map:
- CLEAR: 0
- WARNING: 1
- CRITICAL: 2
- collect_alarm_values: false
- alarm_status_chart_type: 'stacked'
- alarm_excludes_words: 'ml_'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: |
- These metrics refer to the entire monitored application.
- labels: []
- metrics:
- - name: alarms.status
- description: Alarms ({status mapping})
- unit: "status"
- chart_type: line
- dimensions:
- - name: a dimension per alarm representing the latest status of the alarm.
- - name: alarms.values
- description: Alarm Values
- unit: "value"
- chart_type: line
- dimensions:
- - name: a dimension per alarm representing the latest collected value of the alarm.
diff --git a/collectors/python.d.plugin/am2320/Makefile.inc b/collectors/python.d.plugin/am2320/Makefile.inc
deleted file mode 100644
index 48e5a8892..000000000
--- a/collectors/python.d.plugin/am2320/Makefile.inc
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# install these files
-dist_python_DATA += am2320/am2320.chart.py
-dist_pythonconfig_DATA += am2320/am2320.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += am2320/README.md am2320/Makefile.inc
diff --git a/collectors/python.d.plugin/am2320/README.md b/collectors/python.d.plugin/am2320/README.md
deleted file mode 120000
index 0bc5ea90e..000000000
--- a/collectors/python.d.plugin/am2320/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/am2320.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/am2320/am2320.chart.py b/collectors/python.d.plugin/am2320/am2320.chart.py
deleted file mode 100644
index 8e66544bd..000000000
--- a/collectors/python.d.plugin/am2320/am2320.chart.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: AM2320 netdata module
-# Author: tommybuck
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-try:
- import board
- import busio
- import adafruit_am2320
-
- HAS_AM2320 = True
-except ImportError:
- HAS_AM2320 = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-ORDER = [
- 'temperature',
- 'humidity',
-]
-
-CHARTS = {
- 'temperature': {
- 'options': [None, 'Temperature', 'celsius', 'temperature', 'am2320.temperature', 'line'],
- 'lines': [
- ['temperature']
- ]
- },
- 'humidity': {
- 'options': [None, 'Relative Humidity', 'percentage', 'humidity', 'am2320.humidity', 'line'],
- 'lines': [
- ['humidity']
- ]
- }
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.am = None
-
- def check(self):
- if not HAS_AM2320:
- self.error("Could not find the adafruit-circuitpython-am2320 package.")
- return False
-
- try:
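-            # board.SCL/board.SDA resolve to the host's default I2C pins (GPIO3/GPIO2 on a Raspberry Pi)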
- i2c = busio.I2C(board.SCL, board.SDA)
- self.am = adafruit_am2320.AM2320(i2c)
- except ValueError as error:
- self.error("error on creating I2C shared bus : {0}".format(error))
- return False
-
- return True
-
- def get_data(self):
- try:
- return {
- 'temperature': self.am.temperature,
- 'humidity': self.am.relative_humidity,
- }
-
- except (OSError, RuntimeError) as error:
- self.error(error)
- return None
diff --git a/collectors/python.d.plugin/am2320/am2320.conf b/collectors/python.d.plugin/am2320/am2320.conf
deleted file mode 100644
index c6b9885fc..000000000
--- a/collectors/python.d.plugin/am2320/am2320.conf
+++ /dev/null
@@ -1,68 +0,0 @@
-# netdata python.d.plugin configuration for am2320 temperature/humidity sensor
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, am2320 also supports the following:
-#
-# - none
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/am2320/integrations/am2320.md b/collectors/python.d.plugin/am2320/integrations/am2320.md
deleted file mode 100644
index 72b351eb5..000000000
--- a/collectors/python.d.plugin/am2320/integrations/am2320.md
+++ /dev/null
@@ -1,181 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/am2320/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/am2320/metadata.yaml"
-sidebar_label: "AM2320"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Hardware Devices and Sensors"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# AM2320
-
-
-<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: am2320
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors AM2320 sensor metrics about temperature and humidity.
-
-It retrieves temperature and humidity values by contacting an AM2320 sensor over I2C.
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-Assuming prerequisites are met, the collector will try to connect to the sensor via I2C.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per AM2320 instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| am2320.temperature | temperature | celsius |
-| am2320.humidity | humidity | percentage |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Sensor connection to a Raspberry Pi
-
-Connect the AM2320 to the Raspberry Pi I2C pins.
-
-Raspberry Pi 3B/4 Pins:
-
-- Board 3.3V (pin 1) to sensor VIN (pin 1)
-- Board SDA (pin 3) to sensor SDA (pin 2)
-- Board GND (pin 6) to sensor GND (pin 3)
-- Board SCL (pin 5) to sensor SCL (pin 4)
-
-You may also need to add two I2C pull-up resistors if your board does not already have them. The Raspberry Pi does have internal pull-up resistors, but it doesn't hurt to add them anyway. You can use anything from 2.2K to 10K; 10K works fine. One resistor goes from VDD to SCL and one from VDD to SDA.
-
-
-#### Software requirements
-
-Install the Adafruit Circuit Python AM2320 library:
-
-`sudo pip3 install adafruit-circuitpython-am2320`
-
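-To verify the wiring and the library before enabling the collector, a quick standalone read (a sketch using the same Adafruit API the module itself imports) should print a temperature/humidity pair:
-
-```python
-import board
-import busio
-
-import adafruit_am2320
-
-i2c = busio.I2C(board.SCL, board.SDA)  # default I2C pins
-sensor = adafruit_am2320.AM2320(i2c)
-print(sensor.temperature, sensor.relative_humidity)
-```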
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/am2320.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/am2320.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally (priority, penalty, autodetection_retry, update_every); they can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-
-##### Local sensor
-
-A basic JOB configuration
-
-```yaml
-local_sensor:
- name: 'Local AM2320'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `am2320` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin am2320 debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/am2320/metadata.yaml b/collectors/python.d.plugin/am2320/metadata.yaml
deleted file mode 100644
index c85cd5f22..000000000
--- a/collectors/python.d.plugin/am2320/metadata.yaml
+++ /dev/null
@@ -1,135 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: am2320
- monitored_instance:
- name: AM2320
- link: 'https://learn.adafruit.com/adafruit-am2320-temperature-humidity-i2c-sensor/overview'
- categories:
- - data-collection.hardware-devices-and-sensors
- icon_filename: 'microchip.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - temperature
- - am2320
- - sensor
- - humidity
- most_popular: false
- overview:
- data_collection:
- metrics_description: 'This collector monitors AM2320 sensor metrics about temperature and humidity.'
-        method_description: 'It retrieves temperature and humidity values by contacting an AM2320 sensor over I2C.'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
-          description: 'Assuming prerequisites are met, the collector will try to connect to the sensor via I2C.'
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'Sensor connection to a Raspberry Pi'
- description: |
-            Connect the AM2320 to the Raspberry Pi I2C pins.
-
- Raspberry Pi 3B/4 Pins:
-
- - Board 3.3V (pin 1) to sensor VIN (pin 1)
- - Board SDA (pin 3) to sensor SDA (pin 2)
- - Board GND (pin 6) to sensor GND (pin 3)
- - Board SCL (pin 5) to sensor SCL (pin 4)
-
-            You may also need to add two I2C pull-up resistors if your board does not already have them. The Raspberry Pi does have internal pull-up resistors, but it doesn't hurt to add them anyway. You can use anything from 2.2K to 10K; 10K works fine. One resistor goes from VDD to SCL and one from VDD to SDA.
- - title: 'Software requirements'
- description: |
- Install the Adafruit Circuit Python AM2320 library:
-
- `sudo pip3 install adafruit-circuitpython-am2320`
- configuration:
- file:
- name: python.d/am2320.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
-          The following options can be defined globally (priority, penalty, autodetection_retry, update_every); they can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Local sensor
- description: A basic JOB configuration
- config: |
- local_sensor:
- name: 'Local AM2320'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: am2320.temperature
- description: Temperature
- unit: "celsius"
- chart_type: line
- dimensions:
- - name: temperature
- - name: am2320.humidity
- description: Relative Humidity
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: humidity
diff --git a/collectors/python.d.plugin/anomalies/Makefile.inc b/collectors/python.d.plugin/anomalies/Makefile.inc
deleted file mode 100644
index 94937b36a..000000000
--- a/collectors/python.d.plugin/anomalies/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += anomalies/anomalies.chart.py
-dist_pythonconfig_DATA += anomalies/anomalies.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += anomalies/README.md anomalies/Makefile.inc
-
diff --git a/collectors/python.d.plugin/anomalies/README.md b/collectors/python.d.plugin/anomalies/README.md
deleted file mode 100644
index 80f505375..000000000
--- a/collectors/python.d.plugin/anomalies/README.md
+++ /dev/null
@@ -1,248 +0,0 @@
-<!--
-title: "Anomaly detection with Netdata"
-description: "Use ML-driven anomaly detection to narrow your focus to only affected metrics and services/processes on your node to shorten root cause analysis."
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/anomalies/README.md"
-sidebar_url: "Anomalies"
-sidebar_label: "anomalies"
-learn_status: "Published"
-learn_rel_path: "Integrations/Monitor/Anything"
--->
-
-# Anomaly detection with Netdata
-
-**Note**: Check out the [Netdata Anomaly Advisor](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/anomaly-advisor.md) for a more native anomaly detection experience within Netdata.
-
-This collector uses the Python [PyOD](https://pyod.readthedocs.io/en/latest/index.html) library to perform unsupervised [anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) on your Netdata charts and/or dimensions.
-
-Instead of just _collecting_ data, this collector also does some computation on the data it collects to return an anomaly probability and anomaly flag for each chart or custom model you define. This computation consists of a **train** function that runs every `train_every_n` collection steps and trains the ML models on the last `train_n_secs` of data, so they learn what 'normal' typically looks like on your node. At each iteration there is also a **predict** function that uses the latest trained models and most recent metrics to produce an anomaly probability and anomaly flag for each chart or custom model you define.
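-
-For intuition, below is a minimal, illustrative sketch of that train-then-score cycle using PyOD directly. It is not the collector's actual code (that lives in `anomalies.chart.py`), and the shapes are stand-ins:
-
-```python
-import numpy as np
-from pyod.models.pca import PCA
-
-# train: learn what 'normal' looks like from a window of recent (preprocessed) metrics
-X_train = np.random.random((14400, 5))  # stand-in for one chart's feature matrix
-model = PCA(contamination=0.001)
-model.fit(X_train)
-
-# predict: score the latest observation with the most recently trained model
-x_latest = np.random.random((1, 5))
-anomaly_flag = model.predict(x_latest)[-1]            # 1 = anomalous, 0 = normal
-anomaly_prob = model.predict_proba(x_latest)[-1][1]   # probability the latest data is anomalous
-```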
-
-> As this is a somewhat unique collector and involves often subjective concepts like anomalies and anomaly probabilities, we would love to hear any feedback on it from the community. Please let us know on the [community forum](https://community.netdata.cloud/t/anomalies-collector-feedback-megathread/767) or drop us a note at [analytics-ml-team@netdata.cloud](mailto:analytics-ml-team@netdata.cloud) for any and all feedback, both positive and negative. This sort of feedback is priceless to help us make complex features more useful.
-
-## Charts
-
-Two charts are produced:
-
-- **Anomaly Probability** (`anomalies.probability`): This chart shows the probability that the latest observed data is anomalous based on the trained model for that chart (using the [`predict_proba()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict_proba) method of the trained PyOD model).
-- **Anomaly** (`anomalies.anomaly`): This chart shows `1` or `0` predictions of if the latest observed data is considered anomalous or not based on the trained model (using the [`predict()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict) method of the trained PyOD model).
-
-Below is an example of the charts produced by this collector and how they might look when things are 'normal' on the node. The anomaly probabilities tend to bounce randomly around a typically low probability range; one or two might randomly jump or drift outside of this range every now and then and show up as anomalies on the anomaly chart.
-
-![netdata-anomalies-collector-normal](https://user-images.githubusercontent.com/2178292/100663699-99755000-334e-11eb-922f-0c41a0176484.jpg)
-
-If we then go onto the system and run a command like `stress-ng --all 2` to create some [stress](https://wiki.ubuntu.com/Kernel/Reference/stress-ng), we see some charts begin to have anomaly probabilities that jump outside the typical range. When the anomaly probabilities change enough, we will start seeing anomalies being flagged on the `anomalies.anomaly` chart. The idea is that these charts are the most anomalous right now, so they could be a good place to start your troubleshooting.
-
-![netdata-anomalies-collector-abnormal](https://user-images.githubusercontent.com/2178292/100663710-9bd7aa00-334e-11eb-9d14-76fda73bc309.jpg)
-
-Then, as the issue passes, the anomaly probabilities should settle back down into their 'normal' range again.
-
-![netdata-anomalies-collector-normal-again](https://user-images.githubusercontent.com/2178292/100666681-481a9000-3351-11eb-9979-64728ee2dfb6.jpg)
-
-## Requirements
-
-- This collector only works with Python 3 and requires that the packages below be installed.
-- Typically you will not need to do this, but, if needed, you can ensure Python 3 is used by adding the line below to the `[plugin:python.d]` section of `netdata.conf`:
-
-```conf
-[plugin:python.d]
- # update every = 1
- command options = -ppython3
-```
-
-Install the required python libraries.
-
-```bash
-# become netdata user
-sudo su -s /bin/bash netdata
-# install required packages for the netdata user
-pip3 install --user netdata-pandas==0.0.38 numba==0.50.1 scikit-learn==0.23.2 pyod==0.8.3
-```
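-
-Optionally, you can then sanity check that the libraries import cleanly for the `netdata` user with a quick snippet like the one below (purely illustrative):
-
-```python
-# quick import check for the required libraries (run as the netdata user)
-import numba
-import sklearn
-import pyod
-import netdata_pandas
-
-print('anomalies collector requirements imported ok')
-```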
-
-## Configuration
-
-Install the Python requirements above, enable the collector and restart Netdata.
-
-```bash
-cd /etc/netdata/
-sudo ./edit-config python.d.conf
-# Set `anomalies: no` to `anomalies: yes`
-sudo systemctl restart netdata
-```
-
-The configuration for the anomalies collector defines how it will behave on your system and might take some experimentation over time to set optimally for your node. Out of the box, the config comes with some [sane defaults](https://www.netdata.cloud/blog/redefining-monitoring-netdata/) to get you started that try to balance the flexibility and power of the ML models with the goal of being as cheap as possible in terms of cost to the node's resources.
-
-_**Note**: If you are unsure about any of the below configuration options then it's best to just ignore all this and leave the `anomalies.conf` file alone to begin with. Then you can return to it later if you would like to tune things a bit more once the collector has been running for a while and you have a feel for its performance on your node._
-
-Edit the `python.d/anomalies.conf` configuration file using `edit-config` from your agent's [config
-directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is usually at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/anomalies.conf
-```
-
-The default configuration should look something like this. Here you can see each parameter (with sane defaults) and some information about each one and what it does.
-
-```conf
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-
-# Pull data from local Netdata node.
-anomalies:
- name: 'Anomalies'
-
- # Host to pull data from.
- host: '127.0.0.1:19999'
-
- # Username and Password for Netdata if using basic auth.
- # username: '???'
- # password: '???'
-
- # Use http or https to pull data
- protocol: 'http'
-
- # SSL verify parameter for requests.get() calls
- tls_verify: true
-
- # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
- charts_regex: 'system\..*'
-
- # Charts to exclude, useful if you would like to exclude some specific charts.
- # Note: should be a ',' separated string like 'chart.name,chart.name'.
- charts_to_exclude: 'system.uptime,system.entropy'
-
- # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'.
- # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html.
- model: 'pca'
-
- # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs.
- train_max_n: 100000
-
- # How often to re-train the model (assuming update_every=1 then train_every_n=1800 represents (re)training every 30 minutes).
- # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained.
- train_every_n: 1800
-
- # The length of the window of data to train on (14400 = last 4 hours).
- train_n_secs: 14400
-
- # How many prediction steps after a train event to just use previous prediction value for.
- # Used to reduce possibility of the training step itself appearing as an anomaly on the charts.
- train_no_prediction_n: 10
-
- # If you would like to train the model for the first time on a specific window then you can define it using the below two variables.
- # Start of training data for initial model.
- # initial_train_data_after: 1604578857
-
- # End of training data for initial model.
- # initial_train_data_before: 1604593257
-
- # If you would like to ignore recent data in training then you can offset it by offset_n_secs.
- offset_n_secs: 0
-
- # How many lagged values of each dimension to include in the 'feature vector' each model is trained on.
- lags_n: 5
-
- # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on.
- smooth_n: 3
-
- # How many differences to take in preprocessing your data.
- # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing
- # diffs_n=0 would mean training models on the raw values of each dimension.
- # diffs_n=1 means everything is done in terms of differences.
- diffs_n: 1
-
- # What is the typical proportion of anomalies in your data on average?
- # This parameter can control the sensitivity of your models to anomalies.
- # Some discussion here: https://github.com/yzhao062/pyod/issues/144
- contamination: 0.001
-
- # Set to true to include an "average_prob" dimension on anomalies probability chart which is
- # just the average of all anomaly probabilities at each time step
- include_average_prob: true
-
-  # Define any custom models you would like to create anomaly probabilities for; some examples below show how.
-  # For example, the config below creates two custom models: one to run anomaly detection on user and system cpu for our demo servers,
-  # and one on the cpu and mem apps metrics for the python.d.plugin.
- # custom_models:
- # - name: 'demos_cpu'
- # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
- # - name: 'apps_python_d_plugin'
- # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin'
-
- # Set to true to normalize, using min-max standardization, features used for the custom models.
-  # Useful if your custom models contain dimensions on very different scales and the model you use does
- # not internally do its own normalization. Usually best to leave as false.
- # custom_models_normalize: false
-```
-
-## Custom models
-
-In the `anomalies.conf` file you can also define some "custom models", which you can use to group one or more metrics into a single model, much like is done by default for the charts you specify. This is useful if you have a handful of metrics that exist in different charts but relate to the same underlying thing you would like to perform anomaly detection on, for example a specific app or user.
-
-To define a custom model you would include configuration like below in `anomalies.conf`. By default there should already be some commented out examples in there.
-
-`name` is a name you give your custom model; it is what will appear alongside any other specified charts in the `anomalies.probability` and `anomalies.anomaly` charts. `dimensions` is a string of metrics you want to include in your custom model. By default, the [netdata-pandas](https://github.com/netdata/netdata-pandas) library used to pull the data from Netdata uses a "chart.a|dim.1" type of naming convention in the pandas columns it returns, hence the `dimensions` string should look like "chart.name|dimension.name,chart.name|dimension.name". The examples below hopefully make this clear.
-
-```yaml
-custom_models:
- # a model for anomaly detection on the netdata user in terms of cpu, mem, threads, processes and sockets.
- - name: 'user_netdata'
- dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata'
- # a model for anomaly detection on the netdata python.d.plugin app in terms of cpu, mem, threads, processes and sockets.
- - name: 'apps_python_d_plugin'
- dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin'
-
-custom_models_normalize: false
-```
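-
-If you are unsure which dimension names are available to use in a custom model, the short sketch below prints the column names that netdata-pandas returns. The `get_data()` parameters mirror the collector's own call in `anomalies.chart.py`; the host and chart are just examples:
-
-```python
-import time
-from netdata_pandas.data import get_data
-
-before = int(time.time())
-after = before - 60  # last 60 seconds
-
-# pull system.cpu from the local node, with host-prefixed column names
-df = get_data(
-    host_charts_dict={'127.0.0.1:19999': ['system.cpu']},
-    host_prefix=True, host_sep='::',
-    after=after, before=before,
-    sort_cols=True, numeric_only=True, protocol='http'
-)
-print(df.columns.tolist())  # e.g. ['127.0.0.1:19999::system.cpu|user', ...]
-```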
-
-## Troubleshooting
-
-To see any relevant log messages, you can use a command like the one below.
-
-```bash
-grep 'anomalies' /var/log/netdata/error.log
-```
-
-If you would like more detail, you can log in as the `netdata` user and run the collector in debug mode:
-
-```bash
-# become netdata user
-sudo su -s /bin/bash netdata
-# run collector in debug using `nolock` option if netdata is already running the collector itself.
-/usr/libexec/netdata/plugins.d/python.d.plugin anomalies debug trace nolock
-```
-
-## Deepdive tutorial
-
-If you would like to go deeper on what exactly the anomalies collector is doing under the hood, then check out this [deepdive tutorial](https://github.com/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb) in our community repo, where you can play around with some data from our demo servers (or your own, if it's accessible to you) and work through the calculations step by step.
-
-(Note: as it's a Jupyter Notebook, it might render a little prettier on [nbviewer](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb).)
-
-## Notes
-
-- Python 3 is required as the [`netdata-pandas`](https://github.com/netdata/netdata-pandas) package uses Python async libraries ([asks](https://pypi.org/project/asks/) and [trio](https://pypi.org/project/trio/)) to make asynchronous calls to the [Netdata REST API](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the required data for each chart.
-- Python 3 is also required for the underlying ML libraries of [numba](https://pypi.org/project/numba/), [scikit-learn](https://pypi.org/project/scikit-learn/), and [PyOD](https://pypi.org/project/pyod/).
-- It may take a few hours or so (depending on your choice of `train_n_secs`) for the collector to 'settle' into its typical behaviour in terms of the trained models and probabilities you will see in the normal running of your node.
-- As this collector does most of the work in Python itself, with [PyOD](https://pyod.readthedocs.io/en/latest/) leveraging [numba](https://numba.pydata.org/) under the hood, you may want to try it out first on a test or development system to get a sense of its performance characteristics on a node similar to where you would like to use it.
-- `lags_n`, `smooth_n`, and `diffs_n` together define the preprocessing done to the raw data before models are trained and before each prediction. This essentially creates a [feature vector](https://en.wikipedia.org/wiki/Feature_(machine_learning)) for each chart model (or each custom model); a short numpy sketch of this preprocessing follows this list. The default settings for these parameters aim to create a rolling matrix of recent smoothed [differenced](https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing) values for each chart. The aim of the model then is to score how unusual this 'matrix' of features is for each chart, based on what it has learned as 'normal' from the training data. So as opposed to just looking at the single most recent value of a dimension and considering how strange it is, this approach looks at a recent smoothed window of all dimensions for a chart (or dimensions in a custom model) and asks how unusual the data as a whole looks. This should be more flexible in capturing a wider range of [anomaly types](https://andrewm4894.com/2020/10/19/different-types-of-time-series-anomalies/) and be somewhat more robust to temporary 'spikes' in the data that tend to always be happening somewhere in your metrics but often are not the most important type of anomaly (this is all covered in a lot more detail in the [deepdive tutorial](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb)).
-- You can see how long model training is taking by looking in the logs for the collector `grep 'anomalies' /var/log/netdata/error.log | grep 'training'` and you should see lines like `2020-12-01 22:02:14: python.d INFO: anomalies[local] : training complete in 2.81 seconds (runs_counter=2700, model=pca, train_n_secs=14400, models=26, n_fit_success=26, n_fit_fails=0, after=1606845731, before=1606860131).`.
- - This also gives counts of the number of models, if any, that failed to fit and so had to default back to the DefaultModel (which is currently [HBOS](https://pyod.readthedocs.io/en/latest/_modules/pyod/models/hbos.html)).
- - `after` and `before` here refer to the start and end of the training data used to train the models.
-- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) vm running Ubuntu 18.04 LTS and not doing any work some of the typical performance characteristics we saw from running this collector (with defaults) were:
- - A runtime (`netdata.runtime_anomalies`) of ~80ms when doing scoring and ~3 seconds when training or retraining the models.
- - Typically ~3%-3.5% additional cpu usage from scoring, jumping to ~60% for a couple of seconds during model training.
- - About ~150mb of ram (`apps.mem`) being continually used by the `python.d.plugin`.
-- If you activate this collector on a fresh node, it might take a little while to build up enough data to calculate a realistic and useful model.
-- Some models like `iforest` can be comparatively expensive (on the same n1-standard-2 system above: ~2s runtime during predict, ~40s training time, ~50% cpu on both train and predict), so if you would like to use it you might be advised to set a relatively high `update_every`, maybe 10, 15 or 30, in `anomalies.conf`.
-- Setting a higher `train_every_n` and `update_every` is an easy way to devote fewer resources on the node to anomaly detection. Specifying fewer charts and a lower `train_n_secs` will also help reduce resources, at the expense of covering fewer charts and maybe a noisier model if you set `train_n_secs` to be too small for how your node tends to behave.
-- If you would like to enable this on a Raspberry Pi, then check out [this guide](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/raspberry-pi-anomaly-detection.md) which will guide you through first installing LLVM.
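-
-For reference, here is a minimal numpy sketch of the `diffs_n`, `smooth_n` and `lags_n` preprocessing described above, mirroring the `make_features()` logic in `anomalies.chart.py`:
-
-```python
-import numpy as np
-
-def make_features(arr, diffs_n=1, smooth_n=3, lags_n=5):
-    arr = np.diff(arr, diffs_n, axis=0)        # 1. work on differences of the raw values
-    arr = np.cumsum(arr, axis=0, dtype=float)  # 2. rolling mean of the last smooth_n rows via cumsum
-    arr[smooth_n:] = arr[smooth_n:] - arr[:-smooth_n]
-    arr = arr[smooth_n - 1:] / smooth_n
-    arr_orig = np.copy(arr)
-    for lag_n in range(1, lags_n + 1):         # 3. append lagged copies of every column
-        lagged = np.empty_like(arr_orig)
-        lagged[:lag_n] = np.nan
-        lagged[lag_n:] = arr_orig[:-lag_n]
-        arr = np.concatenate((arr, lagged), axis=1)
-    return arr[~np.isnan(arr).any(axis=1)]     # 4. drop rows made incomplete by lagging
-
-X = make_features(np.random.random((100, 2)))
-print(X.shape)  # (92, 12): some rows lost to diff/smooth/lag padding, 2 * (1 + lags_n) columns
-```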
-
-## Useful links and further reading
-
-- [PyOD documentation](https://pyod.readthedocs.io/en/latest/), [PyOD Github](https://github.com/yzhao062/pyod).
-- [Anomaly Detection](https://en.wikipedia.org/wiki/Anomaly_detection) wikipedia page.
-- [Anomaly Detection YouTube playlist](https://www.youtube.com/playlist?list=PL6Zhl9mK2r0KxA6rB87oi4kWzoqGd5vp0) maintained by [andrewm4894](https://github.com/andrewm4894/) from Netdata.
-- [awesome-TS-anomaly-detection](https://github.com/rob-med/awesome-TS-anomaly-detection) Github list of useful tools, libraries and resources.
-- [Mendeley public group](https://www.mendeley.com/community/interesting-anomaly-detection-papers/) with some interesting anomaly detection papers we have been reading.
-- Good [blog post](https://www.anodot.com/blog/what-is-anomaly-detection/) from Anodot on time series anomaly detection. Anodot also have some great whitepapers in this space too that some may find useful.
-- Novelty and outlier detection in the [scikit-learn documentation](https://scikit-learn.org/stable/modules/outlier_detection.html).
-
diff --git a/collectors/python.d.plugin/anomalies/anomalies.chart.py b/collectors/python.d.plugin/anomalies/anomalies.chart.py
deleted file mode 100644
index 24e84cc15..000000000
--- a/collectors/python.d.plugin/anomalies/anomalies.chart.py
+++ /dev/null
@@ -1,425 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: anomalies netdata python.d module
-# Author: andrewm4894
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import sys
-import time
-from datetime import datetime
-import re
-import warnings
-
-import requests
-import numpy as np
-import pandas as pd
-from netdata_pandas.data import get_data, get_allmetrics_async
-from pyod.models.hbos import HBOS
-from pyod.models.pca import PCA
-from pyod.models.loda import LODA
-from pyod.models.iforest import IForest
-from pyod.models.cblof import CBLOF
-from pyod.models.feature_bagging import FeatureBagging
-from pyod.models.copod import COPOD
-from sklearn.preprocessing import MinMaxScaler
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# ignore some sklearn/numpy warnings that are ok
-warnings.filterwarnings('ignore', r'All-NaN slice encountered')
-warnings.filterwarnings('ignore', r'invalid value encountered in true_divide')
-warnings.filterwarnings('ignore', r'divide by zero encountered in true_divide')
-warnings.filterwarnings('ignore', r'invalid value encountered in subtract')
-
-disabled_by_default = True
-
-ORDER = ['probability', 'anomaly']
-
-CHARTS = {
- 'probability': {
- 'options': ['probability', 'Anomaly Probability', 'probability', 'anomalies', 'anomalies.probability', 'line'],
- 'lines': []
- },
- 'anomaly': {
- 'options': ['anomaly', 'Anomaly', 'count', 'anomalies', 'anomalies.anomaly', 'stacked'],
- 'lines': []
- },
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.basic_init()
- self.charts_init()
- self.custom_models_init()
- self.data_init()
- self.model_params_init()
- self.models_init()
- self.collected_dims = {'probability': set(), 'anomaly': set()}
-
- def check(self):
-        if sys.version_info < (3, 6):
- self.error("anomalies collector only works with Python>=3.6")
- if len(self.host_charts_dict[self.host]) > 0:
- _ = get_allmetrics_async(host_charts_dict=self.host_charts_dict, protocol=self.protocol, user=self.username, pwd=self.password)
- return True
-
- def basic_init(self):
- """Perform some basic initialization.
- """
- self.order = ORDER
- self.definitions = CHARTS
- self.protocol = self.configuration.get('protocol', 'http')
- self.host = self.configuration.get('host', '127.0.0.1:19999')
- self.username = self.configuration.get('username', None)
- self.password = self.configuration.get('password', None)
- self.tls_verify = self.configuration.get('tls_verify', True)
- self.fitted_at = {}
- self.df_allmetrics = pd.DataFrame()
- self.last_train_at = 0
- self.include_average_prob = bool(self.configuration.get('include_average_prob', True))
- self.reinitialize_at_every_step = bool(self.configuration.get('reinitialize_at_every_step', False))
-
- def charts_init(self):
- """Do some initialisation of charts in scope related variables.
- """
- self.charts_regex = re.compile(self.configuration.get('charts_regex','None'))
- self.charts_available = [c for c in list(requests.get(f'{self.protocol}://{self.host}/api/v1/charts', verify=self.tls_verify).json().get('charts', {}).keys())]
- self.charts_in_scope = list(filter(self.charts_regex.match, self.charts_available))
- self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')
- if len(self.charts_to_exclude) > 0:
- self.charts_in_scope = [c for c in self.charts_in_scope if c not in self.charts_to_exclude]
-
- def custom_models_init(self):
- """Perform initialization steps related to custom models.
- """
- self.custom_models = self.configuration.get('custom_models', None)
- self.custom_models_normalize = bool(self.configuration.get('custom_models_normalize', False))
- if self.custom_models:
- self.custom_models_names = [model['name'] for model in self.custom_models]
- self.custom_models_dims = [i for s in [model['dimensions'].split(',') for model in self.custom_models] for i in s]
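-            # ensure every custom model dimension is host-prefixed, i.e. 'host::chart|dim'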
- self.custom_models_dims = [dim if '::' in dim else f'{self.host}::{dim}' for dim in self.custom_models_dims]
- self.custom_models_charts = list(set([dim.split('|')[0].split('::')[1] for dim in self.custom_models_dims]))
- self.custom_models_hosts = list(set([dim.split('::')[0] for dim in self.custom_models_dims]))
- self.custom_models_host_charts_dict = {}
- for host in self.custom_models_hosts:
- self.custom_models_host_charts_dict[host] = list(set([dim.split('::')[1].split('|')[0] for dim in self.custom_models_dims if dim.startswith(host)]))
- self.custom_models_dims_renamed = [f"{model['name']}|{dim}" for model in self.custom_models for dim in model['dimensions'].split(',')]
- self.models_in_scope = list(set([f'{self.host}::{c}' for c in self.charts_in_scope] + self.custom_models_names))
- self.charts_in_scope = list(set(self.charts_in_scope + self.custom_models_charts))
- self.host_charts_dict = {self.host: self.charts_in_scope}
- for host in self.custom_models_host_charts_dict:
- if host not in self.host_charts_dict:
- self.host_charts_dict[host] = self.custom_models_host_charts_dict[host]
- else:
- for chart in self.custom_models_host_charts_dict[host]:
- if chart not in self.host_charts_dict[host]:
-                            self.host_charts_dict[host].append(chart)
- else:
- self.models_in_scope = [f'{self.host}::{c}' for c in self.charts_in_scope]
- self.host_charts_dict = {self.host: self.charts_in_scope}
- self.model_display_names = {model: model.split('::')[1] if '::' in model else model for model in self.models_in_scope}
- #self.info(f'self.host_charts_dict (len={len(self.host_charts_dict[self.host])}): {self.host_charts_dict}')
-
- def data_init(self):
- """Initialize some empty data objects.
- """
- self.data_probability_latest = {f'{m}_prob': 0 for m in self.charts_in_scope}
- self.data_anomaly_latest = {f'{m}_anomaly': 0 for m in self.charts_in_scope}
- self.data_latest = {**self.data_probability_latest, **self.data_anomaly_latest}
-
- def model_params_init(self):
- """Model parameters initialisation.
- """
- self.train_max_n = self.configuration.get('train_max_n', 100000)
- self.train_n_secs = self.configuration.get('train_n_secs', 14400)
- self.offset_n_secs = self.configuration.get('offset_n_secs', 0)
- self.train_every_n = self.configuration.get('train_every_n', 1800)
- self.train_no_prediction_n = self.configuration.get('train_no_prediction_n', 10)
- self.initial_train_data_after = self.configuration.get('initial_train_data_after', 0)
- self.initial_train_data_before = self.configuration.get('initial_train_data_before', 0)
- self.contamination = self.configuration.get('contamination', 0.001)
- self.lags_n = {model: self.configuration.get('lags_n', 5) for model in self.models_in_scope}
- self.smooth_n = {model: self.configuration.get('smooth_n', 5) for model in self.models_in_scope}
- self.diffs_n = {model: self.configuration.get('diffs_n', 5) for model in self.models_in_scope}
-
- def models_init(self):
- """Models initialisation.
- """
- self.model = self.configuration.get('model', 'pca')
- if self.model == 'pca':
- self.models = {model: PCA(contamination=self.contamination) for model in self.models_in_scope}
- elif self.model == 'loda':
- self.models = {model: LODA(contamination=self.contamination) for model in self.models_in_scope}
- elif self.model == 'iforest':
- self.models = {model: IForest(n_estimators=50, bootstrap=True, behaviour='new', contamination=self.contamination) for model in self.models_in_scope}
- elif self.model == 'cblof':
- self.models = {model: CBLOF(n_clusters=3, contamination=self.contamination) for model in self.models_in_scope}
- elif self.model == 'feature_bagging':
- self.models = {model: FeatureBagging(base_estimator=PCA(contamination=self.contamination), contamination=self.contamination) for model in self.models_in_scope}
- elif self.model == 'copod':
- self.models = {model: COPOD(contamination=self.contamination) for model in self.models_in_scope}
- elif self.model == 'hbos':
- self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope}
- else:
- self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope}
- self.custom_model_scalers = {model: MinMaxScaler() for model in self.models_in_scope}
-
- def model_init(self, model):
- """Model initialisation of a single model.
- """
- if self.model == 'pca':
- self.models[model] = PCA(contamination=self.contamination)
- elif self.model == 'loda':
- self.models[model] = LODA(contamination=self.contamination)
- elif self.model == 'iforest':
- self.models[model] = IForest(n_estimators=50, bootstrap=True, behaviour='new', contamination=self.contamination)
- elif self.model == 'cblof':
- self.models[model] = CBLOF(n_clusters=3, contamination=self.contamination)
- elif self.model == 'feature_bagging':
- self.models[model] = FeatureBagging(base_estimator=PCA(contamination=self.contamination), contamination=self.contamination)
- elif self.model == 'copod':
- self.models[model] = COPOD(contamination=self.contamination)
- elif self.model == 'hbos':
- self.models[model] = HBOS(contamination=self.contamination)
- else:
- self.models[model] = HBOS(contamination=self.contamination)
- self.custom_model_scalers[model] = MinMaxScaler()
-
- def reinitialize(self):
- """Reinitialize charts, models and data to a beginning state.
- """
- self.charts_init()
- self.custom_models_init()
- self.data_init()
- self.model_params_init()
- self.models_init()
-
- def save_data_latest(self, data, data_probability, data_anomaly):
- """Save the most recent data objects to be used if needed in the future.
- """
- self.data_latest = data
- self.data_probability_latest = data_probability
- self.data_anomaly_latest = data_anomaly
-
- def validate_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):
- """If dimension not in chart then add it.
- """
- for dim in data:
- if dim not in self.collected_dims[chart]:
- self.collected_dims[chart].add(dim)
- self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])
-
- for dim in list(self.collected_dims[chart]):
- if dim not in data:
- self.collected_dims[chart].remove(dim)
- self.charts[chart].del_dimension(dim, hide=False)
-
- def add_custom_models_dims(self, df):
- """Given a df, select columns used by custom models, add custom model name as prefix, and append to df.
-
- :param df <pd.DataFrame>: dataframe to append new renamed columns to.
- :return: <pd.DataFrame> dataframe with additional columns added relating to the specified custom models.
- """
- df_custom = df[self.custom_models_dims].copy()
- df_custom.columns = self.custom_models_dims_renamed
- df = df.join(df_custom)
-
- return df
-
- def make_features(self, arr, train=False, model=None):
- """Take in numpy array and preprocess accordingly by taking diffs, smoothing and adding lags.
-
- :param arr <np.ndarray>: numpy array we want to make features from.
- :param train <bool>: True if making features for training, in which case need to fit_transform scaler and maybe sample train_max_n.
- :param model <str>: model to make features for.
- :return: <np.ndarray> transformed numpy array.
- """
-
- def lag(arr, n):
- res = np.empty_like(arr)
- res[:n] = np.nan
- res[n:] = arr[:-n]
-
- return res
-
- arr = np.nan_to_num(arr)
-
- diffs_n = self.diffs_n[model]
- smooth_n = self.smooth_n[model]
- lags_n = self.lags_n[model]
-
- if self.custom_models_normalize and model in self.custom_models_names:
- if train:
- arr = self.custom_model_scalers[model].fit_transform(arr)
- else:
- arr = self.custom_model_scalers[model].transform(arr)
-
- if diffs_n > 0:
- arr = np.diff(arr, diffs_n, axis=0)
- arr = arr[~np.isnan(arr).any(axis=1)]
-
- if smooth_n > 1:
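-            # rolling mean over the last smooth_n rows, computed with a cumulative-sum trick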
- arr = np.cumsum(arr, axis=0, dtype=float)
- arr[smooth_n:] = arr[smooth_n:] - arr[:-smooth_n]
- arr = arr[smooth_n - 1:] / smooth_n
- arr = arr[~np.isnan(arr).any(axis=1)]
-
- if lags_n > 0:
- arr_orig = np.copy(arr)
- for lag_n in range(1, lags_n + 1):
- arr = np.concatenate((arr, lag(arr_orig, lag_n)), axis=1)
- arr = arr[~np.isnan(arr).any(axis=1)]
-
- if train:
- if len(arr) > self.train_max_n:
- arr = arr[np.random.randint(arr.shape[0], size=self.train_max_n), :]
-
- arr = np.nan_to_num(arr)
-
- return arr
-
- def train(self, models_to_train=None, train_data_after=0, train_data_before=0):
- """Pull required training data and train a model for each specified model.
-
- :param models_to_train <list>: list of models to train on.
- :param train_data_after <int>: integer timestamp for start of train data.
- :param train_data_before <int>: integer timestamp for end of train data.
- """
- now = datetime.now().timestamp()
- if train_data_after > 0 and train_data_before > 0:
- before = train_data_before
- after = train_data_after
- else:
- before = int(now) - self.offset_n_secs
- after = before - self.train_n_secs
-
- # get training data
- df_train = get_data(
- host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', after=after, before=before,
- sort_cols=True, numeric_only=True, protocol=self.protocol, float_size='float32', user=self.username, pwd=self.password,
- verify=self.tls_verify
- ).ffill()
- if self.custom_models:
- df_train = self.add_custom_models_dims(df_train)
-
- # train model
- self.try_fit(df_train, models_to_train=models_to_train)
- self.info(f'training complete in {round(time.time() - now, 2)} seconds (runs_counter={self.runs_counter}, model={self.model}, train_n_secs={self.train_n_secs}, models={len(self.fitted_at)}, n_fit_success={self.n_fit_success}, n_fit_fails={self.n_fit_fail}, after={after}, before={before}).')
- self.last_train_at = self.runs_counter
-
- def try_fit(self, df_train, models_to_train=None):
- """Try fit each model and try to fallback to a default model if fit fails for any reason.
-
- :param df_train <pd.DataFrame>: data to train on.
- :param models_to_train <list>: list of models to train.
- """
- if models_to_train is None:
- models_to_train = list(self.models.keys())
- self.n_fit_fail, self.n_fit_success = 0, 0
- for model in models_to_train:
- if model not in self.models:
- self.model_init(model)
- X_train = self.make_features(
- df_train[df_train.columns[df_train.columns.str.startswith(f'{model}|')]].values,
- train=True, model=model)
- try:
- self.models[model].fit(X_train)
- self.n_fit_success += 1
- except Exception as e:
- self.n_fit_fail += 1
- self.info(e)
- self.info(f'training failed for {model} at run_counter {self.runs_counter}, defaulting to hbos model.')
- self.models[model] = HBOS(contamination=self.contamination)
- self.models[model].fit(X_train)
- self.fitted_at[model] = self.runs_counter
-
- def predict(self):
- """Get latest data, make it into a feature vector, and get predictions for each available model.
-
- :return: (<dict>,<dict>) tuple of dictionaries, one for probability scores and the other for anomaly predictions.
- """
- # get recent data to predict on
- df_allmetrics = get_allmetrics_async(
- host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', wide=True, sort_cols=True,
- protocol=self.protocol, numeric_only=True, float_size='float32', user=self.username, pwd=self.password
- )
- if self.custom_models:
- df_allmetrics = self.add_custom_models_dims(df_allmetrics)
- self.df_allmetrics = self.df_allmetrics.append(df_allmetrics).ffill().tail((max(self.lags_n.values()) + max(self.smooth_n.values()) + max(self.diffs_n.values())) * 2)
-
- # get predictions
- data_probability, data_anomaly = self.try_predict()
-
- return data_probability, data_anomaly
-
- def try_predict(self):
- """Try make prediction and fall back to last known prediction if fails.
-
- :return: (<dict>,<dict>) tuple of dictionaries, one for probability scores and the other for anomaly predictions.
- """
- data_probability, data_anomaly = {}, {}
- for model in self.fitted_at.keys():
- model_display_name = self.model_display_names[model]
- try:
- X_model = np.nan_to_num(
- self.make_features(
- self.df_allmetrics[self.df_allmetrics.columns[self.df_allmetrics.columns.str.startswith(f'{model}|')]].values,
- model=model
- )[-1,:].reshape(1, -1)
- )
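-                # scale the 0-1 anomaly probability to 0-10000; the chart divisor of 100 renders it as 0-100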
- data_probability[model_display_name + '_prob'] = np.nan_to_num(self.models[model].predict_proba(X_model)[-1][1]) * 10000
- data_anomaly[model_display_name + '_anomaly'] = self.models[model].predict(X_model)[-1]
- except Exception as _:
- #self.info(e)
- if model_display_name + '_prob' in self.data_latest:
- #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, using last prediction instead.')
- data_probability[model_display_name + '_prob'] = self.data_latest[model_display_name + '_prob']
- data_anomaly[model_display_name + '_anomaly'] = self.data_latest[model_display_name + '_anomaly']
- else:
- #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, skipping as no previous prediction.')
- continue
-
- return data_probability, data_anomaly
-
- def get_data(self):
-
- # initialize to what's available right now
- if self.reinitialize_at_every_step or len(self.host_charts_dict[self.host]) == 0:
- self.charts_init()
- self.custom_models_init()
- self.model_params_init()
-
- # if not all models have been trained then train those we need to
- if len(self.fitted_at) < len(self.models_in_scope):
- self.train(
- models_to_train=[m for m in self.models_in_scope if m not in self.fitted_at],
- train_data_after=self.initial_train_data_after,
- train_data_before=self.initial_train_data_before
- )
- # retrain all models as per schedule from config
- elif self.train_every_n > 0 and self.runs_counter % self.train_every_n == 0:
- self.reinitialize()
- self.train()
-
- # roll forward previous predictions around a training step to avoid the possibility of having the training itself trigger an anomaly
- if (self.runs_counter - self.last_train_at) <= self.train_no_prediction_n:
- data_probability = self.data_probability_latest
- data_anomaly = self.data_anomaly_latest
- else:
- data_probability, data_anomaly = self.predict()
- if self.include_average_prob:
- average_prob = np.mean(list(data_probability.values()))
- data_probability['average_prob'] = 0 if np.isnan(average_prob) else average_prob
-
- data = {**data_probability, **data_anomaly}
-
- self.validate_charts('probability', data_probability, divisor=100)
- self.validate_charts('anomaly', data_anomaly)
-
- self.save_data_latest(data, data_probability, data_anomaly)
-
- #self.info(f'len(data)={len(data)}')
- #self.info(f'data')
-
- return data
diff --git a/collectors/python.d.plugin/anomalies/anomalies.conf b/collectors/python.d.plugin/anomalies/anomalies.conf
deleted file mode 100644
index ef867709a..000000000
--- a/collectors/python.d.plugin/anomalies/anomalies.conf
+++ /dev/null
@@ -1,184 +0,0 @@
-# netdata python.d.plugin configuration for anomalies
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 2
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-
-# Pull data from local Netdata node.
-anomalies:
- name: 'Anomalies'
-
- # Host to pull data from.
- host: '127.0.0.1:19999'
-
- # Username and Password for Netdata if using basic auth.
- # username: '???'
- # password: '???'
-
- # Use http or https to pull data
- protocol: 'http'
-
- # SSL verify parameter for requests.get() calls
- tls_verify: true
-
- # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
- charts_regex: 'system\..*'
-
- # Charts to exclude, useful if you would like to exclude some specific charts.
- # Note: should be a ',' separated string like 'chart.name,chart.name'.
- charts_to_exclude: 'system.uptime,system.entropy'
-
- # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'.
- # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html.
- model: 'pca'
-
- # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs.
- train_max_n: 100000
-
- # How often to re-train the model (assuming update_every=1 then train_every_n=1800 represents (re)training every 30 minutes).
- # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained.
- train_every_n: 1800
-
- # The length of the window of data to train on (14400 = last 4 hours).
- train_n_secs: 14400
-
- # How many prediction steps after a train event to just use previous prediction value for.
- # Used to reduce possibility of the training step itself appearing as an anomaly on the charts.
- train_no_prediction_n: 10
-
- # If you would like to train the model for the first time on a specific window then you can define it using the below two variables.
- # Start of training data for initial model.
- # initial_train_data_after: 1604578857
-
- # End of training data for initial model.
- # initial_train_data_before: 1604593257
-
- # If you would like to ignore recent data in training then you can offset it by offset_n_secs.
- offset_n_secs: 0
-
- # How many lagged values of each dimension to include in the 'feature vector' each model is trained on.
- lags_n: 5
-
- # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on.
- smooth_n: 3
-
- # How many differences to take in preprocessing your data.
- # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing
- # diffs_n=0 would mean training models on the raw values of each dimension.
- # diffs_n=1 means everything is done in terms of differences.
- diffs_n: 1
-
- # What is the typical proportion of anomalies in your data on average?
- # This parameter can control the sensitivity of your models to anomalies.
- # Some discussion here: https://github.com/yzhao062/pyod/issues/144
- contamination: 0.001
-
- # Set to true to include an "average_prob" dimension on anomalies probability chart which is
- # just the average of all anomaly probabilities at each time step
- include_average_prob: true
-
-  # Define any custom models you would like to create anomaly probabilities for; some examples below show how.
-  # For example, the config below creates two custom models: one to run anomaly detection on user and system cpu for our demo servers,
-  # and one on the cpu and mem apps metrics for the python.d.plugin.
- # custom_models:
- # - name: 'demos_cpu'
- # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
- # - name: 'apps_python_d_plugin'
- # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin'
-
- # Set to true to normalize, using min-max standardization, features used for the custom models.
-  # Useful if your custom models contain dimensions on very different scales and the model you use does
- # not internally do its own normalization. Usually best to leave as false.
- # custom_models_normalize: false
-
-# Standalone Custom models example as an additional collector job.
-# custom:
-# name: 'custom'
-# host: '127.0.0.1:19999'
-# protocol: 'http'
-# charts_regex: 'None'
-# charts_to_exclude: 'None'
-# model: 'pca'
-# train_max_n: 100000
-# train_every_n: 1800
-# train_n_secs: 14400
-# offset_n_secs: 0
-# lags_n: 5
-# smooth_n: 3
-# diffs_n: 1
-# contamination: 0.001
-# custom_models:
-# - name: 'user_netdata'
-# dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata'
-# - name: 'apps_python_d_plugin'
-# dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin'
-
-# Pull data from some demo nodes for cross node custom models.
-# demos:
-# name: 'demos'
-# host: '127.0.0.1:19999'
-# protocol: 'http'
-# charts_regex: 'None'
-# charts_to_exclude: 'None'
-# model: 'pca'
-# train_max_n: 100000
-# train_every_n: 1800
-# train_n_secs: 14400
-# offset_n_secs: 0
-# lags_n: 5
-# smooth_n: 3
-# diffs_n: 1
-# contamination: 0.001
-# custom_models:
-# - name: 'system.cpu'
-# dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
-# - name: 'system.ip'
-# dimensions: 'london.my-netdata.io::system.ip|received,london.my-netdata.io::system.ip|sent,newyork.my-netdata.io::system.ip|received,newyork.my-netdata.io::system.ip|sent'
-# - name: 'system.net'
-# dimensions: 'london.my-netdata.io::system.net|received,london.my-netdata.io::system.net|sent,newyork.my-netdata.io::system.net|received,newyork.my-netdata.io::system.net|sent'
-# - name: 'system.io'
-# dimensions: 'london.my-netdata.io::system.io|in,london.my-netdata.io::system.io|out,newyork.my-netdata.io::system.io|in,newyork.my-netdata.io::system.io|out'
-
-# Example additional job if you want to also pull data from a child streaming to your
-# local parent or even a remote node so long as the Netdata REST API is accessible.
-# mychildnode1:
-# name: 'mychildnode1'
-# host: '127.0.0.1:19999/host/mychildnode1'
-# protocol: 'http'
-# charts_regex: 'system\..*'
-# charts_to_exclude: 'None'
-# model: 'pca'
-# train_max_n: 100000
-# train_every_n: 1800
-# train_n_secs: 14400
-# offset_n_secs: 0
-# lags_n: 5
-# smooth_n: 3
-# diffs_n: 1
-# contamination: 0.001
diff --git a/collectors/python.d.plugin/anomalies/metadata.yaml b/collectors/python.d.plugin/anomalies/metadata.yaml
deleted file mode 100644
index d138cf5dd..000000000
--- a/collectors/python.d.plugin/anomalies/metadata.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-# NOTE: this file is commented out as users are recommended to use the
-# native anomaly detection capabilities on the agent instead.
-# meta:
-# plugin_name: python.d.plugin
-# module_name: anomalies
-# monitored_instance:
-# name: python.d anomalies
-# link: ""
-# categories: []
-# icon_filename: ""
-# related_resources:
-# integrations:
-# list: []
-# info_provided_to_referring_integrations:
-# description: ""
-# keywords: []
-# most_popular: false
-# overview:
-# data_collection:
-# metrics_description: ""
-# method_description: ""
-# supported_platforms:
-# include: []
-# exclude: []
-# multi_instance: true
-# additional_permissions:
-# description: ""
-# default_behavior:
-# auto_detection:
-# description: ""
-# limits:
-# description: ""
-# performance_impact:
-# description: ""
-# setup:
-# prerequisites:
-# list: []
-# configuration:
-# file:
-# name: ""
-# description: ""
-# options:
-# description: ""
-# folding:
-# title: ""
-# enabled: true
-# list: []
-# examples:
-# folding:
-# enabled: true
-# title: ""
-# list: []
-# troubleshooting:
-# problems:
-# list: []
-# alerts:
-# - name: anomalies_anomaly_probabilities
-# link: https://github.com/netdata/netdata/blob/master/health/health.d/anomalies.conf
-# metric: anomalies.probability
-# info: average anomaly probability over the last 2 minutes
-# - name: anomalies_anomaly_flags
-# link: https://github.com/netdata/netdata/blob/master/health/health.d/anomalies.conf
-# metric: anomalies.anomaly
-# info: number of anomalies in the last 2 minutes
-# metrics:
-# folding:
-# title: Metrics
-# enabled: false
-# description: ""
-# availability: []
-# scopes:
-# - name: global
-# description: ""
-# labels: []
-# metrics:
-# - name: anomalies.probability
-# description: Anomaly Probability
-# unit: "probability"
-# chart_type: line
-# dimensions:
-# - name: a dimension per probability
-# - name: anomalies.anomaly
-# description: Anomaly
-# unit: "count"
-# chart_type: stacked
-# dimensions:
-# - name: a dimension per anomaly
diff --git a/collectors/python.d.plugin/beanstalk/Makefile.inc b/collectors/python.d.plugin/beanstalk/Makefile.inc
deleted file mode 100644
index 4bbb7087d..000000000
--- a/collectors/python.d.plugin/beanstalk/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += beanstalk/beanstalk.chart.py
-dist_pythonconfig_DATA += beanstalk/beanstalk.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += beanstalk/README.md beanstalk/Makefile.inc
-
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
deleted file mode 120000
index 4efe13889..000000000
--- a/collectors/python.d.plugin/beanstalk/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/beanstalk.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py b/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
deleted file mode 100644
index 396543e5a..000000000
--- a/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: beanstalk netdata python.d module
-# Author: ilyam8
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-try:
- import beanstalkc
-
- BEANSTALKC = True
-except ImportError:
- BEANSTALKC = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from bases.loaders import load_yaml
-
-ORDER = [
- 'cpu_usage',
- 'jobs_rate',
- 'connections_rate',
- 'commands_rate',
- 'current_tubes',
- 'current_jobs',
- 'current_connections',
- 'binlog',
- 'uptime',
-]
-
-CHARTS = {
- 'cpu_usage': {
- 'options': [None, 'Cpu Usage', 'cpu time', 'server statistics', 'beanstalk.cpu_usage', 'area'],
- 'lines': [
- ['rusage-utime', 'user', 'incremental'],
- ['rusage-stime', 'system', 'incremental']
- ]
- },
- 'jobs_rate': {
- 'options': [None, 'Jobs Rate', 'jobs/s', 'server statistics', 'beanstalk.jobs_rate', 'line'],
- 'lines': [
- ['total-jobs', 'total', 'incremental'],
- ['job-timeouts', 'timeouts', 'incremental']
- ]
- },
- 'connections_rate': {
- 'options': [None, 'Connections Rate', 'connections/s', 'server statistics', 'beanstalk.connections_rate',
- 'area'],
- 'lines': [
- ['total-connections', 'connections', 'incremental']
- ]
- },
- 'commands_rate': {
- 'options': [None, 'Commands Rate', 'commands/s', 'server statistics', 'beanstalk.commands_rate', 'stacked'],
- 'lines': [
- ['cmd-put', 'put', 'incremental'],
- ['cmd-peek', 'peek', 'incremental'],
- ['cmd-peek-ready', 'peek-ready', 'incremental'],
- ['cmd-peek-delayed', 'peek-delayed', 'incremental'],
- ['cmd-peek-buried', 'peek-buried', 'incremental'],
- ['cmd-reserve', 'reserve', 'incremental'],
- ['cmd-use', 'use', 'incremental'],
- ['cmd-watch', 'watch', 'incremental'],
- ['cmd-ignore', 'ignore', 'incremental'],
- ['cmd-delete', 'delete', 'incremental'],
- ['cmd-release', 'release', 'incremental'],
- ['cmd-bury', 'bury', 'incremental'],
- ['cmd-kick', 'kick', 'incremental'],
- ['cmd-stats', 'stats', 'incremental'],
- ['cmd-stats-job', 'stats-job', 'incremental'],
- ['cmd-stats-tube', 'stats-tube', 'incremental'],
- ['cmd-list-tubes', 'list-tubes', 'incremental'],
- ['cmd-list-tube-used', 'list-tube-used', 'incremental'],
- ['cmd-list-tubes-watched', 'list-tubes-watched', 'incremental'],
- ['cmd-pause-tube', 'pause-tube', 'incremental']
- ]
- },
- 'current_tubes': {
- 'options': [None, 'Current Tubes', 'tubes', 'server statistics', 'beanstalk.current_tubes', 'area'],
- 'lines': [
- ['current-tubes', 'tubes']
- ]
- },
- 'current_jobs': {
- 'options': [None, 'Current Jobs', 'jobs', 'server statistics', 'beanstalk.current_jobs', 'stacked'],
- 'lines': [
- ['current-jobs-urgent', 'urgent'],
- ['current-jobs-ready', 'ready'],
- ['current-jobs-reserved', 'reserved'],
- ['current-jobs-delayed', 'delayed'],
- ['current-jobs-buried', 'buried']
- ]
- },
- 'current_connections': {
- 'options': [None, 'Current Connections', 'connections', 'server statistics',
- 'beanstalk.current_connections', 'line'],
- 'lines': [
- ['current-connections', 'written'],
- ['current-producers', 'producers'],
- ['current-workers', 'workers'],
- ['current-waiting', 'waiting']
- ]
- },
- 'binlog': {
- 'options': [None, 'Binlog', 'records/s', 'server statistics', 'beanstalk.binlog', 'line'],
- 'lines': [
- ['binlog-records-written', 'written', 'incremental'],
- ['binlog-records-migrated', 'migrated', 'incremental']
- ]
- },
- 'uptime': {
- 'options': [None, 'Uptime', 'seconds', 'server statistics', 'beanstalk.uptime', 'line'],
- 'lines': [
- ['uptime'],
- ]
- }
-}
-
-
-def tube_chart_template(name):
- order = [
- '{0}_jobs_rate'.format(name),
- '{0}_jobs'.format(name),
- '{0}_connections'.format(name),
- '{0}_commands'.format(name),
- '{0}_pause'.format(name)
- ]
- family = 'tube {0}'.format(name)
-
- charts = {
- order[0]: {
- 'options': [None, 'Job Rate', 'jobs/s', family, 'beanstalk.jobs_rate', 'area'],
- 'lines': [
- ['_'.join([name, 'total-jobs']), 'jobs', 'incremental']
- ]
- },
- order[1]: {
- 'options': [None, 'Jobs', 'jobs', family, 'beanstalk.jobs', 'stacked'],
- 'lines': [
- ['_'.join([name, 'current-jobs-urgent']), 'urgent'],
- ['_'.join([name, 'current-jobs-ready']), 'ready'],
- ['_'.join([name, 'current-jobs-reserved']), 'reserved'],
- ['_'.join([name, 'current-jobs-delayed']), 'delayed'],
- ['_'.join([name, 'current-jobs-buried']), 'buried']
- ]
- },
- order[2]: {
- 'options': [None, 'Connections', 'connections', family, 'beanstalk.connections', 'stacked'],
- 'lines': [
- ['_'.join([name, 'current-using']), 'using'],
- ['_'.join([name, 'current-waiting']), 'waiting'],
- ['_'.join([name, 'current-watching']), 'watching']
- ]
- },
- order[3]: {
- 'options': [None, 'Commands', 'commands/s', family, 'beanstalk.commands', 'stacked'],
- 'lines': [
- ['_'.join([name, 'cmd-delete']), 'deletes', 'incremental'],
- ['_'.join([name, 'cmd-pause-tube']), 'pauses', 'incremental']
- ]
- },
- order[4]: {
- 'options': [None, 'Pause', 'seconds', family, 'beanstalk.pause', 'stacked'],
- 'lines': [
- ['_'.join([name, 'pause']), 'since'],
- ['_'.join([name, 'pause-time-left']), 'left']
- ]
- }
- }
-
- return order, charts
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.configuration = configuration
- self.order = list(ORDER)
- self.definitions = dict(CHARTS)
- self.conn = None
- self.alive = True
-
- def check(self):
- if not BEANSTALKC:
- self.error("'beanstalkc' module is needed to use beanstalk.chart.py")
- return False
-
- self.conn = self.connect()
-
-        return bool(self.conn)
-
- def get_data(self):
- """
- :return: dict
- """
- if not self.is_alive():
- return None
-
- active_charts = self.charts.active_charts()
- data = dict()
-
- try:
- data.update(self.conn.stats())
-
- for tube in self.conn.tubes():
- stats = self.conn.stats_tube(tube)
-
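-                # Tubes come and go at runtime; create charts for a tube the first time it is seen.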
- if tube + '_jobs_rate' not in active_charts:
- self.create_new_tube_charts(tube)
-
- for stat in stats:
- data['_'.join([tube, stat])] = stats[stat]
-
- except beanstalkc.SocketError:
- self.alive = False
- return None
-
- return data or None
-
- def create_new_tube_charts(self, tube):
- order, charts = tube_chart_template(tube)
-
- for chart_name in order:
- params = [chart_name] + charts[chart_name]['options']
- dimensions = charts[chart_name]['lines']
-
- new_chart = self.charts.add_chart(params)
- for dimension in dimensions:
- new_chart.add_dimension(dimension)
-
- def connect(self):
- host = self.configuration.get('host', '127.0.0.1')
- port = self.configuration.get('port', 11300)
- timeout = self.configuration.get('timeout', 1)
- try:
- return beanstalkc.Connection(host=host,
- port=port,
- connect_timeout=timeout,
- parse_yaml=load_yaml)
- except beanstalkc.SocketError as error:
- self.error('Connection to {0}:{1} failed: {2}'.format(host, port, error))
- return None
-
- def reconnect(self):
- try:
- self.conn.reconnect()
- self.alive = True
- return True
- except beanstalkc.SocketError:
- return False
-
- def is_alive(self):
- if not self.alive:
- return self.reconnect()
- return True
diff --git a/collectors/python.d.plugin/beanstalk/beanstalk.conf b/collectors/python.d.plugin/beanstalk/beanstalk.conf
deleted file mode 100644
index 6d9773a19..000000000
--- a/collectors/python.d.plugin/beanstalk/beanstalk.conf
+++ /dev/null
@@ -1,78 +0,0 @@
-# netdata python.d.plugin configuration for beanstalk
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# chart_cleanup sets the default chart cleanup interval in iterations.
-# A chart is marked as obsolete if it has not been updated
-# 'chart_cleanup' iterations in a row.
-# When a plugin sends the obsolete flag, the charts are not deleted
-# from netdata immediately.
-# They will be hidden immediately (not offered to dashboard viewer,
-# streamed upstream and archived to external databases) and deleted one hour
-# later (configurable from netdata.conf).
-# chart_cleanup: 10
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-# chart_cleanup: 10 # the JOB's chart cleanup interval in iterations
-#
-# Additionally to the above, beanstalk also supports the following:
-#
-# host: 'host' # Server ip address or hostname. Default: 127.0.0.1
-#  port: port               # Beanstalkd port. Default: 11300
-#
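-# For example, a minimal job definition (commented out; values shown are the module defaults):
-#
-# local:
-#   host: '127.0.0.1'
-#   port: 11300
-#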
-# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md b/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md
deleted file mode 100644
index 5095c0c28..000000000
--- a/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md
+++ /dev/null
@@ -1,219 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/beanstalk/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/beanstalk/metadata.yaml"
-sidebar_label: "Beanstalk"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Message Brokers"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Beanstalk
-
-
-<img src="https://netdata.cloud/img/beanstalk.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: beanstalk
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Beanstalk metrics to enhance job queueing and processing efficiency. Track job rates, processing times, and queue lengths for better task management.
-
-The collector uses the `beanstalkc` python module to connect to a `beanstalkd` service and gather metrics.
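-
-For orientation, a minimal sketch of what the collector does through `beanstalkc` (assuming the module is installed and a beanstalkd instance is listening on the documented defaults):
-
-```python
-import beanstalkc
-
-conn = beanstalkc.Connection(host='127.0.0.1', port=11300, connect_timeout=1)
-server_stats = conn.stats()                 # server-wide statistics dict
-for tube in conn.tubes():
-    tube_stats = conn.stats_tube(tube)      # per-tube statistics dict
-```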
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If no configuration is given, the module will attempt to connect to beanstalkd at 127.0.0.1:11300.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Beanstalk instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| beanstalk.cpu_usage | user, system | cpu time |
-| beanstalk.jobs_rate | total, timeouts | jobs/s |
-| beanstalk.connections_rate | connections | connections/s |
-| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, use, watch, ignore, delete, release, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s |
-| beanstalk.current_tubes | tubes | tubes |
-| beanstalk.current_jobs | urgent, ready, reserved, delayed, buried | jobs |
-| beanstalk.current_connections | open, producers, workers, waiting | connections |
-| beanstalk.binlog | written, migrated | records/s |
-| beanstalk.uptime | uptime | seconds |
-
-### Per tube
-
-Metrics related to Beanstalk tubes. Each tube produces its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| beanstalk.jobs_rate | jobs | jobs/s |
-| beanstalk.jobs | urgent, ready, reserved, delayed, buried | jobs |
-| beanstalk.connections | using, waiting, watching | connections |
-| beanstalk.commands | deletes, pauses | commands/s |
-| beanstalk.pause | since, left | seconds |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. |
-
-
-## Setup
-
-### Prerequisites
-
-#### beanstalkc python module
-
-The collector requires the `beanstalkc` python module to be installed.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/beanstalk.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/beanstalk.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| host | IP address or hostname of the beanstalkd service. | 127.0.0.1 | no |
-| port | Port of the beanstalkd service. | 11300 | no |
-
-</details>
-
-#### Examples
-
-##### Remote beanstalk server
-
-A basic remote beanstalk server
-
-```yaml
-remote:
- name: 'beanstalk'
- host: '1.2.3.4'
- port: 11300
-
-```
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local_beanstalk'
- host: '127.0.0.1'
- port: 11300
-
-remote_job:
- name: 'remote_beanstalk'
- host: '192.0.2.1'
-  port: 11300
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `beanstalk` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin beanstalk debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/beanstalk/metadata.yaml b/collectors/python.d.plugin/beanstalk/metadata.yaml
deleted file mode 100644
index 7dff9cb3a..000000000
--- a/collectors/python.d.plugin/beanstalk/metadata.yaml
+++ /dev/null
@@ -1,263 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: beanstalk
- monitored_instance:
- name: Beanstalk
- link: "https://beanstalkd.github.io/"
- categories:
- - data-collection.message-brokers
- #- data-collection.task-queues
- icon_filename: "beanstalk.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - beanstalk
- - beanstalkd
- - message
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor Beanstalk metrics to enhance job queueing and processing efficiency. Track job rates, processing times, and queue lengths for better task management."
- method_description: "The collector uses the `beanstalkc` python module to connect to a `beanstalkd` service and gather metrics."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
-          description: "If no configuration is given, the module will attempt to connect to beanstalkd at 127.0.0.1:11300."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "beanstalkc python module"
- description: The collector requires the `beanstalkc` python module to be installed.
- configuration:
- file:
- name: python.d/beanstalk.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: host
-              description: IP address or hostname of the beanstalkd service.
- default_value: "127.0.0.1"
- required: false
- - name: port
-              description: Port of the beanstalkd service.
- default_value: "11300"
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Remote beanstalk server
- description: A basic remote beanstalk server
- folding:
- enabled: false
- config: |
- remote:
- name: 'beanstalk'
- host: '1.2.3.4'
- port: 11300
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- localhost:
- name: 'local_beanstalk'
- host: '127.0.0.1'
- port: 11300
-
- remote_job:
- name: 'remote_beanstalk'
- host: '192.0.2.1'
-                port: 11300
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: beanstalk_server_buried_jobs
- link: https://github.com/netdata/netdata/blob/master/health/health.d/beanstalkd.conf
- metric: beanstalk.current_jobs
- info: number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs.
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: beanstalk.cpu_usage
- description: Cpu Usage
- unit: "cpu time"
- chart_type: area
- dimensions:
- - name: user
- - name: system
- - name: beanstalk.jobs_rate
- description: Jobs Rate
- unit: "jobs/s"
- chart_type: line
- dimensions:
- - name: total
- - name: timeouts
- - name: beanstalk.connections_rate
- description: Connections Rate
- unit: "connections/s"
- chart_type: area
- dimensions:
- - name: connections
- - name: beanstalk.commands_rate
- description: Commands Rate
- unit: "commands/s"
- chart_type: stacked
- dimensions:
- - name: put
- - name: peek
- - name: peek-ready
- - name: peek-delayed
- - name: peek-buried
- - name: reserve
- - name: use
- - name: watch
- - name: ignore
-                - name: delete
-                - name: release
-                - name: bury
- - name: kick
- - name: stats
- - name: stats-job
- - name: stats-tube
- - name: list-tubes
- - name: list-tube-used
- - name: list-tubes-watched
- - name: pause-tube
-            - name: beanstalk.current_tubes
- description: Current Tubes
- unit: "tubes"
- chart_type: area
- dimensions:
- - name: tubes
- - name: beanstalk.current_jobs
- description: Current Jobs
- unit: "jobs"
- chart_type: stacked
- dimensions:
- - name: urgent
- - name: ready
- - name: reserved
- - name: delayed
- - name: buried
- - name: beanstalk.current_connections
- description: Current Connections
- unit: "connections"
- chart_type: line
- dimensions:
-                - name: open
- - name: producers
- - name: workers
- - name: waiting
- - name: beanstalk.binlog
- description: Binlog
- unit: "records/s"
- chart_type: line
- dimensions:
- - name: written
- - name: migrated
- - name: beanstalk.uptime
-              description: Uptime
- unit: "seconds"
- chart_type: line
- dimensions:
- - name: uptime
- - name: tube
- description: "Metrics related to Beanstalk tubes. Each tube produces its own set of the following metrics."
- labels: []
- metrics:
- - name: beanstalk.jobs_rate
- description: Jobs Rate
- unit: "jobs/s"
- chart_type: area
- dimensions:
- - name: jobs
- - name: beanstalk.jobs
- description: Jobs
- unit: "jobs"
- chart_type: stacked
- dimensions:
- - name: urgent
- - name: ready
- - name: reserved
- - name: delayed
- - name: buried
- - name: beanstalk.connections
- description: Connections
- unit: "connections"
- chart_type: stacked
- dimensions:
- - name: using
- - name: waiting
- - name: watching
- - name: beanstalk.commands
- description: Commands
- unit: "commands/s"
- chart_type: stacked
- dimensions:
- - name: deletes
- - name: pauses
- - name: beanstalk.pause
- description: Pause
- unit: "seconds"
- chart_type: stacked
- dimensions:
- - name: since
- - name: left
diff --git a/collectors/python.d.plugin/bind_rndc/Makefile.inc b/collectors/python.d.plugin/bind_rndc/Makefile.inc
deleted file mode 100644
index 72f391492..000000000
--- a/collectors/python.d.plugin/bind_rndc/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += bind_rndc/bind_rndc.chart.py
-dist_pythonconfig_DATA += bind_rndc/bind_rndc.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += bind_rndc/README.md bind_rndc/Makefile.inc
-
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
deleted file mode 120000
index 03a182ae8..000000000
--- a/collectors/python.d.plugin/bind_rndc/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/isc_bind_rndc.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py b/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
deleted file mode 100644
index 9d6c9fec7..000000000
--- a/collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: bind rndc netdata python.d module
-# Author: ilyam8
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-from collections import defaultdict
-from subprocess import Popen
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from bases.collection import find_binary
-
-update_every = 30
-
-ORDER = [
- 'name_server_statistics',
- 'incoming_queries',
- 'outgoing_queries',
- 'named_stats_size',
-]
-
-CHARTS = {
- 'name_server_statistics': {
- 'options': [None, 'Name Server Statistics', 'stats', 'name server statistics',
- 'bind_rndc.name_server_statistics', 'line'],
- 'lines': [
- ['nms_requests', 'requests', 'incremental'],
- ['nms_rejected_queries', 'rejected_queries', 'incremental'],
- ['nms_success', 'success', 'incremental'],
- ['nms_failure', 'failure', 'incremental'],
- ['nms_responses', 'responses', 'incremental'],
- ['nms_duplicate', 'duplicate', 'incremental'],
- ['nms_recursion', 'recursion', 'incremental'],
- ['nms_nxrrset', 'nxrrset', 'incremental'],
- ['nms_nxdomain', 'nxdomain', 'incremental'],
- ['nms_non_auth_answer', 'non_auth_answer', 'incremental'],
- ['nms_auth_answer', 'auth_answer', 'incremental'],
- ['nms_dropped_queries', 'dropped_queries', 'incremental'],
- ]},
- 'incoming_queries': {
- 'options': [None, 'Incoming Queries', 'queries', 'incoming queries', 'bind_rndc.incoming_queries', 'line'],
- 'lines': [
- ]},
- 'outgoing_queries': {
- 'options': [None, 'Outgoing Queries', 'queries', 'outgoing queries', 'bind_rndc.outgoing_queries', 'line'],
- 'lines': [
- ]},
- 'named_stats_size': {
- 'options': [None, 'Named Stats File Size', 'MiB', 'file size', 'bind_rndc.stats_size', 'line'],
- 'lines': [
- ['stats_size', None, 'absolute', 1, 1 << 20]
- ]
- }
-}
-
-NMS = {
- 'nms_requests': [
- 'IPv4 requests received',
- 'IPv6 requests received',
- 'TCP requests received',
- 'requests with EDNS(0) receive'
- ],
- 'nms_responses': [
- 'responses sent',
- 'truncated responses sent',
- 'responses with EDNS(0) sent',
- 'requests with unsupported EDNS version received'
- ],
- 'nms_failure': [
- 'other query failures',
- 'queries resulted in SERVFAIL'
- ],
- 'nms_auth_answer': ['queries resulted in authoritative answer'],
- 'nms_non_auth_answer': ['queries resulted in non authoritative answer'],
- 'nms_nxrrset': ['queries resulted in nxrrset'],
- 'nms_success': ['queries resulted in successful answer'],
- 'nms_nxdomain': ['queries resulted in NXDOMAIN'],
- 'nms_recursion': ['queries caused recursion'],
- 'nms_duplicate': ['duplicate queries received'],
- 'nms_rejected_queries': [
- 'auth queries rejected',
- 'recursive queries rejected'
- ],
- 'nms_dropped_queries': ['queries dropped']
-}
-
-STATS = ['Name Server Statistics', 'Incoming Queries', 'Outgoing Queries']
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
- self.rndc = find_binary('rndc')
- self.data = dict(
- nms_requests=0,
- nms_responses=0,
- nms_failure=0,
-            nms_auth_answer=0,
-            nms_non_auth_answer=0,
- nms_nxrrset=0,
- nms_success=0,
- nms_nxdomain=0,
- nms_recursion=0,
- nms_duplicate=0,
- nms_rejected_queries=0,
- nms_dropped_queries=0,
- )
-
- def check(self):
- if not self.rndc:
- self.error('Can\'t locate "rndc" binary or binary is not executable by netdata')
- return False
-
- if not (os.path.isfile(self.named_stats_path) and os.access(self.named_stats_path, os.R_OK)):
- self.error('Cannot access file %s' % self.named_stats_path)
- return False
-
- run_rndc = Popen([self.rndc, 'stats'], shell=False)
- run_rndc.wait()
-
- if not run_rndc.returncode:
- return True
- self.error('Not enough permissions to run "%s stats"' % self.rndc)
- return False
-
- def _get_raw_data(self):
- """
- Run 'rndc stats' and read last dump from named.stats
- :return: dict
- """
- result = dict()
- try:
- current_size = os.path.getsize(self.named_stats_path)
- run_rndc = Popen([self.rndc, 'stats'], shell=False)
- run_rndc.wait()
-
- if run_rndc.returncode:
- return None
- with open(self.named_stats_path) as named_stats:
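-                # named.stats is append-only; skip past previous dumps and read only what this run appended.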
- named_stats.seek(current_size)
- result['stats'] = named_stats.readlines()
- result['size'] = current_size
- return result
- except (OSError, IOError):
- return None
-
- def _get_data(self):
- """
- Parse data from _get_raw_data()
- :return: dict
- """
-
- raw_data = self._get_raw_data()
-
- if raw_data is None:
- return None
- parsed = dict()
- for stat in STATS:
- parsed[stat] = parse_stats(field=stat,
- named_stats=raw_data['stats'])
-
- self.data.update(nms_mapper(data=parsed['Name Server Statistics']))
-
- for elem in zip(['Incoming Queries', 'Outgoing Queries'], ['incoming_queries', 'outgoing_queries']):
- parsed_key, chart_name = elem[0], elem[1]
- for dimension_id, value in queries_mapper(data=parsed[parsed_key],
- add=chart_name[:9]).items():
-
- if dimension_id not in self.data:
- dimension = dimension_id.replace(chart_name[:9], '')
- if dimension_id not in self.charts[chart_name]:
- self.charts[chart_name].add_dimension([dimension_id, dimension, 'incremental'])
-
- self.data[dimension_id] = value
-
- self.data['stats_size'] = raw_data['size']
- return self.data
-
-
-def parse_stats(field, named_stats):
- """
- :param field: str:
- :param named_stats: list:
- :return: dict
-
- Example:
-    field: 'Incoming Queries'
-    named_stats (list of lines):
- ++ Incoming Requests ++
- 1405660 QUERY
- 3 NOTIFY
- ++ Incoming Queries ++
- 1214961 A
- 75 NS
- 2 CNAME
- 2897 SOA
- 35544 PTR
- 14 MX
- 5822 TXT
- 145974 AAAA
- 371 SRV
- ++ Outgoing Queries ++
- ...
-
- result:
-    {'A': 1214961, 'NS': 75, 'CNAME': 2, 'SOA': 2897, ...}
- """
- data = dict()
- ns = iter(named_stats)
- for line in ns:
- if field not in line:
- continue
- while True:
- try:
- line = next(ns)
- except StopIteration:
- break
- if '++' not in line:
- if '[' in line:
- continue
- v, k = line.strip().split(' ', 1)
- if k not in data:
- data[k] = 0
- data[k] += int(v)
- continue
- break
- break
- return data
-
-
-def nms_mapper(data):
- """
- :param data: dict
- :return: dict(defaultdict)
- """
- result = defaultdict(int)
- for k, v in NMS.items():
- for elem in v:
- result[k] += data.get(elem, 0)
- return result
-
-
-def queries_mapper(data, add):
- """
- :param data: dict
- :param add: str
- :return: dict
- """
- return dict([(add + k, v) for k, v in data.items()])
diff --git a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf b/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
deleted file mode 100644
index 84eaf0594..000000000
--- a/collectors/python.d.plugin/bind_rndc/bind_rndc.conf
+++ /dev/null
@@ -1,108 +0,0 @@
-# netdata python.d.plugin configuration for bind_rndc
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, bind_rndc also supports the following:
-#
-# named_stats_path: 'path to named.stats' # Default: '/var/log/bind/named.stats'
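-#
-# For example, a minimal job definition (commented out; the path shown is the default):
-#
-# local:
-#   named_stats_path: '/var/log/bind/named.stats'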
-#------------------------------------------------------------------------------------------------------------------
-# Important Information
-#
-# BIND appends logs at EVERY RUN. It is NOT RECOMMENDED to set update_every below 30 sec.
-# It is STRONGLY RECOMMENDED to create a bind-rndc.conf file for logrotate.
-#
-# To set up your BIND to dump stats do the following:
-#
-# 1. Add to 'named.conf.options' options {}:
-# statistics-file "/var/log/bind/named.stats";
-#
-# 2. Create bind/ directory in /var/log
-# cd /var/log/ && mkdir bind
-#
-# 3. Change owner of directory to 'bind' user
-# chown bind bind/
-#
-# 4. RELOAD (NOT restart) BIND
-# systemctl reload bind9.service
-#
-# 5. Run 'rndc stats' as root to dump statistics (BIND will create named.stats in the new directory)
-#
-# To allow Netdata to run 'rndc stats' change '/etc/bind/rndc.key' group to netdata
-# chown :netdata rndc.key
-#
-# Last, BUT NOT least, is to create bind-rndc.conf in logrotate.d/:
-#
-# /var/log/bind/named.stats {
-#
-# daily
-# rotate 4
-# compress
-# delaycompress
-# create 0644 bind bind
-# missingok
-# postrotate
-# rndc reload > /dev/null
-# endscript
-# }
-#
-# To test your logrotate conf file run as root:
-# logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)
-#
-# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md b/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md
deleted file mode 100644
index 163f8282c..000000000
--- a/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md
+++ /dev/null
@@ -1,215 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/bind_rndc/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/bind_rndc/metadata.yaml"
-sidebar_label: "ISC Bind (RNDC)"
-learn_status: "Published"
-learn_rel_path: "Data Collection/DNS and DHCP Servers"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# ISC Bind (RNDC)
-
-
-<img src="https://netdata.cloud/img/isc.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: bind_rndc
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor ISC Bind (RNDC) performance for optimal DNS server operations. Track query rates, response times, and error rates to ensure reliable DNS service delivery.
-
-This collector uses the `rndc` tool to dump statistics to a file (`named.stats`), then reads that file to gather Bind name server summary performance metrics.
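-
-Conceptually, each collection cycle is equivalent to the following shell steps (default path shown; this is a sketch, not the plugin's literal code):
-
-```bash
-rndc stats                       # ask BIND to append a statistics dump to the stats file
-tail /var/log/bind/named.stats   # the collector then parses only the newly appended dump
-```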
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If no configuration is given, the collector will attempt to read named.stats file at `/var/log/bind/named.stats`
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per ISC Bind (RNDC) instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| bind_rndc.name_server_statistics | requests, rejected_queries, success, failure, responses, duplicate, recursion, nxrrset, nxdomain, non_auth_answer, auth_answer, dropped_queries | stats |
-| bind_rndc.incoming_queries | a dimension per incoming query type | queries |
-| bind_rndc.outgoing_queries | a dimension per outgoing query type | queries |
-| bind_rndc.stats_size | stats_size | MiB |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ bind_rndc_stats_file_size ](https://github.com/netdata/netdata/blob/master/health/health.d/bind_rndc.conf) | bind_rndc.stats_size | BIND statistics-file size |
-
-
-## Setup
-
-### Prerequisites
-
-#### Minimum bind version and permissions
-
-Version of bind must be >=9.6 and the Netdata user must have permissions to run `rndc stats`
-
-#### Setup log rotate for bind stats
-
-BIND appends logs at EVERY RUN. It is NOT RECOMMENDED to set `update_every` below 30 sec.
-It is STRONGLY RECOMMENDED to create a `bind-rndc.conf` file for logrotate.
-
-To set up BIND to dump stats do the following:
-
-1. Add to 'named.conf.options' options {}:
-`statistics-file "/var/log/bind/named.stats";`
-
-2. Create bind/ directory in /var/log:
-`cd /var/log/ && mkdir bind`
-
-3. Change owner of directory to 'bind' user:
-`chown bind bind/`
-
-4. RELOAD (NOT restart) BIND:
-`systemctl reload bind9.service`
-
-5. Run `rndc stats` as root to dump statistics (BIND will create named.stats in the new directory)
-
-To allow Netdata to run 'rndc stats' change '/etc/bind/rndc.key' group to netdata:
-`chown :netdata rndc.key`
-
-Last, BUT NOT least, is to create bind-rndc.conf in logrotate.d/:
-```
-/var/log/bind/named.stats {
-
- daily
- rotate 4
- compress
- delaycompress
- create 0644 bind bind
- missingok
- postrotate
- rndc reload > /dev/null
- endscript
-}
-```
-To test your logrotate conf file run as root:
-`logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)`
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/bind_rndc.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/bind_rndc.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| named_stats_path | Path to the named stats file, after being dumped by `rndc` | /var/log/bind/named.stats | no |
-
-</details>
-
-#### Examples
-
-##### Local bind stats
-
-Define a local path to bind stats file
-
-```yaml
-local:
- named_stats_path: '/var/log/bind/named.stats'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `bind_rndc` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin bind_rndc debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/bind_rndc/metadata.yaml b/collectors/python.d.plugin/bind_rndc/metadata.yaml
deleted file mode 100644
index e3568e448..000000000
--- a/collectors/python.d.plugin/bind_rndc/metadata.yaml
+++ /dev/null
@@ -1,191 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: bind_rndc
- monitored_instance:
- name: ISC Bind (RNDC)
- link: "https://www.isc.org/bind/"
- categories:
- - data-collection.dns-and-dhcp-servers
- icon_filename: "isc.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - dns
- - bind
- - server
- most_popular: false
- overview:
- data_collection:
-        metrics_description: "Monitor ISC Bind (RNDC) performance for optimal DNS server operations. Track query rates, response times, and error rates to ensure reliable DNS service delivery."
-        method_description: "This collector uses the `rndc` tool to dump statistics to a file (`named.stats`), then reads that file to gather Bind name server summary performance metrics."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "If no configuration is given, the collector will attempt to read named.stats file at `/var/log/bind/named.stats`"
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Minimum bind version and permissions"
- description: "Version of bind must be >=9.6 and the Netdata user must have permissions to run `rndc stats`"
- - title: "Setup log rotate for bind stats"
- description: |
- BIND appends logs at EVERY RUN. It is NOT RECOMMENDED to set `update_every` below 30 sec.
- It is STRONGLY RECOMMENDED to create a `bind-rndc.conf` file for logrotate.
-
- To set up BIND to dump stats do the following:
-
- 1. Add to 'named.conf.options' options {}:
- `statistics-file "/var/log/bind/named.stats";`
-
- 2. Create bind/ directory in /var/log:
- `cd /var/log/ && mkdir bind`
-
- 3. Change owner of directory to 'bind' user:
- `chown bind bind/`
-
- 4. RELOAD (NOT restart) BIND:
- `systemctl reload bind9.service`
-
-              5. Run `rndc stats` as root to dump statistics (BIND will create named.stats in the new directory)
-
- To allow Netdata to run 'rndc stats' change '/etc/bind/rndc.key' group to netdata:
- `chown :netdata rndc.key`
-
- Last, BUT NOT least, is to create bind-rndc.conf in logrotate.d/:
- ```
- /var/log/bind/named.stats {
-
- daily
- rotate 4
- compress
- delaycompress
- create 0644 bind bind
- missingok
- postrotate
- rndc reload > /dev/null
- endscript
- }
- ```
- To test your logrotate conf file run as root:
- `logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)`
- configuration:
- file:
- name: python.d/bind_rndc.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: named_stats_path
-              description: Path to the named stats file, after being dumped by `rndc`
- default_value: "/var/log/bind/named.stats"
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Local bind stats
- description: Define a local path to bind stats file
- config: |
- local:
- named_stats_path: '/var/log/bind/named.stats'
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: bind_rndc_stats_file_size
- link: https://github.com/netdata/netdata/blob/master/health/health.d/bind_rndc.conf
- metric: bind_rndc.stats_size
- info: BIND statistics-file size
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: bind_rndc.name_server_statistics
- description: Name Server Statistics
- unit: "stats"
- chart_type: line
- dimensions:
- - name: requests
- - name: rejected_queries
- - name: success
- - name: failure
- - name: responses
- - name: duplicate
- - name: recursion
- - name: nxrrset
- - name: nxdomain
- - name: non_auth_answer
- - name: auth_answer
- - name: dropped_queries
- - name: bind_rndc.incoming_queries
- description: Incoming queries
- unit: "queries"
- chart_type: line
- dimensions:
- - name: a dimension per incoming query type
- - name: bind_rndc.outgoing_queries
- description: Outgoing queries
- unit: "queries"
- chart_type: line
- dimensions:
- - name: a dimension per outgoing query type
- - name: bind_rndc.stats_size
- description: Named Stats File Size
- unit: "MiB"
- chart_type: line
- dimensions:
- - name: stats_size
diff --git a/collectors/python.d.plugin/boinc/Makefile.inc b/collectors/python.d.plugin/boinc/Makefile.inc
deleted file mode 100644
index 319e19cfe..000000000
--- a/collectors/python.d.plugin/boinc/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += boinc/boinc.chart.py
-dist_pythonconfig_DATA += boinc/boinc.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += boinc/README.md boinc/Makefile.inc
-
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
deleted file mode 120000
index 22c10ca17..000000000
--- a/collectors/python.d.plugin/boinc/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/boinc.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/boinc/boinc.chart.py b/collectors/python.d.plugin/boinc/boinc.chart.py
deleted file mode 100644
index a31eda1c2..000000000
--- a/collectors/python.d.plugin/boinc/boinc.chart.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: BOINC netdata python.d module
-# Author: Austin S. Hemmelgarn (Ferroin)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import socket
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from third_party import boinc_client
-
-ORDER = [
- 'tasks',
- 'states',
- 'sched_states',
- 'process_states',
-]
-
-CHARTS = {
- 'tasks': {
- 'options': [None, 'Overall Tasks', 'tasks', 'boinc', 'boinc.tasks', 'line'],
- 'lines': [
- ['total', 'Total', 'absolute', 1, 1],
- ['active', 'Active', 'absolute', 1, 1]
- ]
- },
- 'states': {
- 'options': [None, 'Tasks per State', 'tasks', 'boinc', 'boinc.states', 'line'],
- 'lines': [
- ['new', 'New', 'absolute', 1, 1],
- ['downloading', 'Downloading', 'absolute', 1, 1],
- ['downloaded', 'Ready to Run', 'absolute', 1, 1],
- ['comperror', 'Compute Errors', 'absolute', 1, 1],
- ['uploading', 'Uploading', 'absolute', 1, 1],
- ['uploaded', 'Uploaded', 'absolute', 1, 1],
- ['aborted', 'Aborted', 'absolute', 1, 1],
- ['upload_failed', 'Failed Uploads', 'absolute', 1, 1]
- ]
- },
- 'sched_states': {
- 'options': [None, 'Tasks per Scheduler State', 'tasks', 'boinc', 'boinc.sched', 'line'],
- 'lines': [
- ['uninit_sched', 'Uninitialized', 'absolute', 1, 1],
- ['preempted', 'Preempted', 'absolute', 1, 1],
- ['scheduled', 'Scheduled', 'absolute', 1, 1]
- ]
- },
- 'process_states': {
- 'options': [None, 'Tasks per Process State', 'tasks', 'boinc', 'boinc.process', 'line'],
- 'lines': [
- ['uninit_proc', 'Uninitialized', 'absolute', 1, 1],
- ['executing', 'Executing', 'absolute', 1, 1],
- ['suspended', 'Suspended', 'absolute', 1, 1],
- ['aborting', 'Aborted', 'absolute', 1, 1],
- ['quit', 'Quit', 'absolute', 1, 1],
- ['copy_pending', 'Copy Pending', 'absolute', 1, 1]
- ]
- }
-}
-
-# A simple template used for pre-loading the return dictionary to make
-# the _get_data() method simpler.
-_DATA_TEMPLATE = {
- 'total': 0,
- 'active': 0,
- 'new': 0,
- 'downloading': 0,
- 'downloaded': 0,
- 'comperror': 0,
- 'uploading': 0,
- 'uploaded': 0,
- 'aborted': 0,
- 'upload_failed': 0,
- 'uninit_sched': 0,
- 'preempted': 0,
- 'scheduled': 0,
- 'uninit_proc': 0,
- 'executing': 0,
- 'suspended': 0,
- 'aborting': 0,
- 'quit': 0,
- 'copy_pending': 0
-}
-
-# Map task states to dimensions
-_TASK_MAP = {
- boinc_client.ResultState.NEW: 'new',
- boinc_client.ResultState.FILES_DOWNLOADING: 'downloading',
- boinc_client.ResultState.FILES_DOWNLOADED: 'downloaded',
- boinc_client.ResultState.COMPUTE_ERROR: 'comperror',
- boinc_client.ResultState.FILES_UPLOADING: 'uploading',
- boinc_client.ResultState.FILES_UPLOADED: 'uploaded',
- boinc_client.ResultState.ABORTED: 'aborted',
- boinc_client.ResultState.UPLOAD_FAILED: 'upload_failed'
-}
-
-# Map scheduler states to dimensions
-_SCHED_MAP = {
- boinc_client.CpuSched.UNINITIALIZED: 'uninit_sched',
- boinc_client.CpuSched.PREEMPTED: 'preempted',
- boinc_client.CpuSched.SCHEDULED: 'scheduled',
-}
-
-# Maps process states to dimensions
-_PROC_MAP = {
- boinc_client.Process.UNINITIALIZED: 'uninit_proc',
- boinc_client.Process.EXECUTING: 'executing',
- boinc_client.Process.SUSPENDED: 'suspended',
-    boinc_client.Process.ABORT_PENDING: 'aborting',
- boinc_client.Process.QUIT_PENDING: 'quit',
- boinc_client.Process.COPY_PENDING: 'copy_pending'
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 0)
- self.password = self.configuration.get('password', '')
- self.client = boinc_client.BoincClient(host=self.host, port=self.port, passwd=self.password)
- self.alive = False
-
- def check(self):
- return self.connect()
-
- def connect(self):
- self.client.connect()
- self.alive = self.client.connected and self.client.authorized
- return self.alive
-
- def reconnect(self):
- # The client class itself actually disconnects existing
- # connections when it is told to connect, so we don't need to
- # explicitly disconnect when we're just trying to reconnect.
- return self.connect()
-
- def is_alive(self):
- if not self.alive:
- return self.reconnect()
- return True
-
- def _get_data(self):
- if not self.is_alive():
- return None
-
- data = dict(_DATA_TEMPLATE)
-
- try:
- results = self.client.get_tasks()
- except socket.error:
- self.error('Connection is dead')
- self.alive = False
- return None
-
- for task in results:
- data['total'] += 1
- data[_TASK_MAP[task.state]] += 1
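-            # Scheduler/process states exist only for tasks with an active_task; others raise AttributeError and are skipped.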
- try:
- if task.active_task:
- data['active'] += 1
- data[_SCHED_MAP[task.scheduler_state]] += 1
- data[_PROC_MAP[task.active_task_state]] += 1
- except AttributeError:
- pass
-
- return data or None
diff --git a/collectors/python.d.plugin/boinc/boinc.conf b/collectors/python.d.plugin/boinc/boinc.conf
deleted file mode 100644
index 16edf55c4..000000000
--- a/collectors/python.d.plugin/boinc/boinc.conf
+++ /dev/null
@@ -1,66 +0,0 @@
-# netdata python.d.plugin configuration for boinc
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, boinc also supports the following:
-#
-#  host: localhost   # The host running the BOINC client
-# port: 31416 # The remote GUI RPC port for BOINC
-# password: '' # The remote GUI RPC password
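-#
-# For example, a minimal job definition (commented out; the password value is illustrative):
-#
-# local:
-#   host: 'localhost'
-#   port: 31416
-#   password: 'your-rpc-password'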
diff --git a/collectors/python.d.plugin/boinc/integrations/boinc.md b/collectors/python.d.plugin/boinc/integrations/boinc.md
deleted file mode 100644
index d6874d455..000000000
--- a/collectors/python.d.plugin/boinc/integrations/boinc.md
+++ /dev/null
@@ -1,204 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/boinc/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/boinc/metadata.yaml"
-sidebar_label: "BOINC"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Distributed Computing Systems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# BOINC
-
-
-<img src="https://netdata.cloud/img/bolt.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: boinc
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client.
-
-It uses the same RPC interface that the BOINC monitoring GUI does.
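-
-As a rough sketch, the collector's RPC round-trip looks like this (using the `boinc_client` helper vendored with Netdata under `third_party`; host, port, and password are illustrative):
-
-```python
-from third_party import boinc_client
-
-client = boinc_client.BoincClient(host='localhost', port=31416, passwd='secret')
-client.connect()
-if client.connected and client.authorized:
-    tasks = client.get_tasks()  # one entry per BOINC task, carrying its state fields
-```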
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, the module will try to auto-detect the password to the RPC interface by looking for the `gui_rpc_auth.cfg` file in `/var/lib/boinc` (the location most Linux distributions use for a system-wide BOINC installation), so a local system may work without any configuration.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per BOINC instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| boinc.tasks | Total, Active | tasks |
-| boinc.states | New, Downloading, Ready to Run, Compute Errors, Uploading, Uploaded, Aborted, Failed Uploads | tasks |
-| boinc.sched | Uninitialized, Preempted, Scheduled | tasks |
-| boinc.process | Uninitialized, Executing, Suspended, Aborted, Quit, Copy Pending | tasks |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |
-| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |
-| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf) | boinc.states | average number of compute errors over the last 10 minutes |
-| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf) | boinc.states | average number of failed uploads over the last 10 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-#### Boinc RPC interface
-
-BOINC requires use of a password to access its RPC interface. You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
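-
-On most Linux distributions with a system-wide installation, that directory is `/var/lib/boinc` (the path may differ on your system):
-
-```bash
-sudo cat /var/lib/boinc/gui_rpc_auth.cfg
-```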
-
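-If you want to verify the password works before configuring the collector, the sketch below exercises the same handshake. It is illustrative only (not code from this module) and assumes BOINC's documented GUI RPC conventions: XML messages terminated by `\x03` on the default port 31416, with authentication via `auth1`/`auth2` and an MD5 hash of nonce + password.
-
-```python
-# Hedged sketch of the BOINC GUI RPC authentication handshake.
-import hashlib
-import socket
-
-def boinc_rpc_auth(host='127.0.0.1', port=31416, password='secret'):
-    sock = socket.create_connection((host, port))
-
-    def rpc(payload):
-        # Requests and replies are XML documents terminated by \x03.
-        sock.sendall(b'<boinc_gui_rpc_request>' + payload + b'</boinc_gui_rpc_request>\x03')
-        reply = b''
-        while not reply.endswith(b'\x03'):
-            reply += sock.recv(4096)
-        return reply[:-1].decode()
-
-    nonce = rpc(b'<auth1/>').split('<nonce>')[1].split('</nonce>')[0]
-    digest = hashlib.md5((nonce + password).encode()).hexdigest()
-    reply = rpc('<auth2><nonce_hash>{0}</nonce_hash></auth2>'.format(digest).encode())
-    return '<authorized/>' in reply
-```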
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/boinc.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/boinc.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| hostname | The hostname where BOINC is running. | localhost | no |
-| port | The port of the BOINC RPC interface. | | no |
-| password | The password used to connect to the BOINC RPC interface. | | no |
-
-</details>
-
-#### Examples
-
-##### Configuration of a remote BOINC instance
-
-A basic JOB configuration for a remote BOINC instance.
-
-```yaml
-remote:
- hostname: '1.2.3.4'
- port: 1234
- password: 'some-password'
-
-```
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 1234
- password: 'some-password'
-
-remote_job:
- name: 'remote'
- host: '192.0.2.1'
- port: 1234
- password: some-other-password
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin boinc debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/boinc/metadata.yaml b/collectors/python.d.plugin/boinc/metadata.yaml
deleted file mode 100644
index 33a67ac34..000000000
--- a/collectors/python.d.plugin/boinc/metadata.yaml
+++ /dev/null
@@ -1,198 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: boinc
- monitored_instance:
- name: BOINC
- link: "https://boinc.berkeley.edu/"
- categories:
- - data-collection.distributed-computing-systems
- icon_filename: "bolt.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - boinc
- - distributed
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors task counts for the Berkeley Open Infrastructure Networking Computing (BOINC) distributed computing client."
- method_description: "It uses the same RPC interface that the BOINC monitoring GUI does."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "By default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for this file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Boinc RPC interface"
- description: BOINC requires use of a password to access it's RPC interface. You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
- configuration:
- file:
- name: python.d/boinc.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: hostname
-        description: The hostname where BOINC is running.
- default_value: "localhost"
- required: false
- - name: port
-        description: The port of the BOINC RPC interface.
- default_value: ""
- required: false
- - name: password
-        description: The password used to connect to the BOINC RPC interface.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
-          - name: Configuration of a remote BOINC instance
-            description: A basic JOB configuration for a remote BOINC instance.
- folding:
- enabled: false
- config: |
- remote:
- hostname: '1.2.3.4'
- port: 1234
- password: 'some-password'
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 1234
- password: 'some-password'
-
- remote_job:
- name: 'remote'
- host: '192.0.2.1'
- port: 1234
- password: some-other-password
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: boinc_total_tasks
- link: https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf
- metric: boinc.tasks
- info: average number of total tasks over the last 10 minutes
- os: "*"
- - name: boinc_active_tasks
- link: https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf
- metric: boinc.tasks
- info: average number of active tasks over the last 10 minutes
- os: "*"
- - name: boinc_compute_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf
- metric: boinc.states
- info: average number of compute errors over the last 10 minutes
- os: "*"
- - name: boinc_upload_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/boinc.conf
- metric: boinc.states
- info: average number of failed uploads over the last 10 minutes
- os: "*"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: boinc.tasks
- description: Overall Tasks
- unit: "tasks"
- chart_type: line
- dimensions:
- - name: Total
- - name: Active
- - name: boinc.states
- description: Tasks per State
- unit: "tasks"
- chart_type: line
- dimensions:
- - name: New
- - name: Downloading
- - name: Ready to Run
- - name: Compute Errors
- - name: Uploading
- - name: Uploaded
- - name: Aborted
- - name: Failed Uploads
- - name: boinc.sched
- description: Tasks per Scheduler State
- unit: "tasks"
- chart_type: line
- dimensions:
- - name: Uninitialized
- - name: Preempted
- - name: Scheduled
- - name: boinc.process
- description: Tasks per Process State
- unit: "tasks"
- chart_type: line
- dimensions:
- - name: Uninitialized
- - name: Executing
- - name: Suspended
- - name: Aborted
- - name: Quit
- - name: Copy Pending
diff --git a/collectors/python.d.plugin/ceph/Makefile.inc b/collectors/python.d.plugin/ceph/Makefile.inc
deleted file mode 100644
index 15b039ef6..000000000
--- a/collectors/python.d.plugin/ceph/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += ceph/ceph.chart.py
-dist_pythonconfig_DATA += ceph/ceph.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += ceph/README.md ceph/Makefile.inc
-
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
deleted file mode 120000
index 654248b70..000000000
--- a/collectors/python.d.plugin/ceph/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/ceph.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/ceph/ceph.chart.py b/collectors/python.d.plugin/ceph/ceph.chart.py
deleted file mode 100644
index 4bcbe1979..000000000
--- a/collectors/python.d.plugin/ceph/ceph.chart.py
+++ /dev/null
@@ -1,374 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: ceph netdata python.d module
-# Author: Luis Eduardo (lets00)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-try:
- import rados
-
- CEPH = True
-except ImportError:
- CEPH = False
-
-import json
-import os
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values (can be overridden per job in `config`)
-update_every = 10
-
-ORDER = [
- 'general_usage',
- 'general_objects',
- 'general_bytes',
- 'general_operations',
- 'general_latency',
- 'pool_usage',
- 'pool_objects',
- 'pool_read_bytes',
- 'pool_write_bytes',
- 'pool_read_operations',
- 'pool_write_operations',
- 'osd_usage',
- 'osd_size',
- 'osd_apply_latency',
- 'osd_commit_latency'
-]
-
-CHARTS = {
- 'general_usage': {
- 'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'],
- 'lines': [
- ['general_available', 'avail', 'absolute'],
- ['general_usage', 'used', 'absolute']
- ]
- },
- 'general_objects': {
- 'options': [None, 'Ceph General Objects', 'objects', 'general', 'ceph.general_objects', 'area'],
- 'lines': [
- ['general_objects', 'cluster', 'absolute']
- ]
- },
- 'general_bytes': {
- 'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes',
- 'area'],
- 'lines': [
- ['general_read_bytes', 'read', 'absolute', 1, 1024],
- ['general_write_bytes', 'write', 'absolute', -1, 1024]
- ]
- },
- 'general_operations': {
- 'options': [None, 'Ceph General Read/Write Operations/s', 'operations', 'general', 'ceph.general_operations',
- 'area'],
- 'lines': [
- ['general_read_operations', 'read', 'absolute', 1],
- ['general_write_operations', 'write', 'absolute', -1]
- ]
- },
- 'general_latency': {
- 'options': [None, 'Ceph General Apply/Commit latency', 'milliseconds', 'general', 'ceph.general_latency',
- 'area'],
- 'lines': [
- ['general_apply_latency', 'apply', 'absolute'],
- ['general_commit_latency', 'commit', 'absolute']
- ]
- },
- 'pool_usage': {
- 'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'],
- 'lines': []
- },
- 'pool_objects': {
- 'options': [None, 'Ceph Pools', 'objects', 'pool', 'ceph.pool_objects', 'line'],
- 'lines': []
- },
- 'pool_read_bytes': {
- 'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'],
- 'lines': []
- },
- 'pool_write_bytes': {
- 'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'],
- 'lines': []
- },
- 'pool_read_operations': {
- 'options': [None, 'Ceph Read Pool Operations/s', 'operations', 'pool', 'ceph.pool_read_operations', 'area'],
- 'lines': []
- },
- 'pool_write_operations': {
- 'options': [None, 'Ceph Write Pool Operations/s', 'operations', 'pool', 'ceph.pool_write_operations', 'area'],
- 'lines': []
- },
- 'osd_usage': {
- 'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
- 'lines': []
- },
- 'osd_size': {
- 'options': [None, 'Ceph OSDs size', 'KiB', 'osd', 'ceph.osd_size', 'line'],
- 'lines': []
- },
- 'osd_apply_latency': {
- 'options': [None, 'Ceph OSDs apply latency', 'milliseconds', 'osd', 'ceph.apply_latency', 'line'],
- 'lines': []
- },
- 'osd_commit_latency': {
- 'options': [None, 'Ceph OSDs commit latency', 'milliseconds', 'osd', 'ceph.commit_latency', 'line'],
- 'lines': []
- }
-
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.config_file = self.configuration.get('config_file')
- self.keyring_file = self.configuration.get('keyring_file')
- self.rados_id = self.configuration.get('rados_id', 'admin')
-
- def check(self):
- """
- Checks module
- :return:
- """
- if not CEPH:
- self.error('rados module is needed to use ceph.chart.py')
- return False
- if not (self.config_file and self.keyring_file):
- self.error('config_file and/or keyring_file is not defined')
- return False
-
- # Verify files and permissions
- if not (os.access(self.config_file, os.F_OK)):
- self.error('{0} does not exist'.format(self.config_file))
- return False
- if not (os.access(self.keyring_file, os.F_OK)):
- self.error('{0} does not exist'.format(self.keyring_file))
- return False
- if not (os.access(self.config_file, os.R_OK)):
- self.error('Ceph plugin does not read {0}, define read permission.'.format(self.config_file))
- return False
- if not (os.access(self.keyring_file, os.R_OK)):
- self.error('Ceph plugin does not read {0}, define read permission.'.format(self.keyring_file))
- return False
- try:
- self.cluster = rados.Rados(conffile=self.config_file,
- conf=dict(keyring=self.keyring_file),
- rados_id=self.rados_id)
- self.cluster.connect()
- except rados.Error as error:
- self.error(error)
- return False
- self.create_definitions()
- return True
-
- def create_definitions(self):
- """
- Create dynamically charts options
- :return: None
- """
- # Pool lines
- for pool in sorted(self._get_df()['pools'], key=lambda x: sorted(x.keys())):
- self.definitions['pool_usage']['lines'].append([pool['name'],
- pool['name'],
- 'absolute'])
- self.definitions['pool_objects']['lines'].append(["obj_{0}".format(pool['name']),
- pool['name'],
- 'absolute'])
- self.definitions['pool_read_bytes']['lines'].append(['read_{0}'.format(pool['name']),
- pool['name'],
- 'absolute', 1, 1024])
- self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']),
- pool['name'],
- 'absolute', 1, 1024])
- self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']),
- pool['name'],
- 'absolute'])
- self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
- pool['name'],
- 'absolute'])
-
- # OSD lines
- for osd in sorted(self._get_osd_df()['nodes'], key=lambda x: sorted(x.keys())):
- self.definitions['osd_usage']['lines'].append([osd['name'],
- osd['name'],
- 'absolute'])
- self.definitions['osd_size']['lines'].append(['size_{0}'.format(osd['name']),
- osd['name'],
- 'absolute'])
- self.definitions['osd_apply_latency']['lines'].append(['apply_latency_{0}'.format(osd['name']),
- osd['name'],
- 'absolute'])
- self.definitions['osd_commit_latency']['lines'].append(['commit_latency_{0}'.format(osd['name']),
- osd['name'],
- 'absolute'])
-
- def get_data(self):
- """
- Catch all ceph data
- :return: dict
- """
- try:
- data = {}
- df = self._get_df()
- osd_df = self._get_osd_df()
- osd_perf = self._get_osd_perf()
- osd_perf_infos = get_osd_perf_infos(osd_perf)
- pool_stats = self._get_osd_pool_stats()
-
- data.update(self._get_general(osd_perf_infos, pool_stats))
- for pool in df['pools']:
- data.update(self._get_pool_usage(pool))
- data.update(self._get_pool_objects(pool))
- for pool_io in pool_stats:
- data.update(self._get_pool_rw(pool_io))
- for osd in osd_df['nodes']:
- data.update(self._get_osd_usage(osd))
- data.update(self._get_osd_size(osd))
- for osd_apply_commit in osd_perf_infos:
- data.update(self._get_osd_latency(osd_apply_commit))
- return data
- except (ValueError, AttributeError) as error:
- self.error(error)
- return None
-
- def _get_general(self, osd_perf_infos, pool_stats):
- """
- Get ceph's general usage
- :return: dict
- """
- status = self.cluster.get_cluster_stats()
- read_bytes_sec = 0
- write_bytes_sec = 0
- read_op_per_sec = 0
- write_op_per_sec = 0
- apply_latency = 0
- commit_latency = 0
-
- for pool_rw_io_b in pool_stats:
- read_bytes_sec += pool_rw_io_b['client_io_rate'].get('read_bytes_sec', 0)
- write_bytes_sec += pool_rw_io_b['client_io_rate'].get('write_bytes_sec', 0)
- read_op_per_sec += pool_rw_io_b['client_io_rate'].get('read_op_per_sec', 0)
- write_op_per_sec += pool_rw_io_b['client_io_rate'].get('write_op_per_sec', 0)
- for perf in osd_perf_infos:
- apply_latency += perf['perf_stats']['apply_latency_ms']
- commit_latency += perf['perf_stats']['commit_latency_ms']
-
- return {
- 'general_usage': int(status['kb_used']),
- 'general_available': int(status['kb_avail']),
- 'general_objects': int(status['num_objects']),
- 'general_read_bytes': read_bytes_sec,
- 'general_write_bytes': write_bytes_sec,
- 'general_read_operations': read_op_per_sec,
- 'general_write_operations': write_op_per_sec,
- 'general_apply_latency': apply_latency,
- 'general_commit_latency': commit_latency
- }
-
- @staticmethod
- def _get_pool_usage(pool):
- """
- Process raw data into pool usage dict information
- :return: A pool dict with pool name's key and usage bytes' value
- """
- return {pool['name']: pool['stats']['kb_used']}
-
- @staticmethod
- def _get_pool_objects(pool):
- """
- Process raw data into pool usage dict information
- :return: A pool dict with pool name's key and object numbers
- """
- return {'obj_{0}'.format(pool['name']): pool['stats']['objects']}
-
- @staticmethod
- def _get_pool_rw(pool):
- """
- Get read/write kb and operations in a pool
- :return: A pool dict with both read/write bytes and operations.
- """
- return {
- 'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)),
- 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)),
- 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)),
- 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0))
- }
-
- @staticmethod
- def _get_osd_usage(osd):
- """
- Process raw data into osd dict information to get osd usage
- :return: A osd dict with osd name's key and usage bytes' value
- """
- return {osd['name']: float(osd['kb_used'])}
-
- @staticmethod
- def _get_osd_size(osd):
- """
- Process raw data into osd dict information to get osd size (kb)
- :return: A osd dict with osd name's key and size bytes' value
- """
- return {'size_{0}'.format(osd['name']): float(osd['kb'])}
-
- @staticmethod
- def _get_osd_latency(osd):
- """
- Get ceph osd apply and commit latency
- :return: A osd dict with osd name's key with both apply and commit latency values
- """
- return {
- 'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'],
- 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms']
- }
-
- def _get_df(self):
- """
- Get ceph df output
- :return: ceph df --format json
- """
- return json.loads(self.cluster.mon_command(json.dumps({
- 'prefix': 'df',
- 'format': 'json'
- }), b'')[1].decode('utf-8'))
-
- def _get_osd_df(self):
- """
- Get ceph osd df output
- :return: ceph osd df --format json
- """
- return json.loads(self.cluster.mon_command(json.dumps({
- 'prefix': 'osd df',
- 'format': 'json'
- }), b'')[1].decode('utf-8').replace('-nan', '"-nan"'))
-
- def _get_osd_perf(self):
- """
- Get ceph osd performance
- :return: ceph osd perf --format json
- """
- return json.loads(self.cluster.mon_command(json.dumps({
- 'prefix': 'osd perf',
- 'format': 'json'
- }), b'')[1].decode('utf-8'))
-
- def _get_osd_pool_stats(self):
- """
- Get ceph osd pool status.
- This command is used to get information about both
- read/write operation and bytes per second on each pool
- :return: ceph osd pool stats --format json
- """
- return json.loads(self.cluster.mon_command(json.dumps({
- 'prefix': 'osd pool stats',
- 'format': 'json'
- }), b'')[1].decode('utf-8'))
-
-
-def get_osd_perf_infos(osd_perf):
- # https://github.com/netdata/netdata/issues/8247
-    # module uses 'osd_perf_infos' data; it has been moved under 'osdstats' since Ceph v14.2
- if 'osd_perf_infos' in osd_perf:
- return osd_perf['osd_perf_infos']
- return osd_perf['osdstats']['osd_perf_infos']
diff --git a/collectors/python.d.plugin/ceph/ceph.conf b/collectors/python.d.plugin/ceph/ceph.conf
deleted file mode 100644
index 81788e866..000000000
--- a/collectors/python.d.plugin/ceph/ceph.conf
+++ /dev/null
@@ -1,75 +0,0 @@
-# netdata python.d.plugin configuration for ceph stats
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 10 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, ceph plugin also supports the following:
-#
-# config_file: 'config_file' # Ceph config file.
-# keyring_file: 'keyring_file'   # Ceph keyring file. The netdata user must be added to the ceph group,
-#                                # and the keyring file must have group read permission.
-# rados_id: 'rados username'     # ID used to connect to the ceph cluster. Allows
-#                                # creating a read-only key for pulling data instead of using admin.
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-config_file: '/etc/ceph/ceph.conf'
-keyring_file: '/etc/ceph/ceph.client.admin.keyring'
-rados_id: 'admin'
diff --git a/collectors/python.d.plugin/ceph/integrations/ceph.md b/collectors/python.d.plugin/ceph/integrations/ceph.md
deleted file mode 100644
index cfda01fbe..000000000
--- a/collectors/python.d.plugin/ceph/integrations/ceph.md
+++ /dev/null
@@ -1,194 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ceph/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ceph/metadata.yaml"
-sidebar_label: "Ceph"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Ceph
-
-
-<img src="https://netdata.cloud/img/ceph.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: ceph
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Ceph metrics about cluster statistics, OSD usage, latency, and pool statistics.
-
-Uses the `rados` python module to connect to a Ceph cluster.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Ceph instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ceph.general_usage | avail, used | KiB |
-| ceph.general_objects | cluster | objects |
-| ceph.general_bytes | read, write | KiB/s |
-| ceph.general_operations | read, write | operations |
-| ceph.general_latency | apply, commit | milliseconds |
-| ceph.pool_usage | a dimension per Ceph Pool | KiB |
-| ceph.pool_objects | a dimension per Ceph Pool | objects |
-| ceph.pool_read_bytes | a dimension per Ceph Pool | KiB/s |
-| ceph.pool_write_bytes | a dimension per Ceph Pool | KiB/s |
-| ceph.pool_read_operations | a dimension per Ceph Pool | operations |
-| ceph.pool_write_operations | a dimension per Ceph Pool | operations |
-| ceph.osd_usage | a dimension per Ceph OSD | KiB |
-| ceph.osd_size | a dimension per Ceph OSD | KiB |
-| ceph.apply_latency | a dimension per Ceph OSD | milliseconds |
-| ceph.commit_latency | a dimension per Ceph OSD | milliseconds |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ ceph_cluster_space_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/ceph.conf) | ceph.general_usage | cluster disk space utilization |
-
-
-## Setup
-
-### Prerequisites
-
-#### `rados` python module
-
-Make sure the `rados` Python module is installed.
-
-#### Granting the ceph group read permission on the keyring file
-
-Execute: `chmod 640 /etc/ceph/ceph.client.admin.keyring`
-
-#### Create a specific rados_id
-
-You can optionally create a `rados_id` to use instead of `admin`.
-
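-To confirm the prerequisites are in place, you can mirror the collector's own `rados` calls from a Python shell. This is a hedged sketch, assuming the default paths shipped in `python.d/ceph.conf`; adjust them for your cluster.
-
-```python
-# Hedged connectivity check mirroring the collector's rados usage.
-import json
-import rados
-
-cluster = rados.Rados(conffile='/etc/ceph/ceph.conf',
-                      conf=dict(keyring='/etc/ceph/ceph.client.admin.keyring'),
-                      rados_id='admin')
-cluster.connect()
-# The same mon_command the collector issues to fetch `ceph df` as JSON.
-ret, out, _ = cluster.mon_command(json.dumps({'prefix': 'df', 'format': 'json'}), b'')
-print(ret, sorted(json.loads(out.decode('utf-8'))))
-```
-
-A return code of `0` and a JSON document with `pools` and `stats` keys means the collector should be able to connect with the same settings.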
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/ceph.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/ceph.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| config_file | Ceph config file | | yes |
-| keyring_file | Ceph keyring file. The netdata user must be added to the ceph group, and the keyring file must have group read permission. | | yes |
-| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no |
-
-</details>
-
-#### Examples
-
-##### Basic local Ceph cluster
-
-A basic configuration to connect to a local Ceph cluster.
-
-```yaml
-local:
- config_file: '/etc/ceph/ceph.conf'
- keyring_file: '/etc/ceph/ceph.client.admin.keyring'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin ceph debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/ceph/metadata.yaml b/collectors/python.d.plugin/ceph/metadata.yaml
deleted file mode 100644
index 0f06470b1..000000000
--- a/collectors/python.d.plugin/ceph/metadata.yaml
+++ /dev/null
@@ -1,223 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: ceph
- monitored_instance:
- name: Ceph
- link: 'https://ceph.io/'
- categories:
- - data-collection.storage-mount-points-and-filesystems
- icon_filename: 'ceph.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - ceph
- - storage
- most_popular: false
- overview:
- data_collection:
-        metrics_description: 'This collector monitors Ceph metrics about cluster statistics, OSD usage, latency, and pool statistics.'
- method_description: 'Uses the `rados` python module to connect to a Ceph cluster.'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: '`rados` python module'
-            description: 'Make sure the `rados` Python module is installed.'
-          - title: 'Granting the ceph group read permission on the keyring file'
-            description: 'Execute: `chmod 640 /etc/ceph/ceph.client.admin.keyring`'
-          - title: 'Create a specific rados_id'
-            description: 'You can optionally create a `rados_id` to use instead of `admin`.'
- configuration:
- file:
- name: python.d/ceph.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- - name: config_file
- description: Ceph config file
- default_value: ''
- required: true
- - name: keyring_file
-        description: Ceph keyring file. The netdata user must be added to the ceph group, and the keyring file must have group read permission.
- default_value: ''
- required: true
- - name: rados_id
- description: A rados user id to use for connecting to the Ceph cluster.
- default_value: 'admin'
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic local Ceph cluster
- description: A basic configuration to connect to a local Ceph cluster.
- folding:
- enabled: false
- config: |
- local:
- config_file: '/etc/ceph/ceph.conf'
- keyring_file: '/etc/ceph/ceph.client.admin.keyring'
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: ceph_cluster_space_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ceph.conf
- metric: ceph.general_usage
- info: cluster disk space utilization
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: ceph.general_usage
- description: Ceph General Space
- unit: "KiB"
- chart_type: stacked
- dimensions:
- - name: avail
- - name: used
- - name: ceph.general_objects
- description: Ceph General Objects
- unit: "objects"
- chart_type: area
- dimensions:
- - name: cluster
- - name: ceph.general_bytes
- description: Ceph General Read/Write Data/s
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: ceph.general_operations
- description: Ceph General Read/Write Operations/s
- unit: "operations"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: ceph.general_latency
- description: Ceph General Apply/Commit latency
- unit: "milliseconds"
- chart_type: area
- dimensions:
- - name: apply
- - name: commit
- - name: ceph.pool_usage
- description: Ceph Pools
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: a dimension per Ceph Pool
- - name: ceph.pool_objects
- description: Ceph Pools
- unit: "objects"
- chart_type: line
- dimensions:
- - name: a dimension per Ceph Pool
- - name: ceph.pool_read_bytes
- description: Ceph Read Pool Data/s
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: a dimension per Ceph Pool
- - name: ceph.pool_write_bytes
- description: Ceph Write Pool Data/s
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: a dimension per Ceph Pool
- - name: ceph.pool_read_operations
- description: Ceph Read Pool Operations/s
- unit: "operations"
- chart_type: area
- dimensions:
- - name: a dimension per Ceph Pool
- - name: ceph.pool_write_operations
- description: Ceph Write Pool Operations/s
- unit: "operations"
- chart_type: area
- dimensions:
- - name: a dimension per Ceph Pool
- - name: ceph.osd_usage
- description: Ceph OSDs
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: a dimension per Ceph OSD
- - name: ceph.osd_size
- description: Ceph OSDs size
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: a dimension per Ceph OSD
- - name: ceph.apply_latency
- description: Ceph OSDs apply latency
- unit: "milliseconds"
- chart_type: line
- dimensions:
- - name: a dimension per Ceph OSD
- - name: ceph.commit_latency
- description: Ceph OSDs commit latency
- unit: "milliseconds"
- chart_type: line
- dimensions:
- - name: a dimension per Ceph OSD
diff --git a/collectors/python.d.plugin/changefinder/Makefile.inc b/collectors/python.d.plugin/changefinder/Makefile.inc
deleted file mode 100644
index 01a92408b..000000000
--- a/collectors/python.d.plugin/changefinder/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += changefinder/changefinder.chart.py
-dist_pythonconfig_DATA += changefinder/changefinder.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += changefinder/README.md changefinder/Makefile.inc
-
diff --git a/collectors/python.d.plugin/changefinder/README.md b/collectors/python.d.plugin/changefinder/README.md
deleted file mode 120000
index 0ca704eb1..000000000
--- a/collectors/python.d.plugin/changefinder/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/python.d_changefinder.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/changefinder/changefinder.chart.py b/collectors/python.d.plugin/changefinder/changefinder.chart.py
deleted file mode 100644
index 2a69cd9f5..000000000
--- a/collectors/python.d.plugin/changefinder/changefinder.chart.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: changefinder netdata python.d module
-# Author: andrewm4894
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from json import loads
-import re
-
-from bases.FrameworkServices.UrlService import UrlService
-
-import numpy as np
-import changefinder
-from scipy.stats import percentileofscore
-
-update_every = 5
-disabled_by_default = True
-
-ORDER = [
- 'scores',
- 'flags'
-]
-
-CHARTS = {
- 'scores': {
- 'options': [None, 'ChangeFinder', 'score', 'Scores', 'changefinder.scores', 'line'],
- 'lines': []
- },
- 'flags': {
- 'options': [None, 'ChangeFinder', 'flag', 'Flags', 'changefinder.flags', 'stacked'],
- 'lines': []
- }
-}
-
-DEFAULT_PROTOCOL = 'http'
-DEFAULT_HOST = '127.0.0.1:19999'
-DEFAULT_CHARTS_REGEX = 'system.*'
-DEFAULT_MODE = 'per_chart'
-DEFAULT_CF_R = 0.5
-DEFAULT_CF_ORDER = 1
-DEFAULT_CF_SMOOTH = 15
-DEFAULT_CF_DIFF = False
-DEFAULT_CF_THRESHOLD = 99
-DEFAULT_N_SCORE_SAMPLES = 14400
-DEFAULT_SHOW_SCORES = False
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.protocol = self.configuration.get('protocol', DEFAULT_PROTOCOL)
- self.host = self.configuration.get('host', DEFAULT_HOST)
- self.url = '{}://{}/api/v1/allmetrics?format=json'.format(self.protocol, self.host)
- self.charts_regex = re.compile(self.configuration.get('charts_regex', DEFAULT_CHARTS_REGEX))
- self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')
- self.mode = self.configuration.get('mode', DEFAULT_MODE)
- self.n_score_samples = int(self.configuration.get('n_score_samples', DEFAULT_N_SCORE_SAMPLES))
- self.show_scores = int(self.configuration.get('show_scores', DEFAULT_SHOW_SCORES))
- self.cf_r = float(self.configuration.get('cf_r', DEFAULT_CF_R))
- self.cf_order = int(self.configuration.get('cf_order', DEFAULT_CF_ORDER))
- self.cf_smooth = int(self.configuration.get('cf_smooth', DEFAULT_CF_SMOOTH))
- self.cf_diff = bool(self.configuration.get('cf_diff', DEFAULT_CF_DIFF))
- self.cf_threshold = float(self.configuration.get('cf_threshold', DEFAULT_CF_THRESHOLD))
- self.collected_dims = {'scores': set(), 'flags': set()}
- self.models = {}
- self.x_latest = {}
- self.scores_latest = {}
- self.scores_samples = {}
-
- def get_score(self, x, model):
- """Update the score for the model based on most recent data, flag if it's percentile passes self.cf_threshold.
- """
-
- # get score
- if model not in self.models:
- # initialise empty model if needed
- self.models[model] = changefinder.ChangeFinder(r=self.cf_r, order=self.cf_order, smooth=self.cf_smooth)
- # if the update for this step fails then just fallback to last known score
- try:
- score = self.models[model].update(x)
- self.scores_latest[model] = score
- except Exception as _:
- score = self.scores_latest.get(model, 0)
- score = 0 if np.isnan(score) else score
-
- # update sample scores used to calculate percentiles
- if model in self.scores_samples:
- self.scores_samples[model].append(score)
- else:
- self.scores_samples[model] = [score]
- self.scores_samples[model] = self.scores_samples[model][-self.n_score_samples:]
-
- # convert score to percentile
- score = percentileofscore(self.scores_samples[model], score)
-
- # flag based on score percentile
- flag = 1 if score >= self.cf_threshold else 0
-
- return score, flag
-
- def validate_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):
- """If dimension not in chart then add it.
- """
- if not self.charts:
- return
-
- for dim in data:
- if dim not in self.collected_dims[chart]:
- self.collected_dims[chart].add(dim)
- self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])
-
- for dim in list(self.collected_dims[chart]):
- if dim not in data:
- self.collected_dims[chart].remove(dim)
- self.charts[chart].del_dimension(dim, hide=False)
-
- def diff(self, x, model):
- """Take difference of data.
- """
- x_diff = x - self.x_latest.get(model, 0)
- self.x_latest[model] = x
- x = x_diff
- return x
-
- def _get_data(self):
-
- # pull data from self.url
- raw_data = self._get_raw_data()
- if raw_data is None:
- return None
-
- raw_data = loads(raw_data)
-
- # filter to just the data for the charts specified
- charts_in_scope = list(filter(self.charts_regex.match, raw_data.keys()))
- charts_in_scope = [c for c in charts_in_scope if c not in self.charts_to_exclude]
-
- data_score = {}
- data_flag = {}
-
- # process each chart
- for chart in charts_in_scope:
-
- if self.mode == 'per_chart':
-
- # average dims on chart and run changefinder on that average
- x = [raw_data[chart]['dimensions'][dim]['value'] for dim in raw_data[chart]['dimensions']]
- x = [x for x in x if x is not None]
-
- if len(x) > 0:
-
- x = sum(x) / len(x)
- x = self.diff(x, chart) if self.cf_diff else x
-
- score, flag = self.get_score(x, chart)
- if self.show_scores:
- data_score['{}_score'.format(chart)] = score * 100
- data_flag[chart] = flag
-
- else:
-
- # run changefinder on each individual dim
- for dim in raw_data[chart]['dimensions']:
-
- chart_dim = '{}|{}'.format(chart, dim)
-
- x = raw_data[chart]['dimensions'][dim]['value']
- x = x if x else 0
- x = self.diff(x, chart_dim) if self.cf_diff else x
-
- score, flag = self.get_score(x, chart_dim)
- if self.show_scores:
- data_score['{}_score'.format(chart_dim)] = score * 100
- data_flag[chart_dim] = flag
-
- self.validate_charts('flags', data_flag)
-
- if self.show_scores & len(data_score) > 0:
- data_score['average_score'] = sum(data_score.values()) / len(data_score)
- self.validate_charts('scores', data_score, divisor=100)
-
- data = {**data_score, **data_flag}
-
- return data
diff --git a/collectors/python.d.plugin/changefinder/changefinder.conf b/collectors/python.d.plugin/changefinder/changefinder.conf
deleted file mode 100644
index 56a681f1e..000000000
--- a/collectors/python.d.plugin/changefinder/changefinder.conf
+++ /dev/null
@@ -1,74 +0,0 @@
-# netdata python.d.plugin configuration for changefinder
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 5
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-
-local:
-
- # A friendly name for this job.
- name: 'local'
-
- # What host to pull data from.
- host: '127.0.0.1:19999'
-
- # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
- charts_regex: 'system\..*'
-
- # Charts to exclude, useful if you would like to exclude some specific charts.
- # Note: should be a ',' separated string like 'chart.name,chart.name'.
- charts_to_exclude: ''
-
- # Get ChangeFinder scores 'per_dim' or 'per_chart'.
- mode: 'per_chart'
-
- # Default parameters that can be passed to the changefinder library.
- cf_r: 0.5
- cf_order: 1
- cf_smooth: 15
-
- # The percentile above which scores will be flagged.
- cf_threshold: 99
-
- # The number of recent scores to use when calculating the percentile of the changefinder score.
- n_score_samples: 14400
-
- # Set to true if you also want to chart the percentile scores in addition to the flags.
- # Mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time.
- show_scores: false
diff --git a/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md b/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md
deleted file mode 100644
index c338c9374..000000000
--- a/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md
+++ /dev/null
@@ -1,217 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/changefinder/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/changefinder/metadata.yaml"
-sidebar_label: "python.d changefinder"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Other"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# python.d changefinder
-
-Plugin: python.d.plugin
-Module: changefinder
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to
-perform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection)
-on your Netdata charts and/or dimensions.
-
-
-Rather than just _collecting_ data, this collector also does some computation on the data it collects to return a changepoint score for each chart or dimension you configure it to work on. This is an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm, so there is no batch step to train the model; instead it evolves over time as more data arrives. That makes this particular algorithm quite cheap to compute at each step of data collection (see the notes section below for more details) and it should scale fairly well to work on lots of charts or hosts (if running on a parent node, for example).
-
-### Notes
-
-- It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into its
- typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly
- this is because it can take a while to build up a proper distribution of previous scores in order to convert the raw
- score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have
- already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then
- should 'settle down' once it has built up enough history. This is a typical characteristic of online machine learning
- approaches which need some initial window of time before they can be useful.
-- As this collector does most of the work in Python itself, you may want to try it out first on a test or development
- system to get a sense of its performance characteristics on a node similar to where you would like to use it.
-- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) vm running Ubuntu 18.04 LTS and not doing any work some of the
- typical performance characteristics we saw from running this collector (with defaults) were:
- - A runtime (`netdata.runtime_changefinder`) of ~30ms.
-  - Typically ~1% additional CPU usage.
-  - About 85 MB of RAM (`apps.mem`) being continually used by the `python.d.plugin` under default configuration.
-
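-The sketch below condenses the scoring loop described above into a few lines. It is illustrative rather than the module's exact code; the parameter values are the collector defaults from the configuration table further down.
-
-```python
-# Hedged sketch: online ChangeFinder score -> percentile -> flag.
-import changefinder
-from scipy.stats import percentileofscore
-
-model = changefinder.ChangeFinder(r=0.5, order=1, smooth=15)  # cf_r, cf_order, cf_smooth
-recent_scores = []
-for x in [1.0, 1.1, 0.9, 1.0, 9.0]:            # toy series ending in a level shift
-    raw = model.update(x)                      # raw changepoint score for this step
-    recent_scores = (recent_scores + [raw])[-14400:]  # keep last n_score_samples scores
-    pct = percentileofscore(recent_scores, raw)       # convert raw score to a percentile
-    flag = 1 if pct >= 99 else 0                      # flag when above cf_threshold
-```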
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default this collector will work over all `system.*` charts.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per python.d changefinder instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| changefinder.scores | a dimension per chart | score |
-| changefinder.flags | a dimension per chart | flag |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Python Requirements
-
-This collector will only work with Python 3 and requires the packages below be installed.
-
-```bash
-# become netdata user
-sudo su -s /bin/bash netdata
-# install required packages for the netdata user
-pip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4
-```
-
-**Note**: If you need to tell Netdata to use Python 3, set the `command options` value shown below in the python plugin section
-of your `netdata.conf` file.
-
-```yaml
-[ plugin:python.d ]
- # update every = 1
- command options = -ppython3
-```
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/changefinder.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/changefinder.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| charts_regex | What charts to pull data for - a regex like `system\..*` or `system\..*\|apps.cpu\|apps.mem` etc. | system\..* | yes |
-| charts_to_exclude | Charts to exclude, useful if you would like to exclude some specific charts. Note: should be a ',' separated string like 'chart.name,chart.name'. | | no |
-| mode | Get ChangeFinder scores 'per_dim' or 'per_chart'. | per_chart | yes |
-| cf_r | Default parameters that can be passed to the changefinder library. | 0.5 | no |
-| cf_order | Default parameters that can be passed to the changefinder library. | 1 | no |
-| cf_smooth | Default parameters that can be passed to the changefinder library. | 15 | no |
-| cf_threshold | The percentile above which scores will be flagged. | 99 | no |
-| n_score_samples | The number of recent scores to use when calculating the percentile of the changefinder score. | 14400 | no |
-| show_scores | Set to true if you also want to chart the percentile scores in addition to the flags (mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time). | no | no |
-
-</details>
-
-#### Examples
-
-##### Default
-
-Default configuration.
-
-```yaml
-local:
- name: 'local'
- host: '127.0.0.1:19999'
- charts_regex: 'system\..*'
- charts_to_exclude: ''
- mode: 'per_chart'
- cf_r: 0.5
- cf_order: 1
- cf_smooth: 15
- cf_threshold: 99
- n_score_samples: 14400
- show_scores: false
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `changefinder` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin changefinder debug trace
- ```
-
diff --git a/collectors/python.d.plugin/changefinder/metadata.yaml b/collectors/python.d.plugin/changefinder/metadata.yaml
deleted file mode 100644
index 170d9146a..000000000
--- a/collectors/python.d.plugin/changefinder/metadata.yaml
+++ /dev/null
@@ -1,212 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: changefinder
- monitored_instance:
- name: python.d changefinder
- link: ""
- categories:
- - data-collection.other
- icon_filename: ""
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - change detection
- - anomaly detection
- - machine learning
- - ml
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to
- perform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection)
- on your Netdata charts and/or dimensions.
- method_description: >
- This collector does not just _collect_ data; it also does some computation on the data it collects to return a
- changepoint score for each chart or dimension you configure it to work on. This is
- an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step
- to train the model; instead, it evolves over time as more data arrives. That makes this particular algorithm quite cheap
- to compute at each step of data collection (see the notes section below for more details) and it should scale fairly
- well to work on lots of charts or hosts (if running on a parent node for example).
-
- ### Notes
- - It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into its
- typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly
- this is because it can take a while to build up a proper distribution of previous scores in order to convert the raw
- score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have
- already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then
- should 'settle down' once it has built up enough history. This is a typical characteristic of online machine learning
- approaches which need some initial window of time before they can be useful.
- - As this collector does most of the work in Python itself, you may want to try it out first on a test or development
- system to get a sense of its performance characteristics on a node similar to where you would like to use it.
- - On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) VM running Ubuntu 18.04 LTS and not doing any other work, some of the
- typical performance characteristics we saw from running this collector (with defaults) were:
- - A runtime (`netdata.runtime_changefinder`) of ~30ms.
- - Typically ~1% additional CPU usage.
- - About ~85 MB of RAM (`apps.mem`) continually used by the `python.d.plugin` under the default configuration.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "By default this collector will work over all `system.*` charts."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Python Requirements
- description: |
- This collector will only work with Python 3 and requires the packages below to be installed.
-
- ```bash
- # become netdata user
- sudo su -s /bin/bash netdata
- # install required packages for the netdata user
- pip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4
- ```
-
- **Note**: if you need to tell Netdata to use Python 3, you can add the setting below to the python plugin section
- of your `netdata.conf` file.
-
- ```yaml
- [ plugin:python.d ]
- # update every = 1
- command options = -ppython3
- ```
- configuration:
- file:
- name: python.d/changefinder.conf
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: charts_regex
- description: what charts to pull data for - A regex like `system\..*|` or `system\..*|apps.cpu|apps.mem` etc.
- default_value: "system\\..*"
- required: true
- - name: charts_to_exclude
- description: |
- charts to exclude, useful if you would like to exclude some specific charts.
- Note: this should be a comma-separated string like 'chart.name,chart.name'.
- default_value: ""
- required: false
- - name: mode
- description: get ChangeFinder scores 'per_dim' or 'per_chart'.
- default_value: "per_chart"
- required: true
- - name: cf_r
- description: the `r` discounting parameter passed to the changefinder library.
- default_value: 0.5
- required: false
- - name: cf_order
- description: the autoregressive (AR) model order passed to the changefinder library.
- default_value: 1
- required: false
- - name: cf_smooth
- description: the smoothing window size passed to the changefinder library.
- default_value: 15
- required: false
- - name: cf_threshold
- description: the percentile above which scores will be flagged.
- default_value: 99
- required: false
- - name: n_score_samples
- description: the number of recent scores to use when calculating the percentile of the changefinder score.
- default_value: 14400
- required: false
- - name: show_scores
- description: |
- set to true if you also want to chart the percentile scores in addition to the flags (mainly useful for debugging or to see how the scores evolve over time).
- default_value: false
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Default
- description: Default configuration.
- folding:
- enabled: false
- config: |
- local:
- name: 'local'
- host: '127.0.0.1:19999'
- charts_regex: 'system\..*'
- charts_to_exclude: ''
- mode: 'per_chart'
- cf_r: 0.5
- cf_order: 1
- cf_smooth: 15
- cf_threshold: 99
- n_score_samples: 14400
- show_scores: false
- troubleshooting:
- problems:
- list:
- - name: "Debug Mode"
- description: |
- If you would like to log in as the `netdata` user and run the collector in debug mode to see more detail:
-
- ```bash
- # become netdata user
- sudo su -s /bin/bash netdata
- # run collector in debug mode using the `nolock` option if netdata is already running the collector itself.
- /usr/libexec/netdata/plugins.d/python.d.plugin changefinder debug trace nolock
- ```
- - name: "Log Messages"
- description: |
- To see any relevant log messages you can use a command like the one below.
-
- ```bash
- grep 'changefinder' /var/log/netdata/error.log
- grep 'changefinder' /var/log/netdata/collector.log
- ```
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: changefinder.scores
- description: ChangeFinder
- unit: "score"
- chart_type: line
- dimensions:
- - name: a dimension per chart
- - name: changefinder.flags
- description: ChangeFinder
- unit: "flag"
- chart_type: stacked
- dimensions:
- - name: a dimension per chart
diff --git a/collectors/python.d.plugin/dovecot/Makefile.inc b/collectors/python.d.plugin/dovecot/Makefile.inc
deleted file mode 100644
index fd7d13bbb..000000000
--- a/collectors/python.d.plugin/dovecot/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += dovecot/dovecot.chart.py
-dist_pythonconfig_DATA += dovecot/dovecot.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += dovecot/README.md dovecot/Makefile.inc
-
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
deleted file mode 120000
index c4749cedc..000000000
--- a/collectors/python.d.plugin/dovecot/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/dovecot.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/dovecot/dovecot.chart.py b/collectors/python.d.plugin/dovecot/dovecot.chart.py
deleted file mode 100644
index dfaef28b5..000000000
--- a/collectors/python.d.plugin/dovecot/dovecot.chart.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: dovecot netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.SocketService import SocketService
-
-UNIX_SOCKET = '/var/run/dovecot/stats'
-
-ORDER = [
- 'sessions',
- 'logins',
- 'commands',
- 'faults',
- 'context_switches',
- 'io',
- 'net',
- 'syscalls',
- 'lookup',
- 'cache',
- 'auth',
- 'auth_cache'
-]
-
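-# Each entry in 'lines' below follows the python.d dimension format:
-# [dimension_id, name, algorithm, multiplier, divisor]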
-CHARTS = {
- 'sessions': {
- 'options': [None, 'Dovecot Active Sessions', 'number', 'sessions', 'dovecot.sessions', 'line'],
- 'lines': [
- ['num_connected_sessions', 'active sessions', 'absolute']
- ]
- },
- 'logins': {
- 'options': [None, 'Dovecot Logins', 'number', 'logins', 'dovecot.logins', 'line'],
- 'lines': [
- ['num_logins', 'logins', 'absolute']
- ]
- },
- 'commands': {
- 'options': [None, 'Dovecot Commands', 'commands', 'commands', 'dovecot.commands', 'line'],
- 'lines': [
- ['num_cmds', 'commands', 'absolute']
- ]
- },
- 'faults': {
- 'options': [None, 'Dovecot Page Faults', 'faults', 'page faults', 'dovecot.faults', 'line'],
- 'lines': [
- ['min_faults', 'minor', 'absolute'],
- ['maj_faults', 'major', 'absolute']
- ]
- },
- 'context_switches': {
- 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches',
- 'line'],
- 'lines': [
- ['vol_cs', 'voluntary', 'absolute'],
- ['invol_cs', 'involuntary', 'absolute']
- ]
- },
- 'io': {
- 'options': [None, 'Dovecot Disk I/O', 'KiB/s', 'disk', 'dovecot.io', 'area'],
- 'lines': [
- ['disk_input', 'read', 'incremental', 1, 1024],
- ['disk_output', 'write', 'incremental', -1, 1024]
- ]
- },
- 'net': {
- 'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'],
- 'lines': [
- ['read_bytes', 'read', 'incremental', 8, 1000],
- ['write_bytes', 'write', 'incremental', -8, 1000]
- ]
- },
- 'syscalls': {
- 'options': [None, 'Dovecot Number of SysCalls', 'syscalls/s', 'system', 'dovecot.syscalls', 'line'],
- 'lines': [
- ['read_count', 'read', 'incremental'],
- ['write_count', 'write', 'incremental']
- ]
- },
- 'lookup': {
- 'options': [None, 'Dovecot Lookups', 'number/s', 'lookups', 'dovecot.lookup', 'stacked'],
- 'lines': [
- ['mail_lookup_path', 'path', 'incremental'],
- ['mail_lookup_attr', 'attr', 'incremental']
- ]
- },
- 'cache': {
- 'options': [None, 'Dovecot Cache Hits', 'hits/s', 'cache', 'dovecot.cache', 'line'],
- 'lines': [
- ['mail_cache_hits', 'hits', 'incremental']
- ]
- },
- 'auth': {
- 'options': [None, 'Dovecot Authentications', 'attempts', 'logins', 'dovecot.auth', 'stacked'],
- 'lines': [
- ['auth_successes', 'ok', 'absolute'],
- ['auth_failures', 'failed', 'absolute']
- ]
- },
- 'auth_cache': {
- 'options': [None, 'Dovecot Authentication Cache', 'number', 'cache', 'dovecot.auth_cache', 'stacked'],
- 'lines': [
- ['auth_cache_hits', 'hit', 'absolute'],
- ['auth_cache_misses', 'miss', 'absolute']
- ]
- }
-}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = None # localhost
- self.port = None # 24242
- self.unix_socket = UNIX_SOCKET
- self.request = 'EXPORT\tglobal\r\n'
-
- def _get_data(self):
- """
- Format data received from socket
- :return: dict
- """
- try:
- raw = self._get_raw_data()
- except (ValueError, AttributeError):
- return None
-
- if raw is None:
- self.debug('dovecot returned no data')
- return None
-
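- # The EXPORT reply is two tab-separated rows: metric names, then their values.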
- data = raw.split('\n')[:2]
- desc = data[0].split('\t')
- vals = data[1].split('\t')
- ret = dict()
- for i, _ in enumerate(desc):
- try:
- ret[str(desc[i])] = int(vals[i])
- except ValueError:
- continue
- return ret or None
diff --git a/collectors/python.d.plugin/dovecot/dovecot.conf b/collectors/python.d.plugin/dovecot/dovecot.conf
deleted file mode 100644
index 451dbc9ac..000000000
--- a/collectors/python.d.plugin/dovecot/dovecot.conf
+++ /dev/null
@@ -1,98 +0,0 @@
-# netdata python.d.plugin configuration for dovecot
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, dovecot also supports the following:
-#
-# socket: 'path/to/dovecot/stats'
-#
-# or
-# host: 'IP or HOSTNAME' # the host to connect to
-# port: PORT # the port to connect to
-#
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 24242
-
-localipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 24242
-
-localipv6:
- name : 'local'
- host : '::1'
- port : 24242
-
-localsocket:
- name : 'local'
- socket : '/var/run/dovecot/stats'
-
-localsocket_old:
- name : 'local'
- socket : '/var/run/dovecot/old-stats'
-
diff --git a/collectors/python.d.plugin/dovecot/integrations/dovecot.md b/collectors/python.d.plugin/dovecot/integrations/dovecot.md
deleted file mode 100644
index 4e7952765..000000000
--- a/collectors/python.d.plugin/dovecot/integrations/dovecot.md
+++ /dev/null
@@ -1,197 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dovecot/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dovecot/metadata.yaml"
-sidebar_label: "Dovecot"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Mail Servers"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Dovecot
-
-
-<img src="https://netdata.cloud/img/dovecot.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: dovecot
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.
-
-It uses the dovecot socket and executes the `EXPORT global` command to get the statistics.
-
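-As a sanity check you can issue the same command manually; a sketch assuming `socat` is installed and the stats socket is at its default path:
-
-```bash
-# print the header row and the values row returned by the stats socket
-printf 'EXPORT\tglobal\n' | sudo socat - UNIX-CONNECT:/var/run/dovecot/stats | head -n 2
-```
-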
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If no configuration is given, the collector will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Dovecot instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| dovecot.sessions | active sessions | number |
-| dovecot.logins | logins | number |
-| dovecot.commands | commands | commands |
-| dovecot.faults | minor, major | faults |
-| dovecot.context_switches | voluntary, involuntary | switches |
-| dovecot.io | read, write | KiB/s |
-| dovecot.net | read, write | kilobits/s |
-| dovecot.syscalls | read, write | syscalls/s |
-| dovecot.lookup | path, attr | number/s |
-| dovecot.cache | hits | hits/s |
-| dovecot.auth | ok, failed | attempts |
-| dovecot.auth_cache | hit, miss | number |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Dovecot configuration
-
-The Dovecot UNIX socket should have R/W permissions for user netdata, or Dovecot should be configured with a TCP/IP socket.
-
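-A sketch of what that can look like in the Dovecot configuration (service and listener names vary between Dovecot versions, so treat this as an illustration, not a drop-in config):
-
-```
-service stats {
-    unix_listener stats {
-        # assumption: Netdata runs as the netdata user
-        user = netdata
-        group = netdata
-        mode = 0660
-    }
-    inet_listener {
-        address = 127.0.0.1
-        port = 24242
-    }
-}
-```
-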
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/dovecot.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/dovecot.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| socket | Use this socket to communicate with Dovecot. | /var/run/dovecot/stats | no |
-| host | Instead of using a socket, you can point the collector to an IP address for Dovecot statistics. | | no |
-| port | Used in combination with host, configures the port Dovecot listens on. | | no |
-
-</details>
-
-#### Examples
-
-##### Local TCP
-
-A basic TCP configuration.
-
-<details><summary>Config</summary>
-
-```yaml
-localtcpip:
- name: 'local'
- host: '127.0.0.1'
- port: 24242
-
-```
-</details>
-
-##### Local socket
-
-A basic local socket configuration.
-
-<details><summary>Config</summary>
-
-```yaml
-localsocket:
- name: 'local'
- socket: '/var/run/dovecot/stats'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `dovecot` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin dovecot debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/dovecot/metadata.yaml b/collectors/python.d.plugin/dovecot/metadata.yaml
deleted file mode 100644
index b247da846..000000000
--- a/collectors/python.d.plugin/dovecot/metadata.yaml
+++ /dev/null
@@ -1,207 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: dovecot
- monitored_instance:
- name: Dovecot
- link: 'https://www.dovecot.org/'
- categories:
- - data-collection.mail-servers
- icon_filename: 'dovecot.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - dovecot
- - imap
- - mail
- most_popular: false
- overview:
- data_collection:
- metrics_description: 'This collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.'
- method_description: 'It uses the dovecot socket and executes the `EXPORT global` command to get the statistics.'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: 'If no configuration is given, the collector will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`'
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'Dovecot configuration'
- description: The Dovecot UNIX socket should have R/W permissions for user netdata, or Dovecot should be configured with a TCP/IP socket.
- configuration:
- file:
- name: python.d/dovecot.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- - name: socket
- description: Use this socket to communicate with Dovecot.
- default_value: /var/run/dovecot/stats
- required: false
- - name: host
- description: Instead of using a socket, you can point the collector to an IP address for Dovecot statistics.
- default_value: ''
- required: false
- - name: port
- description: Used in combination with host, configures the port Dovecot listens on.
- default_value: ''
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Local TCP
- description: A basic TCP configuration.
- config: |
- localtcpip:
- name: 'local'
- host: '127.0.0.1'
- port: 24242
- - name: Local socket
- description: A basic local socket configuration.
- config: |
- localsocket:
- name: 'local'
- socket: '/var/run/dovecot/stats'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: dovecot.sessions
- description: Dovecot Active Sessions
- unit: "number"
- chart_type: line
- dimensions:
- - name: active sessions
- - name: dovecot.logins
- description: Dovecot Logins
- unit: "number"
- chart_type: line
- dimensions:
- - name: logins
- - name: dovecot.commands
- description: Dovecot Commands
- unit: "commands"
- chart_type: line
- dimensions:
- - name: commands
- - name: dovecot.faults
- description: Dovecot Page Faults
- unit: "faults"
- chart_type: line
- dimensions:
- - name: minor
- - name: major
- - name: dovecot.context_switches
- description: Dovecot Context Switches
- unit: "switches"
- chart_type: line
- dimensions:
- - name: voluntary
- - name: involuntary
- - name: dovecot.io
- description: Dovecot Disk I/O
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: dovecot.net
- description: Dovecot Network Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: dovecot.syscalls
- description: Dovecot Number of SysCalls
- unit: "syscalls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: dovecot.lookup
- description: Dovecot Lookups
- unit: "number/s"
- chart_type: stacked
- dimensions:
- - name: path
- - name: attr
- - name: dovecot.cache
- description: Dovecot Cache Hits
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: hits
- - name: dovecot.auth
- description: Dovecot Authentications
- unit: "attempts"
- chart_type: stacked
- dimensions:
- - name: ok
- - name: failed
- - name: dovecot.auth_cache
- description: Dovecot Authentication Cache
- unit: "number"
- chart_type: stacked
- dimensions:
- - name: hit
- - name: miss
diff --git a/collectors/python.d.plugin/example/Makefile.inc b/collectors/python.d.plugin/example/Makefile.inc
deleted file mode 100644
index 1b027d5a7..000000000
--- a/collectors/python.d.plugin/example/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += example/example.chart.py
-dist_pythonconfig_DATA += example/example.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += example/README.md example/Makefile.inc
-
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
deleted file mode 120000
index 55877a99a..000000000
--- a/collectors/python.d.plugin/example/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/example_collector.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/example/example.chart.py b/collectors/python.d.plugin/example/example.chart.py
deleted file mode 100644
index d6c0b6658..000000000
--- a/collectors/python.d.plugin/example/example.chart.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: example netdata python.d module
-# Author: Put your name here (your github login)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from random import SystemRandom
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-priority = 90000
-
-ORDER = [
- 'random',
-]
-
-CHARTS = {
- 'random': {
- 'options': [None, 'A random number', 'random number', 'random', 'random', 'line'],
- 'lines': [
- ['random1']
- ]
- }
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.random = SystemRandom()
- self.num_lines = self.configuration.get('num_lines', 4)
- self.lower = self.configuration.get('lower', 0)
- self.upper = self.configuration.get('upper', 100)
-
- @staticmethod
- def check():
- return True
-
- def get_data(self):
- data = dict()
-
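- # Collect one value per configured line; dimensions are added to the chart
- # the first time they are seen, so the chart grows with num_lines.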
- for i in range(0, self.num_lines):
- dimension_id = ''.join(['random', str(i)])
-
- if dimension_id not in self.charts['random']:
- self.charts['random'].add_dimension([dimension_id])
-
- data[dimension_id] = self.random.randint(self.lower, self.upper)
-
- return data
diff --git a/collectors/python.d.plugin/example/example.conf b/collectors/python.d.plugin/example/example.conf
deleted file mode 100644
index 31261b840..000000000
--- a/collectors/python.d.plugin/example/example.conf
+++ /dev/null
@@ -1,87 +0,0 @@
-# netdata python.d.plugin configuration for example
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear on the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, example also supports the following:
-#
-# num_lines: 4 # the number of lines to create
-# lower: 0 # the lower bound of numbers to randomly sample from
-# upper: 100 # the upper bound of numbers to randomly sample from
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-
-four_lines:
- name: "Four Lines" # the JOB's name as it will appear on the dashboard
- update_every: 1 # the JOB's data collection frequency
- priority: 60000 # the JOB's order on the dashboard
- penalty: yes # the JOB's penalty
- autodetection_retry: 0 # the JOB's re-check interval in seconds
- num_lines: 4 # the number of lines to create
- lower: 0 # the lower bound of numbers to randomly sample from
- upper: 100 # the upper bound of numbers to randomly sample from
-
-# if you wanted to make another job to run in addition to the one above then
-# you would just uncomment the job configuration below.
-# two_lines:
-# name: "Two Lines" # the JOB's name as it will appear on the dashboard
-# num_lines: 2 # the number of lines to create
-# lower: 50 # the lower bound of numbers to randomly sample from
-# upper: 75 # the upper bound of numbers to randomly sample from
diff --git a/collectors/python.d.plugin/example/integrations/example_collector.md b/collectors/python.d.plugin/example/integrations/example_collector.md
deleted file mode 100644
index 7dded67ba..000000000
--- a/collectors/python.d.plugin/example/integrations/example_collector.md
+++ /dev/null
@@ -1,171 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/example/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/example/metadata.yaml"
-sidebar_label: "Example collector"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Other"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Example collector
-
-Plugin: python.d.plugin
-Module: example
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Example collector that generates some random numbers as metrics.
-
-If you want to write your own collector, read our [writing a new Python module](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.
-
-
-The `get_data()` function uses `random.randint()` to generate a random number which will be collected as a metric.
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Example collector instance
-
-These metrics refer to the entire monitored application.
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| example.random | random | number |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/example.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/example.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| num_lines | The number of lines to create. | 4 | no |
-| lower | The lower bound of numbers to randomly sample from. | 0 | no |
-| upper | The upper bound of numbers to randomly sample from. | 100 | no |
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-```yaml
-four_lines:
- name: "Four Lines"
- update_every: 1
- priority: 60000
- penalty: yes
- autodetection_retry: 0
- num_lines: 4
- lower: 0
- upper: 100
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `example` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin example debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/example/metadata.yaml b/collectors/python.d.plugin/example/metadata.yaml
deleted file mode 100644
index eae84d9e1..000000000
--- a/collectors/python.d.plugin/example/metadata.yaml
+++ /dev/null
@@ -1,138 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: example
- monitored_instance:
- name: Example collector
- link: https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/example/README.md
- categories:
- - data-collection.other
- icon_filename: ""
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - example
- - netdata
- - python
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- Example collector that generates some random numbers as metrics.
-
- If you want to write your own collector, read our [writing a new Python module](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.
- method_description: |
- The `get_data()` function uses `random.randint()` to generate a random number which will be collected as a metric.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: python.d/example.conf
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: Config options
- enabled: true
- list:
- - name: num_lines
- description: The number of lines to create.
- default_value: 4
- required: false
- - name: lower
- description: The lower bound of numbers to randomly sample from.
- default_value: 0
- required: false
- - name: upper
- description: The upper bound of numbers to randomly sample from.
- default_value: 100
- required: false
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: Config
- list:
- - name: Basic
- folding:
- enabled: false
- description: A basic example configuration.
- config: |
- four_lines:
- name: "Four Lines"
- update_every: 1
- priority: 60000
- penalty: yes
- autodetection_retry: 0
- num_lines: 4
- lower: 0
- upper: 100
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: |
- These metrics refer to the entire monitored application.
- labels: []
- metrics:
- - name: example.random
- description: A random number
- unit: number
- chart_type: line
- dimensions:
- - name: random
diff --git a/collectors/python.d.plugin/exim/Makefile.inc b/collectors/python.d.plugin/exim/Makefile.inc
deleted file mode 100644
index 36ffa56d2..000000000
--- a/collectors/python.d.plugin/exim/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += exim/exim.chart.py
-dist_pythonconfig_DATA += exim/exim.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += exim/README.md exim/Makefile.inc
-
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
deleted file mode 120000
index f1f2ef9f9..000000000
--- a/collectors/python.d.plugin/exim/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/exim.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/exim/exim.chart.py b/collectors/python.d.plugin/exim/exim.chart.py
deleted file mode 100644
index 7238a1bea..000000000
--- a/collectors/python.d.plugin/exim/exim.chart.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: exim netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-EXIM_COMMAND = 'exim -bpc'
-
-ORDER = [
- 'qemails',
-]
-
-CHARTS = {
- 'qemails': {
- 'options': [None, 'Exim Queue Emails', 'emails', 'queue', 'exim.qemails', 'line'],
- 'lines': [
- ['emails', None, 'absolute']
- ]
- }
-}
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.command = EXIM_COMMAND
-
- def _get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- try:
- return {'emails': int(self._get_raw_data()[0])}
- except (ValueError, AttributeError):
- return None
diff --git a/collectors/python.d.plugin/exim/exim.conf b/collectors/python.d.plugin/exim/exim.conf
deleted file mode 100644
index 3b7e65922..000000000
--- a/collectors/python.d.plugin/exim/exim.conf
+++ /dev/null
@@ -1,91 +0,0 @@
-# netdata python.d.plugin configuration for exim
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# exim is slow, so once every 10 seconds
-update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, exim also supports the following:
-#
-# command: 'exim -bpc' # the command to run
-#
-
-# ----------------------------------------------------------------------
-# REQUIRED exim CONFIGURATION
-#
-# netdata will query exim as user netdata.
-# By default exim will refuse to respond.
-#
-# To allow querying exim as a non-admin user, please add the following
-# to your exim configuration:
-#
-# queue_list_requires_admin = false
-#
-# Your exim configuration should be in
-#
-# /etc/exim/exim4.conf
-# or
-# /etc/exim4/conf.d/main/000_local_options
-#
-# Please consult your distribution information to find the exact file.
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-
-local:
- command: 'exim -bpc'
diff --git a/collectors/python.d.plugin/exim/integrations/exim.md b/collectors/python.d.plugin/exim/integrations/exim.md
deleted file mode 100644
index f0ae33d3e..000000000
--- a/collectors/python.d.plugin/exim/integrations/exim.md
+++ /dev/null
@@ -1,181 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/exim/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/exim/metadata.yaml"
-sidebar_label: "Exim"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Mail Servers"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Exim
-
-
-<img src="https://netdata.cloud/img/exim.jpg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: exim
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors the Exim mail queue.
-
-It uses the `exim` command line binary to get the statistics.
-
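-You can verify the underlying command manually; it prints a single number (run it as the `netdata` user once the prerequisites below are met):
-
-```bash
-# print the count of messages currently in the Exim queue
-sudo -u netdata exim -bpc
-```
-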
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-Assuming setup prerequisites are met, the collector will try to gather statistics using the method described above, even without any configuration.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Exim instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| exim.qemails | emails | emails |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Exim configuration - local installation
-
-The module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to run the `exim` binary. We solve that by adding the `queue_list_requires_admin` statement to the Exim configuration and setting it to `false` (it is `true` by default). On many Linux distributions, the default location of the `exim` configuration is `/etc/exim.conf`.
-
-1. Edit the `exim` configuration with your preferred editor and add:
-`queue_list_requires_admin = false`
-2. Restart `exim` and Netdata
-
-
-#### Exim configuration - WHM (CPanel) server
-
-On a WHM server, you can reconfigure `exim` over the WHM interface with the following steps.
-
-1. Log in to WHM
-2. Navigate to Service Configuration --> Exim Configuration Manager --> tab Advanced Editor
-3. Scroll down to the button **Add additional configuration setting** and click on it.
-4. In the new dropdown that appears above, find `queue_list_requires_admin` and set it to `false`.
-5. Scroll to the end and click the **Save** button.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/exim.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/exim.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| command | Path and command to the `exim` binary | exim -bpc | no |
-
-</details>
-
-#### Examples
-
-##### Local exim install
-
-A basic local Exim installation.
-
-```yaml
-local:
- command: 'exim -bpc'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `exim` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin exim debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/exim/metadata.yaml b/collectors/python.d.plugin/exim/metadata.yaml
deleted file mode 100644
index a8be02d99..000000000
--- a/collectors/python.d.plugin/exim/metadata.yaml
+++ /dev/null
@@ -1,132 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: exim
- monitored_instance:
- name: Exim
- link: "https://www.exim.org/"
- categories:
- - data-collection.mail-servers
- icon_filename: "exim.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - exim
- - mail
- - server
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors Exim mail queue."
- method_description: "It uses the `exim` command line binary to get the statistics."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "Assuming setup prerequisites are met, the collector will try to gather statistics using the method described above, even without any configuration."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Exim configuration - local installation"
- description: |
- The module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to run the `exim` binary. We solve that by adding the `queue_list_requires_admin` statement to the Exim configuration and setting it to `false` (it is `true` by default). On many Linux distributions, the default location of the `exim` configuration is `/etc/exim.conf`.
-
- 1. Edit the `exim` configuration with your preferred editor and add:
- `queue_list_requires_admin = false`
- 2. Restart `exim` and Netdata
- - title: "Exim configuration - WHM (CPanel) server"
- description: |
- On a WHM server, you can reconfigure `exim` over the WHM interface with the following steps.
-
- 1. Log in to WHM
- 2. Navigate to Service Configuration --> Exim Configuration Manager --> tab Advanced Editor
- 3. Scroll down to the button **Add additional configuration setting** and click on it.
- 4. In the new dropdown that appears above, find `queue_list_requires_admin` and set it to `false`.
- 5. Scroll to the end and click the **Save** button.
- configuration:
- file:
- name: python.d/exim.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: command
- description: Path and command to the `exim` binary
- default_value: "exim -bpc"
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Local exim install
- description: A basic local exim install
- config: |
- local:
- command: 'exim -bpc'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: exim.qemails
- description: Exim Queue Emails
- unit: "emails"
- chart_type: line
- dimensions:
- - name: emails
diff --git a/collectors/python.d.plugin/fail2ban/Makefile.inc b/collectors/python.d.plugin/fail2ban/Makefile.inc
deleted file mode 100644
index 31e117e53..000000000
--- a/collectors/python.d.plugin/fail2ban/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += fail2ban/fail2ban.chart.py
-dist_pythonconfig_DATA += fail2ban/fail2ban.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += fail2ban/README.md fail2ban/Makefile.inc
-
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
deleted file mode 120000
index 642a8bcf5..000000000
--- a/collectors/python.d.plugin/fail2ban/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/fail2ban.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py b/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
deleted file mode 100644
index 76f6d92b4..000000000
--- a/collectors/python.d.plugin/fail2ban/fail2ban.chart.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: fail2ban log netdata python.d module
-# Author: ilyam8
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-import re
-from collections import defaultdict
-from glob import glob
-
-from bases.FrameworkServices.LogService import LogService
-
-ORDER = [
- 'jails_failed_attempts',
- 'jails_bans',
- 'jails_banned_ips',
-]
-
-
-def charts(jails):
- """
- Chart definitions creating
- """
-
- ch = {
- ORDER[0]: {
- 'options': [None, 'Failed attempts', 'attempts/s', 'failed attempts', 'fail2ban.failed_attempts', 'line'],
- 'lines': []
- },
- ORDER[1]: {
- 'options': [None, 'Bans', 'bans/s', 'bans', 'fail2ban.bans', 'line'],
- 'lines': []
- },
- ORDER[2]: {
- 'options': [None, 'Banned IP addresses (since the last restart of netdata)', 'ips', 'banned ips',
- 'fail2ban.banned_ips', 'line'],
- 'lines': []
- },
- }
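-    # one dimension per jail in every chart; for a jail named "ssh" the
-    # dimension ids are ssh_failed_attempts, ssh and ssh_in_jail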
- for jail in jails:
- dim = ['{0}_failed_attempts'.format(jail), jail, 'incremental']
- ch[ORDER[0]]['lines'].append(dim)
-
- dim = [jail, jail, 'incremental']
- ch[ORDER[1]]['lines'].append(dim)
-
- dim = ['{0}_in_jail'.format(jail), jail, 'absolute']
- ch[ORDER[2]]['lines'].append(dim)
-
- return ch
-
-
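-# Example jail definition this matches:
-# [ssh]
-# enabled = true
-# -> RE_JAILS.findall() yields [('ssh', 'true')]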
-RE_JAILS = re.compile(r'\[([a-zA-Z0-9_-]+)\][^\[\]]+?enabled\s+= +(true|yes|false|no)')
-
-ACTION_BAN = 'Ban'
-ACTION_UNBAN = 'Unban'
-ACTION_RESTORE_BAN = 'Restore Ban'
-ACTION_FOUND = 'Found'
-
-# Example:
-# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Found 203.0.113.1
-# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Ban 203.0.113.1
-# 2018-09-12 11:45:58,727 fail2ban.actions[25029]: WARNING [ssh] Restore Ban 203.0.113.1
-# 2018-09-12 11:45:53,715 fail2ban.actions[25029]: WARNING [ssh] Unban 203.0.113.1
-RE_DATA = re.compile(
- r'\[(?P<jail>[A-Za-z-_0-9]+)\] (?P<action>{0}|{1}|{2}|{3}) (?P<ip>[a-f0-9.:]+)'.format(
- ACTION_BAN, ACTION_UNBAN, ACTION_RESTORE_BAN, ACTION_FOUND
- )
-)
-
-DEFAULT_JAILS = [
- 'ssh',
-]
-
-
-class Service(LogService):
- def __init__(self, configuration=None, name=None):
- LogService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = dict()
- self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
- self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
- self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
- self.exclude = self.configuration.get('exclude', str())
- self.monitoring_jails = list()
- self.banned_ips = defaultdict(set)
- self.data = dict()
-
- def check(self):
- """
- :return: bool
- """
- if not self.conf_path.endswith(('.conf', '.local')):
-            self.error('{0} is not a valid conf path, must be *.conf or *.local'.format(self.conf_path))
- return False
-
- if not os.access(self.log_path, os.R_OK):
- self.error('{0} is not readable'.format(self.log_path))
- return False
-
- if os.path.getsize(self.log_path) == 0:
- self.error('{0} is empty'.format(self.log_path))
- return False
-
- self.monitoring_jails = self.jails_auto_detection()
- for jail in self.monitoring_jails:
- self.data['{0}_failed_attempts'.format(jail)] = 0
- self.data[jail] = 0
- self.data['{0}_in_jail'.format(jail)] = 0
-
- self.definitions = charts(self.monitoring_jails)
- self.info('monitoring jails: {0}'.format(self.monitoring_jails))
-
- return True
-
- def get_data(self):
- """
- :return: dict
- """
- raw = self._get_raw_data()
-
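-        # None signals a read error, while an empty list just means no new log
-        # lines since the last poll; in the latter case return the last counters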
- if not raw:
- return None if raw is None else self.data
-
- for row in raw:
- match = RE_DATA.search(row)
-
- if not match:
- continue
-
- match = match.groupdict()
-
- if match['jail'] not in self.monitoring_jails:
- continue
-
- jail, action, ip = match['jail'], match['action'], match['ip']
-
- if action == ACTION_FOUND:
- self.data['{0}_failed_attempts'.format(jail)] += 1
- elif action in (ACTION_BAN, ACTION_RESTORE_BAN):
- self.data[jail] += 1
- if ip not in self.banned_ips[jail]:
- self.banned_ips[jail].add(ip)
- self.data['{0}_in_jail'.format(jail)] += 1
- elif action == ACTION_UNBAN:
- if ip in self.banned_ips[jail]:
- self.banned_ips[jail].remove(ip)
- self.data['{0}_in_jail'.format(jail)] -= 1
-
- return self.data
-
- def get_files_from_dir(self, dir_path, suffix):
- """
- :return: list
- """
- if not os.path.isdir(dir_path):
- self.error('{0} is not a directory'.format(dir_path))
- return list()
-
- return glob('{0}/*.{1}'.format(self.conf_dir, suffix))
-
- def get_jails_from_file(self, file_path):
- """
- :return: list
- """
- if not os.access(file_path, os.R_OK):
-            self.error('{0} is not readable or does not exist'.format(file_path))
- return list()
-
- with open(file_path, 'rt') as f:
- lines = f.readlines()
- raw = ' '.join(line for line in lines if line.startswith(('[', 'enabled')))
-
- match = RE_JAILS.findall(raw)
- # Result: [('ssh', 'true'), ('dropbear', 'true'), ('pam-generic', 'true'), ...]
-
- if not match:
- self.debug('{0} parse failed'.format(file_path))
- return list()
-
- return match
-
- def jails_auto_detection(self):
- """
- :return: list
-
- Parses jail configuration files. Returns list of enabled jails.
- According man jail.conf parse order must be
- * jail.conf
- * jail.d/*.conf (in alphabetical order)
- * jail.local
- * jail.d/*.local (in alphabetical order)
- """
- jails_files, all_jails, active_jails = list(), list(), list()
-
- jails_files.append('{0}.conf'.format(self.conf_path.rsplit('.')[0]))
- jails_files.extend(self.get_files_from_dir(self.conf_dir, 'conf'))
- jails_files.append('{0}.local'.format(self.conf_path.rsplit('.')[0]))
- jails_files.extend(self.get_files_from_dir(self.conf_dir, 'local'))
-
- self.debug('config files to parse: {0}'.format(jails_files))
-
- for f in jails_files:
- all_jails.extend(self.get_jails_from_file(f))
-
- exclude = self.exclude.split()
-
- for name, status in all_jails:
- if name in exclude:
- continue
-
- if status in ('true', 'yes') and name not in active_jails:
- active_jails.append(name)
- elif status in ('false', 'no') and name in active_jails:
- active_jails.remove(name)
-
- return active_jails or DEFAULT_JAILS
diff --git a/collectors/python.d.plugin/fail2ban/fail2ban.conf b/collectors/python.d.plugin/fail2ban/fail2ban.conf
deleted file mode 100644
index a36436b51..000000000
--- a/collectors/python.d.plugin/fail2ban/fail2ban.conf
+++ /dev/null
@@ -1,68 +0,0 @@
-# netdata python.d.plugin configuration for fail2ban
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, fail2ban also supports the following:
-#
-# log_path: 'path to fail2ban.log' # Default: '/var/log/fail2ban.log'
-# conf_path: 'path to jail.local/jail.conf' # Default: '/etc/fail2ban/jail.local'
-# conf_dir: 'path to jail.d/' # Default: '/etc/fail2ban/jail.d/'
-# exclude: 'jails you want to exclude from autodetection' # Default: none
-#------------------------------------------------------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md b/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md
deleted file mode 100644
index a7116be5e..000000000
--- a/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md
+++ /dev/null
@@ -1,209 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/fail2ban/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/fail2ban/metadata.yaml"
-sidebar_label: "Fail2ban"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Authentication and Authorization"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Fail2ban
-
-
-<img src="https://netdata.cloud/img/fail2ban.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: fail2ban
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Fail2ban performance for prime intrusion prevention operations. Monitor ban counts, jail statuses, and failed login attempts to ensure robust network security.
-
-
-It collects metrics through reading the default log and configuration files of fail2ban.
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The `fail2ban.log` file must be readable by the user `netdata`.
- - change the file ownership and access permissions.
- - update `/etc/logrotate.d/fail2ban` to persist the changes after rotating the log file.
-
-To change the file ownership and access permissions, execute the following:
-
-```shell
-sudo chown root:netdata /var/log/fail2ban.log
-sudo chmod 640 /var/log/fail2ban.log
-```
-
-To persist the changes after rotating the log file, add `create 640 root netdata` to `/etc/logrotate.d/fail2ban`:
-
-```shell
-/var/log/fail2ban.log {
-
- weekly
- rotate 4
- compress
-
- delaycompress
- missingok
- postrotate
- fail2ban-client flushlogs 1>/dev/null
- endscript
-
- # If fail2ban runs as non-root it still needs to have write access
- # to logfiles.
- # create 640 fail2ban adm
- create 640 root netdata
-}
-```
-
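-You can verify the result with a short check such as the one below (a hypothetical helper, not part of Netdata; it only inspects ownership, mode and readability):
-
-```python
-import grp
-import os
-import pwd
-import stat
-
-LOG = "/var/log/fail2ban.log"  # default path read by the collector
-
-st = os.stat(LOG)
-print("owner:", pwd.getpwuid(st.st_uid).pw_name)  # expected: root
-print("group:", grp.getgrgid(st.st_gid).gr_name)  # expected: netdata
-print("mode:", oct(stat.S_IMODE(st.st_mode)))     # expected: 0o640
-print("readable:", os.access(LOG, os.R_OK))       # run this as the netdata user
-```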
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, the collector attempts to read the log file at /var/log/fail2ban.log and the configuration file at /etc/fail2ban/jail.local.
-If the configuration file is not found, the default jail is ssh.
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Fail2ban instance
-
-These metrics refer to the entire monitored application.
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| fail2ban.failed_attempts | a dimension per jail | attempts/s |
-| fail2ban.bans | a dimension per jail | bans/s |
-| fail2ban.banned_ips | a dimension per jail | ips |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/fail2ban.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/fail2ban.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| log_path | path to fail2ban.log. | /var/log/fail2ban.log | no |
-| conf_path | path to jail.local/jail.conf. | /etc/fail2ban/jail.local | no |
-| conf_dir | path to jail.d/. | /etc/fail2ban/jail.d/ | no |
-| exclude | jails you want to exclude from autodetection. | | no |
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-```yaml
-local:
- log_path: '/var/log/fail2ban.log'
- conf_path: '/etc/fail2ban/jail.local'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `fail2ban` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin fail2ban debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/fail2ban/metadata.yaml b/collectors/python.d.plugin/fail2ban/metadata.yaml
deleted file mode 100644
index 61f762679..000000000
--- a/collectors/python.d.plugin/fail2ban/metadata.yaml
+++ /dev/null
@@ -1,200 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: fail2ban
- monitored_instance:
- name: Fail2ban
- link: https://www.fail2ban.org/
- categories:
- - data-collection.authentication-and-authorization
- icon_filename: "fail2ban.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - fail2ban
- - security
- - authentication
- - authorization
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- Monitor Fail2ban performance for prime intrusion prevention operations. Monitor ban counts, jail statuses, and failed login attempts to ensure robust network security.
- method_description: |
- It collects metrics through reading the default log and configuration files of fail2ban.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: |
- The `fail2ban.log` file must be readable by the user `netdata`.
- - change the file ownership and access permissions.
-          - update `/etc/logrotate.d/fail2ban` to persist the changes after rotating the log file.
-
- To change the file ownership and access permissions, execute the following:
-
- ```shell
- sudo chown root:netdata /var/log/fail2ban.log
- sudo chmod 640 /var/log/fail2ban.log
- ```
-
-          To persist the changes after rotating the log file, add `create 640 root netdata` to `/etc/logrotate.d/fail2ban`:
-
- ```shell
- /var/log/fail2ban.log {
-
- weekly
- rotate 4
- compress
-
- delaycompress
- missingok
- postrotate
- fail2ban-client flushlogs 1>/dev/null
- endscript
-
- # If fail2ban runs as non-root it still needs to have write access
- # to logfiles.
- # create 640 fail2ban adm
- create 640 root netdata
- }
- ```
- default_behavior:
- auto_detection:
- description: |
-          By default, the collector attempts to read the log file at /var/log/fail2ban.log and the configuration file at /etc/fail2ban/jail.local.
-          If the configuration file is not found, the default jail is ssh.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: python.d/fail2ban.conf
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: Config options
- enabled: true
- list:
- - name: log_path
- description: path to fail2ban.log.
- default_value: /var/log/fail2ban.log
- required: false
- - name: conf_path
- description: path to jail.local/jail.conf.
- default_value: /etc/fail2ban/jail.local
- required: false
- - name: conf_dir
- description: path to jail.d/.
- default_value: /etc/fail2ban/jail.d/
- required: false
- - name: exclude
- description: jails you want to exclude from autodetection.
- default_value: ""
- required: false
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: Config
- list:
- - name: Basic
- folding:
- enabled: false
- description: A basic example configuration.
- config: |
- local:
- log_path: '/var/log/fail2ban.log'
- conf_path: '/etc/fail2ban/jail.local'
- troubleshooting:
- problems:
- list:
- - name: Debug Mode
- description: |
- To troubleshoot issues with the `fail2ban` module, run the `python.d.plugin` with the debug option enabled.
-            The output will show the results of the data collection job, or error messages explaining why the collector isn't working.
-
- First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's
- not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the
- plugin's directory, switch to the `netdata` user.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- sudo su -s /bin/bash netdata
- ```
-
- Now you can manually run the `fail2ban` module in debug mode:
-
- ```bash
- ./python.d.plugin fail2ban debug trace
- ```
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: |
- These metrics refer to the entire monitored application.
- labels: []
- metrics:
- - name: fail2ban.failed_attempts
- description: Failed attempts
- unit: "attempts/s"
- chart_type: line
- dimensions:
- - name: a dimension per jail
- - name: fail2ban.bans
- description: Bans
- unit: "bans/s"
- chart_type: line
- dimensions:
- - name: a dimension per jail
- - name: fail2ban.banned_ips
- description: Banned IP addresses (since the last restart of netdata)
- unit: "ips"
- chart_type: line
- dimensions:
- - name: a dimension per jail
diff --git a/collectors/python.d.plugin/gearman/Makefile.inc b/collectors/python.d.plugin/gearman/Makefile.inc
deleted file mode 100644
index 275adf1c1..000000000
--- a/collectors/python.d.plugin/gearman/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += gearman/gearman.chart.py
-dist_pythonconfig_DATA += gearman/gearman.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += gearman/README.md gearman/Makefile.inc
-
diff --git a/collectors/python.d.plugin/gearman/README.md b/collectors/python.d.plugin/gearman/README.md
deleted file mode 120000
index 70189d698..000000000
--- a/collectors/python.d.plugin/gearman/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/gearman.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/gearman/gearman.chart.py b/collectors/python.d.plugin/gearman/gearman.chart.py
deleted file mode 100644
index 5e280a4d8..000000000
--- a/collectors/python.d.plugin/gearman/gearman.chart.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# Description: gearman netdata python.d module
-# Author: Kyle Agronick (agronick)
-# SPDX-License-Identifier: GPL-3.0+
-
-# Gearman Netdata Plugin
-
-from copy import deepcopy
-
-from bases.FrameworkServices.SocketService import SocketService
-
-CHARTS = {
- 'total_workers': {
- 'options': [None, 'Total Jobs', 'Jobs', 'Total Jobs', 'gearman.total_jobs', 'line'],
- 'lines': [
- ['total_pending', 'Pending', 'absolute'],
- ['total_running', 'Running', 'absolute'],
- ]
- },
-}
-
-
-def job_chart_template(job_name):
- return {
- 'options': [None, job_name, 'Jobs', 'Activity by Job', 'gearman.single_job', 'stacked'],
- 'lines': [
- ['{0}_pending'.format(job_name), 'Pending', 'absolute'],
- ['{0}_idle'.format(job_name), 'Idle', 'absolute'],
- ['{0}_running'.format(job_name), 'Running', 'absolute'],
- ]
- }
-
-
-def build_result_dict(job):
- """
- Get the status for each job
- :return: dict
- """
-
- total, running, available = job['metrics']
-
- idle = available - running
- pending = total - running
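-    # e.g. for the status line "generic_worker4 78 78 500":
-    # total=78, running=78, available=500 -> pending=0, idle=422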
-
- return {
- '{0}_pending'.format(job['job_name']): pending,
- '{0}_idle'.format(job['job_name']): idle,
- '{0}_running'.format(job['job_name']): running,
- }
-
-
-def parse_worker_data(job):
- job_name = job[0]
- job_metrics = job[1:]
-
- return {
- 'job_name': job_name,
- 'metrics': job_metrics,
- }
-
-
-class GearmanReadException(Exception):
-    pass
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- super(Service, self).__init__(configuration=configuration, name=name)
- self.request = "status\n"
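-        # the Gearman admin protocol's "status" command returns one line per
-        # registered function (name, queued total, running, available workers),
-        # terminated by a line containing "."; splitlines()[:-1] in
-        # get_worker_data() drops that terminator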
- self._keep_alive = True
-
- self.host = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 4730)
-
- self.tls = self.configuration.get('tls', False)
- self.cert = self.configuration.get('cert', None)
- self.key = self.configuration.get('key', None)
-
- self.active_jobs = set()
- self.definitions = deepcopy(CHARTS)
- self.order = ['total_workers']
-
- def _get_data(self):
- """
- Format data received from socket
- :return: dict
- """
-
- try:
- active_jobs = self.get_active_jobs()
- except GearmanReadException:
- return None
-
- found_jobs, job_data = self.process_jobs(active_jobs)
- self.remove_stale_jobs(found_jobs)
- return job_data
-
- def get_active_jobs(self):
- active_jobs = []
-
- for job in self.get_worker_data():
- parsed_job = parse_worker_data(job)
-
- # Gearman does not clean up old jobs
- # We only care about jobs that have
- # some relevant data
- if not any(parsed_job['metrics']):
- continue
-
- active_jobs.append(parsed_job)
-
- return active_jobs
-
- def get_worker_data(self):
- """
- Split the data returned from Gearman
- into a list of lists
-
- This returns the same output that you
- would get from a gearadmin --status
- command.
-
- Example output returned from
- _get_raw_data():
- prefix generic_worker4 78 78 500
- generic_worker2 78 78 500
- generic_worker3 0 0 760
- generic_worker1 0 0 500
-
- :return: list
- """
-
- try:
- raw = self._get_raw_data()
- except (ValueError, AttributeError):
- raise GearmanReadException()
-
- if raw is None:
- self.debug("Gearman returned no data")
- raise GearmanReadException()
-
- workers = list()
-
- for line in raw.splitlines()[:-1]:
- parts = line.split()
- if not parts:
- continue
-
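-            # a function name may itself contain spaces (see the docstring's
-            # "prefix generic_worker4" example), so everything before the last
-            # three numeric fields is treated as the name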
- name = '_'.join(parts[:-3])
- try:
- values = [int(w) for w in parts[-3:]]
- except ValueError:
- continue
-
- w = [name]
- w.extend(values)
- workers.append(w)
-
- return workers
-
- def process_jobs(self, active_jobs):
-
- output = {
- 'total_pending': 0,
- 'total_idle': 0,
- 'total_running': 0,
- }
- found_jobs = set()
-
- for parsed_job in active_jobs:
-
- job_name = self.add_job(parsed_job)
- found_jobs.add(job_name)
- job_data = build_result_dict(parsed_job)
-
- for sum_value in ('pending', 'running', 'idle'):
- output['total_{0}'.format(sum_value)] += job_data['{0}_{1}'.format(job_name, sum_value)]
-
- output.update(job_data)
-
- return found_jobs, output
-
- def remove_stale_jobs(self, active_job_list):
- """
- Removes jobs that have no workers, pending jobs,
- or running jobs
- :param active_job_list: The latest list of active jobs
- :type active_job_list: iterable
- :return: None
- """
-
- for to_remove in self.active_jobs - active_job_list:
- self.remove_job(to_remove)
-
- def add_job(self, parsed_job):
- """
- Adds a job to the list of active jobs
- :param parsed_job: A parsed job dict
- :type parsed_job: dict
-        :return: str, the job's name
- """
-
- def add_chart(job_name):
- """
- Adds a new job chart
- :param job_name: The name of the job to add
- :type job_name: string
- :return: None
- """
-
- job_key = 'job_{0}'.format(job_name)
- template = job_chart_template(job_name)
- new_chart = self.charts.add_chart([job_key] + template['options'])
- for dimension in template['lines']:
- new_chart.add_dimension(dimension)
-
- if parsed_job['job_name'] not in self.active_jobs:
- add_chart(parsed_job['job_name'])
- self.active_jobs.add(parsed_job['job_name'])
-
- return parsed_job['job_name']
-
- def remove_job(self, job_name):
- """
-        Removes a job from the list of active jobs
- :param job_name: The name of the job to remove
- :type job_name: string
- :return: None
- """
-
- def remove_chart(job_name):
- """
- Removes a job chart
- :param job_name: The name of the job to remove
- :type job_name: string
- :return: None
- """
-
- job_key = 'job_{0}'.format(job_name)
- self.charts[job_key].obsolete()
- del self.charts[job_key]
-
- remove_chart(job_name)
- self.active_jobs.remove(job_name)
diff --git a/collectors/python.d.plugin/gearman/gearman.conf b/collectors/python.d.plugin/gearman/gearman.conf
deleted file mode 100644
index 635e893ef..000000000
--- a/collectors/python.d.plugin/gearman/gearman.conf
+++ /dev/null
@@ -1,75 +0,0 @@
-# netdata python.d.plugin configuration for gearman
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, gearman also supports the following:
-#
-# host: localhost # The host running the Gearman server
-# port: 4730 # Port of the Gearman server
-# tls: no # Whether to use TLS or not
-# cert: /path/to/cert # Path to cert if using TLS
-# key: /path/to/key # Path to key if using TLS
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOB
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 4730 \ No newline at end of file
diff --git a/collectors/python.d.plugin/gearman/integrations/gearman.md b/collectors/python.d.plugin/gearman/integrations/gearman.md
deleted file mode 100644
index 3923d1401..000000000
--- a/collectors/python.d.plugin/gearman/integrations/gearman.md
+++ /dev/null
@@ -1,210 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/gearman/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/gearman/metadata.yaml"
-sidebar_label: "Gearman"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Distributed Computing Systems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Gearman
-
-
-<img src="https://netdata.cloud/img/gearman.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: gearman
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Gearman metrics for proficient system task distribution. Track job counts, worker statuses, and queue lengths for effective distributed task management.
-
-This collector connects to a Gearman instance via either TCP or unix socket.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-When no configuration file is found, the collector tries to connect to TCP/IP socket: localhost:4730.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Gearman instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| gearman.total_jobs | Pending, Running | Jobs |
-
-### Per gearman job
-
-Metrics related to Gearman jobs. Each job produces its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| gearman.single_job | Pending, Idle, Running | Jobs |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ gearman_workers_queued ](https://github.com/netdata/netdata/blob/master/health/health.d/gearman.conf) | gearman.single_job | average number of queued jobs over the last 10 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-#### Socket permissions
-
-The gearman UNIX socket should have read permission for user netdata.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/gearman.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/gearman.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| host | Hostname or IP address where gearman is running. | localhost | no |
-| port | Port where gearman is listening. | 4730 | no |
-| tls | Use tls to connect to gearman. | false | no |
-| cert | Provide a certificate file if needed to connect to a TLS gearman instance. | | no |
-| key | Provide a key file if needed to connect to a TLS gearman instance. | | no |
-
-</details>
-
-#### Examples
-
-##### Local gearman service
-
-A basic host and port gearman configuration for localhost.
-
-```yaml
-localhost:
- name: 'local'
- host: 'localhost'
- port: 4730
-
-```
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local'
- host: 'localhost'
- port: 4730
-
-remote:
- name: 'remote'
- host: '192.0.2.1'
- port: 4730
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `gearman` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin gearman debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/gearman/metadata.yaml b/collectors/python.d.plugin/gearman/metadata.yaml
deleted file mode 100644
index f1760568e..000000000
--- a/collectors/python.d.plugin/gearman/metadata.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: gearman
- monitored_instance:
- name: Gearman
- link: "http://gearman.org/"
- categories:
- - data-collection.distributed-computing-systems
- icon_filename: "gearman.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - gearman
- - gearman job server
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor Gearman metrics for proficient system task distribution. Track job counts, worker statuses, and queue lengths for effective distributed task management."
- method_description: "This collector connects to a Gearman instance via either TCP or unix socket."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "When no configuration file is found, the collector tries to connect to TCP/IP socket: localhost:4730."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Socket permissions"
- description: The gearman UNIX socket should have read permission for user netdata.
- configuration:
- file:
- name: python.d/gearman.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: host
-              description: Hostname or IP address where gearman is running.
- default_value: "localhost"
- required: false
- - name: port
-              description: Port where gearman is listening.
- default_value: "4730"
- required: false
- - name: tls
- description: Use tls to connect to gearman.
- default_value: "false"
- required: false
- - name: cert
- description: Provide a certificate file if needed to connect to a TLS gearman instance.
- default_value: ""
- required: false
- - name: key
- description: Provide a key file if needed to connect to a TLS gearman instance.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Local gearman service
- description: A basic host and port gearman configuration for localhost.
- folding:
- enabled: false
- config: |
- localhost:
- name: 'local'
- host: 'localhost'
- port: 4730
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- localhost:
- name: 'local'
- host: 'localhost'
- port: 4730
-
- remote:
- name: 'remote'
- host: '192.0.2.1'
- port: 4730
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: gearman_workers_queued
- link: https://github.com/netdata/netdata/blob/master/health/health.d/gearman.conf
- metric: gearman.single_job
- info: average number of queued jobs over the last 10 minutes
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: gearman.total_jobs
- description: Total Jobs
- unit: "Jobs"
- chart_type: line
- dimensions:
- - name: Pending
- - name: Running
- - name: gearman job
- description: "Metrics related to Gearman jobs. Each job produces its own set of the following metrics."
- labels: []
- metrics:
- - name: gearman.single_job
- description: "{job_name}"
- unit: "Jobs"
- chart_type: stacked
- dimensions:
- - name: Pending
- - name: Idle
-              - name: Running
diff --git a/collectors/python.d.plugin/go_expvar/Makefile.inc b/collectors/python.d.plugin/go_expvar/Makefile.inc
deleted file mode 100644
index 74f50d765..000000000
--- a/collectors/python.d.plugin/go_expvar/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += go_expvar/go_expvar.chart.py
-dist_pythonconfig_DATA += go_expvar/go_expvar.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += go_expvar/README.md go_expvar/Makefile.inc
-
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
deleted file mode 120000
index f28a82f34..000000000
--- a/collectors/python.d.plugin/go_expvar/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/go_applications_expvar.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
deleted file mode 100644
index dca010817..000000000
--- a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: go_expvar netdata python.d module
-# Author: Jan Kral (kralewitz)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import division
-
-import json
-from collections import namedtuple
-
-from bases.FrameworkServices.UrlService import UrlService
-
-MEMSTATS_ORDER = [
- 'memstats_heap',
- 'memstats_stack',
- 'memstats_mspan',
- 'memstats_mcache',
- 'memstats_sys',
- 'memstats_live_objects',
- 'memstats_gc_pauses',
-]
-
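-# memstats sizes arrive in bytes; the trailing divisor of 1024 on each line
-# converts them to the KiB unit declared in the chart options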
-MEMSTATS_CHARTS = {
- 'memstats_heap': {
- 'options': ['heap', 'memory: size of heap memory structures', 'KiB', 'memstats',
- 'expvar.memstats.heap', 'line'],
- 'lines': [
- ['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
- ['memstats_heap_inuse', 'inuse', 'absolute', 1, 1024]
- ]
- },
- 'memstats_stack': {
- 'options': ['stack', 'memory: size of stack memory structures', 'KiB', 'memstats',
- 'expvar.memstats.stack', 'line'],
- 'lines': [
- ['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
- ]
- },
- 'memstats_mspan': {
- 'options': ['mspan', 'memory: size of mspan memory structures', 'KiB', 'memstats',
- 'expvar.memstats.mspan', 'line'],
- 'lines': [
- ['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
- ]
- },
- 'memstats_mcache': {
- 'options': ['mcache', 'memory: size of mcache memory structures', 'KiB', 'memstats',
- 'expvar.memstats.mcache', 'line'],
- 'lines': [
- ['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
- ]
- },
- 'memstats_live_objects': {
- 'options': ['live_objects', 'memory: number of live objects', 'objects', 'memstats',
- 'expvar.memstats.live_objects', 'line'],
- 'lines': [
- ['memstats_live_objects', 'live']
- ]
- },
- 'memstats_sys': {
- 'options': ['sys', 'memory: size of reserved virtual address space', 'KiB', 'memstats',
- 'expvar.memstats.sys', 'line'],
- 'lines': [
- ['memstats_sys', 'sys', 'absolute', 1, 1024]
- ]
- },
- 'memstats_gc_pauses': {
- 'options': ['gc_pauses', 'memory: average duration of GC pauses', 'ns', 'memstats',
- 'expvar.memstats.gc_pauses', 'line'],
- 'lines': [
- ['memstats_gc_pauses', 'avg']
- ]
- }
-}
-
-EXPVAR = namedtuple(
- "EXPVAR",
- [
- "key",
- "type",
- "id",
- ]
-)
-
-
-def flatten(d, top='', sep='.'):
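-    # recursively flattens nested dicts into dot-separated keys,
-    # e.g. {'memstats': {'Alloc': 1}} -> {'memstats.Alloc': 1}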
- items = []
- for key, val in d.items():
- nkey = top + sep + key if top else key
- if isinstance(val, dict):
- items.extend(flatten(val, nkey, sep=sep).items())
- else:
- items.append((nkey, val))
- return dict(items)
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- # if memstats collection is enabled, add the charts and their order
- if self.configuration.get('collect_memstats'):
- self.definitions = dict(MEMSTATS_CHARTS)
- self.order = list(MEMSTATS_ORDER)
- else:
- self.definitions = dict()
- self.order = list()
-
- # if extra charts are defined, parse their config
- extra_charts = self.configuration.get('extra_charts')
- if extra_charts:
- self._parse_extra_charts_config(extra_charts)
-
- def check(self):
- """
- Check if the module can collect data:
- 1) At least one JOB configuration has to be specified
- 2) The JOB configuration needs to define the URL and either collect_memstats must be enabled or at least one
- extra_chart must be defined.
-
- The configuration and URL check is provided by the UrlService class.
- """
-
- if not (self.configuration.get('extra_charts') or self.configuration.get('collect_memstats')):
- self.error('Memstats collection is disabled and no extra_charts are defined, disabling module.')
- return False
-
- return UrlService.check(self)
-
- def _parse_extra_charts_config(self, extra_charts_config):
-
- # a place to store the expvar keys and their types
- self.expvars = list()
-
- for chart in extra_charts_config:
-
- chart_dict = dict()
- chart_id = chart.get('id')
- chart_lines = chart.get('lines')
- chart_opts = chart.get('options', dict())
-
- if not all([chart_id, chart_lines]):
- self.info('Chart {0} has no ID or no lines defined, skipping'.format(chart))
- continue
-
- chart_dict['options'] = [
- chart_opts.get('name', ''),
- chart_opts.get('title', ''),
- chart_opts.get('units', ''),
- chart_opts.get('family', ''),
- chart_opts.get('context', ''),
- chart_opts.get('chart_type', 'line')
- ]
- chart_dict['lines'] = list()
-
- # add the lines to the chart
- for line in chart_lines:
-
- ev_key = line.get('expvar_key')
- ev_type = line.get('expvar_type')
- line_id = line.get('id')
-
- if not all([ev_key, ev_type, line_id]):
- self.info('Line missing expvar_key, expvar_type, or line_id, skipping: {0}'.format(line))
- continue
-
- if ev_type not in ['int', 'float']:
- self.info('Unsupported expvar_type "{0}". Must be "int" or "float"'.format(ev_type))
- continue
-
- # self.expvars[ev_key] = (ev_type, line_id)
- self.expvars.append(EXPVAR(ev_key, ev_type, line_id))
-
- chart_dict['lines'].append(
- [
- line.get('id', ''),
- line.get('name', ''),
- line.get('algorithm', ''),
- line.get('multiplier', 1),
- line.get('divisor', 100 if ev_type == 'float' else 1),
- line.get('hidden', False)
- ]
- )
-
- self.order.append(chart_id)
- self.definitions[chart_id] = chart_dict
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
-
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- data = json.loads(raw_data)
-
- expvars = dict()
- if self.configuration.get('collect_memstats'):
- expvars.update(self._parse_memstats(data))
-
- if self.configuration.get('extra_charts'):
-            # the memstats part of the data has already been parsed, so we remove it before flattening and checking
-            # the rest of the data, thus avoiding needless iteration over the deeply nested memstats dict.
- del (data['memstats'])
- flattened = flatten(data)
-
- for ev in self.expvars:
- v = flattened.get(ev.key)
-
- if v is None:
- continue
-
- try:
- if ev.type == 'int':
- expvars[ev.id] = int(v)
- elif ev.type == 'float':
- expvars[ev.id] = float(v) * 100
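-                        # scaled by 100 to match the line's default divisor of
-                        # 100 (see _parse_extra_charts_config), preserving two
-                        # decimal places in the agent's integer samples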
- except ValueError:
- self.info('Failed to parse value for key {0} as {1}, ignoring key.'.format(ev.key, ev.type))
- return None
-
- return expvars
-
- @staticmethod
- def _parse_memstats(data):
-
- memstats = data['memstats']
-
- # calculate the number of live objects in memory
- live_objs = int(memstats['Mallocs']) - int(memstats['Frees'])
-
- # calculate GC pause times average
- # the Go runtime keeps the last 256 GC pause durations in a circular buffer,
- # so we need to filter out the 0 values before the buffer is filled
- gc_pauses = memstats['PauseNs']
- try:
- gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0])
- # no GC cycles have occurred yet
- except ZeroDivisionError:
- gc_pause_avg = 0
-
- return {
- 'memstats_heap_alloc': memstats['HeapAlloc'],
- 'memstats_heap_inuse': memstats['HeapInuse'],
- 'memstats_stack_inuse': memstats['StackInuse'],
- 'memstats_mspan_inuse': memstats['MSpanInuse'],
- 'memstats_mcache_inuse': memstats['MCacheInuse'],
- 'memstats_sys': memstats['Sys'],
- 'memstats_live_objects': live_objs,
- 'memstats_gc_pauses': gc_pause_avg,
- }
diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.conf b/collectors/python.d.plugin/go_expvar/go_expvar.conf
deleted file mode 100644
index 4b821cde9..000000000
--- a/collectors/python.d.plugin/go_expvar/go_expvar.conf
+++ /dev/null
@@ -1,108 +0,0 @@
-# netdata python.d.plugin configuration for go_expvar
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, this plugin also supports the following:
-#
-# url: 'http://127.0.0.1/debug/vars' # the URL of the expvar endpoint
-#
-# As the plugin cannot possibly know the port your application listens on, there is no default value. Please include
-# the whole path of the endpoint, as the expvar handler can be installed in a non-standard location.
-#
-# If the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-# collect_memstats: true # enables charts for Go runtime's memory statistics
-# extra_charts: {} # defines extra data/charts to monitor, please see the example below
-#
-# If collect_memstats is disabled and no extra charts are defined, this module will disable itself, as it has no data to
-# collect.
-#
-# Please visit the module wiki page for more information on how to use the extra_charts variable:
-#
-# https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin/go_expvar
-#
-# Configuration example
-# ---------------------
-
-#app1:
-# name : 'app1'
-# url : 'http://127.0.0.1:8080/debug/vars'
-# collect_memstats: true
-# extra_charts:
-# - id: "runtime_goroutines"
-# options:
-# name: num_goroutines
-# title: "runtime: number of goroutines"
-# units: goroutines
-# family: runtime
-# context: expvar.runtime.goroutines
-# chart_type: line
-# lines:
-# - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
-# - id: "foo_counters"
-# options:
-# name: counters
-# title: "some random counters"
-# units: awesomeness
-# family: counters
-# context: expvar.foo.counters
-# chart_type: line
-# lines:
-# - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
-# - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
-
diff --git a/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md b/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
deleted file mode 100644
index 8d61fa2ae..000000000
--- a/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
+++ /dev/null
@@ -1,335 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/go_expvar/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/go_expvar/metadata.yaml"
-sidebar_label: "Go applications (EXPVAR)"
-learn_status: "Published"
-learn_rel_path: "Data Collection/APM"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Go applications (EXPVAR)
-
-
-<img src="https://netdata.cloud/img/go.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: go_expvar
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts.
-
-It connects via HTTP to gather the metrics exposed via the `expvar` package.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Go applications (EXPVAR) instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| expvar.memstats.heap | alloc, inuse | KiB |
-| expvar.memstats.stack | inuse | KiB |
-| expvar.memstats.mspan | inuse | KiB |
-| expvar.memstats.mcache | inuse | KiB |
-| expvar.memstats.live_objects | live | objects |
-| expvar.memstats.sys | sys | KiB |
-| expvar.memstats.gc_pauses | avg | ns |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Enable the go_expvar collector
-
-The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d.conf
-```
-
-Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
-
-
-#### Sample `expvar` usage in a Go application
-
-The `expvar` package exposes metrics over HTTP and is very easy to use.
-Consider this minimal sample below:
-
-```go
-package main
-
-import (
- _ "expvar"
- "net/http"
-)
-
-func main() {
- http.ListenAndServe("127.0.0.1:8080", nil)
-}
-```
-
-When imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that
-exposes the Go runtime's memory statistics in JSON format. You can inspect the output by opening
-the URL in your browser (or by using `wget` or `curl`).
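-
-Before moving on, you can verify that the endpoint responds. Below is a minimal sketch that
-fetches and summarizes the JSON (it assumes the sample application above is listening on
-127.0.0.1:8080):
-
-```python
-import json
-from urllib.request import urlopen
-
-# fetch the expvar JSON from the handler registered at /debug/vars
-with urlopen('http://127.0.0.1:8080/debug/vars') as resp:
-    data = json.load(resp)
-
-# show the top-level keys and a short excerpt of the memstats
-print(sorted(data.keys()))
-print(json.dumps(data['memstats'], indent=2)[:200])
-```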
-
-Sample output:
-
-```json
-{
-"cmdline": ["./expvar-demo-binary"],
-"memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <omitted for brevity>}
-}
-```
-
-You can of course expose and monitor your own variables as well.
-Here is a sample Go application that exposes a few custom variables:
-
-```go
-package main
-
-import (
- "expvar"
- "net/http"
- "runtime"
- "time"
-)
-
-func main() {
-
- tick := time.NewTicker(1 * time.Second)
- num_go := expvar.NewInt("runtime.goroutines")
- counters := expvar.NewMap("counters")
- counters.Set("cnt1", new(expvar.Int))
- counters.Set("cnt2", new(expvar.Float))
-
- go http.ListenAndServe(":8080", nil)
-
- for {
- select {
- case <- tick.C:
- num_go.Set(int64(runtime.NumGoroutine()))
- counters.Add("cnt1", 1)
- counters.AddFloat("cnt2", 1.452)
- }
- }
-}
-```
-
-Apart from the runtime memory stats, this application publishes two counters and the
-number of currently running goroutines, updating these stats every second.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/go_expvar.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/go_expvar.conf
-```
-
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally (priority, penalty, autodetection_retry, update_every) and can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| url | The URL of the expvar endpoint, including the port. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes |
-| user | If the URL is password protected, this is the username to use. | | no |
-| pass | If the URL is password protected, this is the password to use. | | no |
-| collect_memstats | Enables charts for Go runtime's memory statistics. | | no |
-| extra_charts | Defines extra data/charts to monitor, please see the example below. | | no |
-
-</details>
-
-#### Examples
-
-##### Monitor a Go app1 application
-
-The example below sets a configuration for a Go application, called `app1`. Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.
-
-The `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.
-
-The `extra_charts` variable is a YAML list of Netdata chart definitions.
-Each chart definition has the following keys:
-
-```
-id: Netdata chart ID
-options: a key-value mapping of chart options
-lines: a list of line definitions
-```
-
-**Note: please do not use dots in the chart or line ID field.
-See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for an explanation.**
-
-Please see these two links to the official Netdata documentation for more information about the values:
-
-- [External plugins - charts](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#chart)
-- [Chart variables](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#global-variables-order-and-chart)
-
-**Line definitions**
-
-Each chart can define multiple lines (dimensions).
-A line definition is a key-value mapping of line options.
-Each line can have the following options:
-
-```
-# mandatory
-expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint
-expvar_type: value type; supported are "float" or "int"
-id: the id of this line/dimension in Netdata
-
-# optional - Netdata defaults are used if these options are not defined
-name: ''
-algorithm: absolute
-multiplier: 1
-divisor: 100 if expvar_type == float, 1 if expvar_type == int
-hidden: False
-```
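-
-A note on the float divisor default, based on a reading of the collector code: Netdata
-dimensions are stored as integers, so the module multiplies float expvars by 100 on
-collection, and the default divisor of 100 restores two decimal places on display. A
-minimal sketch of that round trip (illustrative values only):
-
-```python
-raw = '1512.9839999999983'      # float expvar as read from /debug/vars
-stored = int(float(raw) * 100)  # what the collector hands to Netdata (an integer)
-shown = stored / 100            # what a divisor of 100 renders on the chart
-print(stored, shown)            # 151298 1512.98
-```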
-
-Please see the following link for more information about the options and their default values:
-[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#dimension)
-
-Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.
-All dicts in the resulting JSON document are then flattened to one level,
-and expvar names are joined together with '.' when flattening.
-
-Example:
-
-```
-{
- "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},
- "runtime.goroutines": 5
-}
-```
-
-In the above case, the exported variables will be available under the expvar_keys
-`runtime.goroutines`, `counters.cnt1` and `counters.cnt2`. If the flattening results in a key collision,
-the first defined key wins and all subsequent keys with the same name are ignored.
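-
-A minimal sketch of that flattening behavior (the module's actual implementation may
-differ in its details):
-
-```python
-def flatten(d, prefix=''):
-    """Flatten nested dicts, joining keys with '.'; the first defined key wins."""
-    out = {}
-    for key, value in d.items():
-        path = prefix + '.' + key if prefix else key
-        if isinstance(value, dict):
-            for k, v in flatten(value, path).items():
-                out.setdefault(k, v)  # keep the first key on collision
-        else:
-            out.setdefault(path, value)
-    return out
-
-print(flatten({'counters': {'cnt1': 1042, 'cnt2': 1512.98}, 'runtime.goroutines': 5}))
-# {'counters.cnt1': 1042, 'counters.cnt2': 1512.98, 'runtime.goroutines': 5}
-```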
-
-
-```yaml
-app1:
- name : 'app1'
- url : 'http://127.0.0.1:8080/debug/vars'
- collect_memstats: true
- extra_charts:
- - id: "runtime_goroutines"
- options:
- name: num_goroutines
- title: "runtime: number of goroutines"
- units: goroutines
- family: runtime
- context: expvar.runtime.goroutines
- chart_type: line
- lines:
- - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
- - id: "foo_counters"
- options:
- name: counters
- title: "some random counters"
- units: awesomeness
- family: counters
- context: expvar.foo.counters
- chart_type: line
- lines:
- - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
- - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin go_expvar debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/go_expvar/metadata.yaml b/collectors/python.d.plugin/go_expvar/metadata.yaml
deleted file mode 100644
index 9419b024a..000000000
--- a/collectors/python.d.plugin/go_expvar/metadata.yaml
+++ /dev/null
@@ -1,329 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: go_expvar
- monitored_instance:
- name: Go applications (EXPVAR)
- link: "https://pkg.go.dev/expvar"
- categories:
- - data-collection.apm
- icon_filename: "go.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - go
- - expvar
- - application
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts."
-        method_description: "It connects via HTTP to gather the metrics exposed via the `expvar` package."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Enable the go_expvar collector"
- description: |
- The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
-
- ```bash
- cd /etc/netdata # Replace this path with your Netdata config directory, if different
- sudo ./edit-config python.d.conf
- ```
-
- Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
- - title: "Sample `expvar` usage in a Go application"
- description: |
- The `expvar` package exposes metrics over HTTP and is very easy to use.
- Consider this minimal sample below:
-
- ```go
- package main
-
- import (
- _ "expvar"
- "net/http"
- )
-
- func main() {
- http.ListenAndServe("127.0.0.1:8080", nil)
- }
- ```
-
-              When imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that
-              exposes the Go runtime's memory statistics in JSON format. You can inspect the output by opening
- the URL in your browser (or by using `wget` or `curl`).
-
- Sample output:
-
- ```json
- {
- "cmdline": ["./expvar-demo-binary"],
- "memstats": {"Alloc":630856,"TotalAlloc":630856,"Sys":3346432,"Lookups":27, <omitted for brevity>}
- }
- ```
-
- You can of course expose and monitor your own variables as well.
- Here is a sample Go application that exposes a few custom variables:
-
- ```go
- package main
-
- import (
- "expvar"
- "net/http"
- "runtime"
- "time"
- )
-
- func main() {
-
- tick := time.NewTicker(1 * time.Second)
- num_go := expvar.NewInt("runtime.goroutines")
- counters := expvar.NewMap("counters")
- counters.Set("cnt1", new(expvar.Int))
- counters.Set("cnt2", new(expvar.Float))
-
- go http.ListenAndServe(":8080", nil)
-
- for {
- select {
- case <- tick.C:
- num_go.Set(int64(runtime.NumGoroutine()))
- counters.Add("cnt1", 1)
- counters.AddFloat("cnt2", 1.452)
- }
- }
- }
- ```
-
-              Apart from the runtime memory stats, this application publishes two counters and the
-              number of currently running goroutines, updating these stats every second.
- configuration:
- file:
- name: python.d/go_expvar.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: url
- description: the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location.
- default_value: ""
- required: true
- - name: user
- description: If the URL is password protected, this is the username to use.
- default_value: ""
- required: false
- - name: pass
- description: If the URL is password protected, this is the password to use.
- default_value: ""
- required: false
- - name: collect_memstats
- description: Enables charts for Go runtime's memory statistics.
- default_value: ""
- required: false
- - name: extra_charts
- description: Defines extra data/charts to monitor, please see the example below.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Monitor a Go app1 application
- description: |
- The example below sets a configuration for a Go application, called `app1`. Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.
-
- The `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.
-
-                The `extra_charts` variable is a YAML list of Netdata chart definitions.
- Each chart definition has the following keys:
-
- ```
- id: Netdata chart ID
- options: a key-value mapping of chart options
- lines: a list of line definitions
- ```
-
- **Note: please do not use dots in the chart or line ID field.
- See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**
-
- Please see these two links to the official Netdata documentation for more information about the values:
-
- - [External plugins - charts](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#chart)
- - [Chart variables](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#global-variables-order-and-chart)
-
- **Line definitions**
-
- Each chart can define multiple lines (dimensions).
- A line definition is a key-value mapping of line options.
- Each line can have the following options:
-
- ```
- # mandatory
- expvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint
- expvar_type: value type; supported are "float" or "int"
- id: the id of this line/dimension in Netdata
-
- # optional - Netdata defaults are used if these options are not defined
- name: ''
- algorithm: absolute
- multiplier: 1
- divisor: 100 if expvar_type == float, 1 if expvar_type == int
- hidden: False
- ```
-
- Please see the following link for more information about the options and their default values:
- [External plugins - dimensions](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#dimension)
-
-                Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.
-                All dicts in the resulting JSON document are then flattened to one level,
-                and expvar names are joined together with '.' when flattening.
-
- Example:
-
- ```
- {
- "counters": {"cnt1": 1042, "cnt2": 1512.9839999999983},
- "runtime.goroutines": 5
- }
- ```
-
- In the above case, the exported variables will be available under `runtime.goroutines`,
- `counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,
- the first defined key wins and all subsequent keys with the same name are ignored.
- config: |
- app1:
- name : 'app1'
- url : 'http://127.0.0.1:8080/debug/vars'
- collect_memstats: true
- extra_charts:
- - id: "runtime_goroutines"
- options:
- name: num_goroutines
- title: "runtime: number of goroutines"
- units: goroutines
- family: runtime
- context: expvar.runtime.goroutines
- chart_type: line
- lines:
- - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
- - id: "foo_counters"
- options:
- name: counters
- title: "some random counters"
- units: awesomeness
- family: counters
- context: expvar.foo.counters
- chart_type: line
- lines:
- - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
- - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: expvar.memstats.heap
- description: "memory: size of heap memory structures"
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: alloc
- - name: inuse
- - name: expvar.memstats.stack
- description: "memory: size of stack memory structures"
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: inuse
- - name: expvar.memstats.mspan
- description: "memory: size of mspan memory structures"
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: inuse
- - name: expvar.memstats.mcache
- description: "memory: size of mcache memory structures"
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: inuse
- - name: expvar.memstats.live_objects
- description: "memory: number of live objects"
- unit: "objects"
- chart_type: line
- dimensions:
- - name: live
- - name: expvar.memstats.sys
- description: "memory: size of reserved virtual address space"
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: sys
- - name: expvar.memstats.gc_pauses
- description: "memory: average duration of GC pauses"
- unit: "ns"
- chart_type: line
- dimensions:
- - name: avg
diff --git a/collectors/python.d.plugin/haproxy/Makefile.inc b/collectors/python.d.plugin/haproxy/Makefile.inc
deleted file mode 100644
index ad24deaa0..000000000
--- a/collectors/python.d.plugin/haproxy/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += haproxy/haproxy.chart.py
-dist_pythonconfig_DATA += haproxy/haproxy.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += haproxy/README.md haproxy/Makefile.inc
-
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
deleted file mode 100644
index 2fa203f60..000000000
--- a/collectors/python.d.plugin/haproxy/README.md
+++ /dev/null
@@ -1,90 +0,0 @@
-<!--
-title: "HAProxy monitoring with Netdata"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/haproxy/README.md"
-sidebar_label: "haproxy-python.d.plugin"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/Webapps"
--->
-
-# HAProxy collector
-
-Monitors frontend and backend metrics such as bytes in/out, current sessions, and current
-sessions in queue, as well as health metrics such as backend server status (server checks
-should be enabled).
-
-The plugin can obtain data from a URL or a Unix socket.
-
-Requirements:
-
-- The socket must be readable and writable by the `netdata` user.
-- The URL must have `stats uri <path>` present in the HAProxy config; otherwise you will get HTTP 503 responses in the HAProxy logs.
-
-It produces:
-
-1. **Frontend** family charts
-
- - Kilobytes in/s
- - Kilobytes out/s
- - Sessions current
- - Sessions in queue current
-
-2. **Backend** family charts
-
- - Kilobytes in/s
- - Kilobytes out/s
- - Sessions current
- - Sessions in queue current
-
-3. **Health** chart
-
-   - Number of failed servers (in DOWN state) for every backend
-
-## Configuration
-
-Edit the `python.d/haproxy.conf` configuration file using `edit-config` from the Netdata [config
-directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/haproxy.conf
-```
-
-Sample:
-
-```yaml
-via_url:
- user: 'username' # ONLY IF stats auth is used
-  pass: 'password' # ONLY IF stats auth is used
- url: 'http://ip.address:port/url;csv;norefresh'
-```
-
-OR
-
-```yaml
-via_socket:
- socket: 'path/to/haproxy/sock'
-```
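-
-To check that the socket is in fact readable and writable by the `netdata` user, a quick
-probe such as the sketch below can help (the socket path is an example; adjust it to your
-setup):
-
-```python
-import socket
-
-# HAProxy's admin socket answers 'show stat' with CSV data and then closes
-s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-s.connect('/var/run/haproxy/admin.sock')
-s.sendall(b'show stat\n')
-print(s.recv(65536).decode()[:200])  # should start with '# pxname,svname,...'
-s.close()
-```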
-
-If no configuration is given, the module will fail to run.
-
-
-### Troubleshooting
-
-To troubleshoot issues with the `haproxy` module, run the `python.d.plugin` with the debug option enabled. The
-output will show the results of the data collection job, or error messages explaining why the collector isn't working.
-
-First, navigate to your plugins directory, which is usually located under `/usr/libexec/netdata/plugins.d/`. If that's
-not the case on your system, open `netdata.conf` and look for the `plugins directory` setting. Once you're in the
-plugins directory, switch to the `netdata` user.
-
-```bash
-cd /usr/libexec/netdata/plugins.d/
-sudo su -s /bin/bash netdata
-```
-
-Now you can manually run the `haproxy` module in debug mode:
-
-```bash
-./python.d.plugin haproxy debug trace
-```
-
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
deleted file mode 100644
index f412febb7..000000000
--- a/collectors/python.d.plugin/haproxy/haproxy.chart.py
+++ /dev/null
@@ -1,368 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: haproxy netdata python.d module
-# Author: ilyam8, ktarasz
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from collections import defaultdict
-from re import compile as re_compile
-
-try:
- from urlparse import urlparse
-except ImportError:
- from urllib.parse import urlparse
-
-from bases.FrameworkServices.SocketService import SocketService
-from bases.FrameworkServices.UrlService import UrlService
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = [
- 'fbin',
- 'fbout',
- 'fscur',
- 'fqcur',
- 'fhrsp_1xx',
- 'fhrsp_2xx',
- 'fhrsp_3xx',
- 'fhrsp_4xx',
- 'fhrsp_5xx',
- 'fhrsp_other',
- 'fhrsp_total',
- 'bbin',
- 'bbout',
- 'bscur',
- 'bqcur',
- 'bhrsp_1xx',
- 'bhrsp_2xx',
- 'bhrsp_3xx',
- 'bhrsp_4xx',
- 'bhrsp_5xx',
- 'bhrsp_other',
- 'bhrsp_total',
- 'bqtime',
- 'bttime',
- 'brtime',
- 'bctime',
- 'health_sup',
- 'health_sdown',
- 'health_smaint',
- 'health_bdown',
- 'health_idle'
-]
-
-CHARTS = {
- 'fbin': {
- 'options': [None, 'Kilobytes In', 'KiB/s', 'frontend', 'haproxy_f.bin', 'line'],
- 'lines': []
- },
- 'fbout': {
- 'options': [None, 'Kilobytes Out', 'KiB/s', 'frontend', 'haproxy_f.bout', 'line'],
- 'lines': []
- },
- 'fscur': {
- 'options': [None, 'Sessions Active', 'sessions', 'frontend', 'haproxy_f.scur', 'line'],
- 'lines': []
- },
- 'fqcur': {
- 'options': [None, 'Session In Queue', 'sessions', 'frontend', 'haproxy_f.qcur', 'line'],
- 'lines': []
- },
- 'fhrsp_1xx': {
- 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_1xx', 'line'],
- 'lines': []
- },
- 'fhrsp_2xx': {
- 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_2xx', 'line'],
- 'lines': []
- },
- 'fhrsp_3xx': {
- 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_3xx', 'line'],
- 'lines': []
- },
- 'fhrsp_4xx': {
- 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_4xx', 'line'],
- 'lines': []
- },
- 'fhrsp_5xx': {
- 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'frontend', 'haproxy_f.hrsp_5xx', 'line'],
- 'lines': []
- },
- 'fhrsp_other': {
- 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'frontend',
- 'haproxy_f.hrsp_other', 'line'],
- 'lines': []
- },
- 'fhrsp_total': {
- 'options': [None, 'HTTP responses', 'responses', 'frontend', 'haproxy_f.hrsp_total', 'line'],
- 'lines': []
- },
- 'bbin': {
- 'options': [None, 'Kilobytes In', 'KiB/s', 'backend', 'haproxy_b.bin', 'line'],
- 'lines': []
- },
- 'bbout': {
- 'options': [None, 'Kilobytes Out', 'KiB/s', 'backend', 'haproxy_b.bout', 'line'],
- 'lines': []
- },
- 'bscur': {
- 'options': [None, 'Sessions Active', 'sessions', 'backend', 'haproxy_b.scur', 'line'],
- 'lines': []
- },
- 'bqcur': {
- 'options': [None, 'Sessions In Queue', 'sessions', 'backend', 'haproxy_b.qcur', 'line'],
- 'lines': []
- },
- 'bhrsp_1xx': {
- 'options': [None, 'HTTP responses with 1xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_1xx', 'line'],
- 'lines': []
- },
- 'bhrsp_2xx': {
- 'options': [None, 'HTTP responses with 2xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_2xx', 'line'],
- 'lines': []
- },
- 'bhrsp_3xx': {
- 'options': [None, 'HTTP responses with 3xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_3xx', 'line'],
- 'lines': []
- },
- 'bhrsp_4xx': {
- 'options': [None, 'HTTP responses with 4xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_4xx', 'line'],
- 'lines': []
- },
- 'bhrsp_5xx': {
- 'options': [None, 'HTTP responses with 5xx code', 'responses/s', 'backend', 'haproxy_b.hrsp_5xx', 'line'],
- 'lines': []
- },
- 'bhrsp_other': {
- 'options': [None, 'HTTP responses with other codes (protocol error)', 'responses/s', 'backend',
- 'haproxy_b.hrsp_other', 'line'],
- 'lines': []
- },
- 'bhrsp_total': {
- 'options': [None, 'HTTP responses (total)', 'responses/s', 'backend', 'haproxy_b.hrsp_total', 'line'],
- 'lines': []
- },
- 'bqtime': {
- 'options': [None, 'The average queue time over the 1024 last requests', 'milliseconds', 'backend',
- 'haproxy_b.qtime', 'line'],
- 'lines': []
- },
- 'bctime': {
- 'options': [None, 'The average connect time over the 1024 last requests', 'milliseconds', 'backend',
- 'haproxy_b.ctime', 'line'],
- 'lines': []
- },
- 'brtime': {
- 'options': [None, 'The average response time over the 1024 last requests', 'milliseconds', 'backend',
- 'haproxy_b.rtime', 'line'],
- 'lines': []
- },
- 'bttime': {
- 'options': [None, 'The average total session time over the 1024 last requests', 'milliseconds', 'backend',
- 'haproxy_b.ttime', 'line'],
- 'lines': []
- },
- 'health_sdown': {
- 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', 'haproxy_hs.down', 'line'],
- 'lines': []
- },
- 'health_sup': {
-        'options': [None, 'Backend Servers In UP State', 'healthy servers', 'health', 'haproxy_hs.up', 'line'],
- 'lines': []
- },
- 'health_smaint': {
- 'options': [None, 'Backend Servers In MAINT State', 'maintenance servers', 'health', 'haproxy_hs.maint', 'line'],
- 'lines': []
- },
- 'health_bdown': {
- 'options': [None, 'Is Backend Failed?', 'boolean', 'health', 'haproxy_hb.down', 'line'],
- 'lines': []
- },
- 'health_idle': {
- 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percentage', 'health', 'haproxy.idle', 'line'],
- 'lines': [
- ['idle', None, 'absolute']
- ]
- }
-}
-
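-# 'incremental' dimensions are rendered by netdata as per-second rates of change,
-# 'absolute' dimensions as collected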
-METRICS = {
- 'bin': {'algorithm': 'incremental', 'divisor': 1024},
- 'bout': {'algorithm': 'incremental', 'divisor': 1024},
- 'scur': {'algorithm': 'absolute', 'divisor': 1},
- 'qcur': {'algorithm': 'absolute', 'divisor': 1},
- 'hrsp_1xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_2xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_3xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_4xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_5xx': {'algorithm': 'incremental', 'divisor': 1},
- 'hrsp_other': {'algorithm': 'incremental', 'divisor': 1}
-}
-
-BACKEND_METRICS = {
- 'qtime': {'algorithm': 'absolute', 'divisor': 1},
- 'ctime': {'algorithm': 'absolute', 'divisor': 1},
- 'rtime': {'algorithm': 'absolute', 'divisor': 1},
- 'ttime': {'algorithm': 'absolute', 'divisor': 1}
-}
-
-REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
- socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
-
-
-# TODO: the code is unreadable
-class Service(UrlService, SocketService):
- def __init__(self, configuration=None, name=None):
- if 'socket' in configuration:
- SocketService.__init__(self, configuration=configuration, name=name)
- self.poll = SocketService
- self.options_ = dict(regex=REGEX['socket'],
- stat='show stat\n'.encode(),
- info='show info\n'.encode())
- else:
- UrlService.__init__(self, configuration=configuration, name=name)
- self.poll = UrlService
- self.options_ = dict(regex=REGEX['url'],
- stat=self.url,
- info=url_remove_params(self.url))
- self.order = ORDER
- self.definitions = CHARTS
-
- def check(self):
- if self.poll.check(self):
- self.create_charts()
- self.info('We are using %s.' % self.poll.__name__)
- return True
- return False
-
- def _get_data(self):
- to_netdata = dict()
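-        # query the stats endpoint first, then the info endpoint
-        # (for URL jobs, the info endpoint is the stats URL with its query params stripped)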
- self.request, self.url = self.options_['stat'], self.options_['stat']
- stat_data = self._get_stat_data()
- self.request, self.url = self.options_['info'], self.options_['info']
- info_data = self._get_info_data(regex=self.options_['regex'])
-
- to_netdata.update(stat_data)
- to_netdata.update(info_data)
- return to_netdata or None
-
- def _get_stat_data(self):
- """
- :return: dict
- """
- raw_data = self.poll._get_raw_data(self)
-
- if not raw_data:
- return dict()
-
- raw_data = raw_data.splitlines()
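-        # the first CSV line is the header ('# pxname,svname,...');
-        # zip it with every data row to get one dict per proxy/server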
-        header = raw_data[0].split(',')
-        self.data = parse_data_([dict(zip(header, row.split(','))) for row in raw_data[1:]])
- if not self.data:
- return dict()
-
- stat_data = dict()
-
- for frontend in self.data['frontend']:
- for metric in METRICS:
- idx = frontend['# pxname'].replace('.', '_')
- stat_data['_'.join(['frontend', metric, idx])] = frontend.get(metric) or 0
-
- for backend in self.data['backend']:
- name, idx = backend['# pxname'], backend['# pxname'].replace('.', '_')
- stat_data['hsup_' + idx] = len([server for server in self.data['servers']
- if server_status(server, name, 'UP')])
- stat_data['hsdown_' + idx] = len([server for server in self.data['servers']
- if server_status(server, name, 'DOWN')])
- stat_data['hsmaint_' + idx] = len([server for server in self.data['servers']
- if server_status(server, name, 'MAINT')])
- stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0
- for metric in BACKEND_METRICS:
- stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
- hrsp_total = 0
- for metric in METRICS:
- stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
- if metric.startswith('hrsp_'):
- hrsp_total += int(backend.get(metric) or 0)
- stat_data['_'.join(['backend', 'hrsp_total', idx])] = hrsp_total
- return stat_data
-
- def _get_info_data(self, regex):
- """
- :return: dict
- """
- raw_data = self.poll._get_raw_data(self)
- if not raw_data:
- return dict()
-
- match = regex.search(raw_data)
- return match.groupdict() if match else dict()
-
- @staticmethod
- def _check_raw_data(data):
- """
- Check if all data has been gathered from socket
- :param data: str
- :return: boolean
- """
- return not bool(data)
-
- def create_charts(self):
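-        # build chart definitions dynamically: one dimension per frontend
-        # and backend proxy found in the parsed stats output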
- for front in self.data['frontend']:
- name, idx = front['# pxname'], front['# pxname'].replace('.', '_')
- for metric in METRICS:
- self.definitions['f' + metric]['lines'].append(['_'.join(['frontend', metric, idx]),
- name, METRICS[metric]['algorithm'], 1,
- METRICS[metric]['divisor']])
- self.definitions['fhrsp_total']['lines'].append(['_'.join(['frontend', 'hrsp_total', idx]),
- name, 'incremental', 1, 1])
- for back in self.data['backend']:
- name, idx = back['# pxname'], back['# pxname'].replace('.', '_')
- for metric in METRICS:
- self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
- name, METRICS[metric]['algorithm'], 1,
- METRICS[metric]['divisor']])
- self.definitions['bhrsp_total']['lines'].append(['_'.join(['backend', 'hrsp_total', idx]),
- name, 'incremental', 1, 1])
- for metric in BACKEND_METRICS:
- self.definitions['b' + metric]['lines'].append(['_'.join(['backend', metric, idx]),
- name, BACKEND_METRICS[metric]['algorithm'], 1,
- BACKEND_METRICS[metric]['divisor']])
- self.definitions['health_sup']['lines'].append(['hsup_' + idx, name, 'absolute'])
- self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute'])
- self.definitions['health_smaint']['lines'].append(['hsmaint_' + idx, name, 'absolute'])
- self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute'])
-
-
-def parse_data_(data):
- def is_backend(backend):
- return backend.get('svname') == 'BACKEND' and backend.get('# pxname') != 'stats'
-
- def is_frontend(frontend):
- return frontend.get('svname') == 'FRONTEND' and frontend.get('# pxname') != 'stats'
-
- def is_server(server):
- return not server.get('svname', '').startswith(('FRONTEND', 'BACKEND'))
-
- if not data:
- return None
-
- result = defaultdict(list)
- for elem in data:
- if is_backend(elem):
- result['backend'].append(elem)
- continue
- elif is_frontend(elem):
- result['frontend'].append(elem)
- continue
- elif is_server(elem):
- result['servers'].append(elem)
-
- return result or None
-
-
-def server_status(server, backend_name, status='DOWN'):
-    return server.get('# pxname') == backend_name and server.get('status', '').partition(' ')[0] == status
-
-
-def url_remove_params(url):
- parsed = urlparse(url or str())
- return '{scheme}://{netloc}{path}'.format(scheme=parsed.scheme, netloc=parsed.netloc, path=parsed.path)
diff --git a/collectors/python.d.plugin/haproxy/haproxy.conf b/collectors/python.d.plugin/haproxy/haproxy.conf
deleted file mode 100644
index 10a0df3c3..000000000
--- a/collectors/python.d.plugin/haproxy/haproxy.conf
+++ /dev/null
@@ -1,83 +0,0 @@
-# netdata python.d.plugin configuration for haproxy
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, haproxy also supports the following:
-#
-# IMPORTANT: the socket MUST BE readable AND writable by the netdata user
-#
-# socket: 'path/to/haproxy/sock'
-#
-# OR
-# url: 'http://<ip.address>:<port>/<url>;csv;norefresh'
-# [user: USERNAME] only if stats auth is used
-# [pass: PASSWORD] only if stats auth is used
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-#via_url:
-# user : 'admin'
-# pass : 'password'
-# url : 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'
-
-#via_socket:
-# socket: '/var/run/haproxy/admin.sock'
diff --git a/collectors/python.d.plugin/haproxy/metadata.yaml b/collectors/python.d.plugin/haproxy/metadata.yaml
deleted file mode 100644
index 82ab37d26..000000000
--- a/collectors/python.d.plugin/haproxy/metadata.yaml
+++ /dev/null
@@ -1,322 +0,0 @@
-# This collector will not appear in documentation, as the go version is preferred,
-# https://github.com/netdata/go.d.plugin/blob/master/modules/haproxy/README.md
-#
-#
-# meta:
-# plugin_name: python.d.plugin
-# module_name: haproxy
-# monitored_instance:
-# name: HAProxy
-# link: 'https://www.haproxy.org/'
-# categories:
-# - data-collection.web-servers-and-web-proxies
-# icon_filename: 'haproxy.png'
-# related_resources:
-# integrations:
-# list: []
-# info_provided_to_referring_integrations:
-# description: ''
-# keywords:
-# - haproxy
-# - tcp
-# - balancer
-# most_popular: false
-# overview:
-# data_collection:
-# metrics_description: 'This collector monitors HAProxy metrics about frontend servers, backend servers, responses and more.'
-# method_description: 'It connects to the HAProxy instance via URL or UNIX socket.'
-# supported_platforms:
-# include: []
-# exclude: []
-# multi_instance: true
-# additional_permissions:
-# description: ''
-# default_behavior:
-# auto_detection:
-# description: ''
-# limits:
-# description: ''
-# performance_impact:
-# description: ''
-# setup:
-# prerequisites:
-# list:
-# - title: 'HAProxy setup for socket'
-# description: 'Socket must be readable and writable by the netdata user.'
-# - title: 'HAProxy setup for URL'
-# description: 'URL must have `stats uri <path>` present in the haproxy config, otherwise you will get HTTP 503 in the haproxy logs.'
-# configuration:
-# file:
-# name: python.d/haproxy.conf
-# options:
-# description: |
-# There are 2 sections:
-
-# * Global variables
-# * One or more JOBS that can define multiple different instances to monitor.
-
-# The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-# Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-# Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-# folding:
-# title: "Config options"
-# enabled: true
-# list:
-# - name: update_every
-# description: Sets the default data collection frequency.
-# default_value: 5
-# required: false
-# - name: priority
-# description: Controls the order of charts at the netdata dashboard.
-# default_value: 60000
-# required: false
-# - name: autodetection_retry
-# description: Sets the job re-check interval in seconds.
-# default_value: 0
-# required: false
-# - name: penalty
-# description: Indicates whether to apply penalty to update_every in case of failures.
-# default_value: yes
-# required: false
-# - name: name
-# description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
-# default_value: ''
-# required: false
-# - name: user
-# description: Username if stats auth is used.
-# default_value: ''
-# required: false
-# - name: pass
-# description: Password if stats auth is used.
-# default_value: ''
-# required: false
-# - name: url
-# description: URL to the haproxy_stats endpoint. Also make sure the parameters `csv` and `norefresh` are provided.
-# default_value: ''
-# required: false
-# - name: socket
-# description: Unix socket path to the haproxy sock file.
-# default_value: ''
-# required: false
-# examples:
-# folding:
-# enabled: true
-# title: "Config"
-# list:
-# - name: URL method
-# description: Use a URL to specify the endpoint to check for haproxy statistics.
-# config: |
-# via_url:
-# user: 'username' # ONLY IF stats auth is used
-#              pass: 'password' # ONLY IF stats auth is used
-# url: 'http://ip.address:port/url;csv;norefresh'
-# - name: Local socket
-# description: Use a local socket to check for haproxy statistics.
-# config: |
-# via_socket:
-# socket: 'path/to/haproxy/sock'
-# troubleshooting:
-# problems:
-# list: []
-# alerts:
-# - name: haproxy_backend_server_status
-# link: https://github.com/netdata/netdata/blob/master/health/health.d/haproxy.conf
-# metric: haproxy_hs.down
-# info: average number of failed haproxy backend servers over the last 10 seconds
-# - name: haproxy_backend_status
-# link: https://github.com/netdata/netdata/blob/master/health/health.d/haproxy.conf
-# metric: haproxy_hb.down
-# info: average number of failed haproxy backends over the last 10 seconds
-# metrics:
-# folding:
-# title: Metrics
-# enabled: false
-# description: ""
-# availability: []
-# scopes:
-# - name: global
-# description: 'These metrics refer to the entire monitored application.'
-# labels: []
-# metrics:
-# - name: haproxy_f.bin
-# description: Kilobytes In
-# unit: "KiB/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.bout
-# description: Kilobytes Out
-# unit: "KiB/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.scur
-# description: Sessions Active
-# unit: "sessions"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.qcur
-# description: Session In Queue
-# unit: "sessions"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_1xx
-# description: HTTP responses with 1xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_2xx
-# description: HTTP responses with 2xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_3xx
-# description: HTTP responses with 3xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_4xx
-# description: HTTP responses with 4xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_5xx
-# description: HTTP responses with 5xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_other
-# description: HTTP responses with other codes (protocol error)
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_f.hrsp_total
-# description: HTTP responses
-# unit: "responses"
-# chart_type: line
-# dimensions:
-# - name: a dimension per frontend server
-# - name: haproxy_b.bin
-# description: Kilobytes In
-# unit: "KiB/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.bout
-# description: Kilobytes Out
-# unit: "KiB/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.scur
-# description: Sessions Active
-# unit: "sessions"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.qcur
-# description: Sessions In Queue
-# unit: "sessions"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_1xx
-# description: HTTP responses with 1xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_2xx
-# description: HTTP responses with 2xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_3xx
-# description: HTTP responses with 3xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_4xx
-# description: HTTP responses with 4xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_5xx
-# description: HTTP responses with 5xx code
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_other
-# description: HTTP responses with other codes (protocol error)
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.hrsp_total
-# description: HTTP responses (total)
-# unit: "responses/s"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.qtime
-# description: The average queue time over the 1024 last requests
-# unit: "milliseconds"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.ctime
-# description: The average connect time over the 1024 last requests
-# unit: "milliseconds"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.rtime
-# description: The average response time over the 1024 last requests
-# unit: "milliseconds"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_b.ttime
-# description: The average total session time over the 1024 last requests
-# unit: "milliseconds"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_hs.down
-# description: Backend Servers In DOWN State
-# unit: "failed servers"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_hs.up
-# description: Backend Servers In UP State
-#              unit: "healthy servers"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy_hb.down
-# description: Is Backend Failed?
-# unit: "boolean"
-# chart_type: line
-# dimensions:
-# - name: a dimension per backend server
-# - name: haproxy.idle
-# description: The Ratio Of Polling Time Vs Total Time
-# unit: "percentage"
-# chart_type: line
-# dimensions:
-# - name: idle
diff --git a/collectors/python.d.plugin/hddtemp/Makefile.inc b/collectors/python.d.plugin/hddtemp/Makefile.inc
deleted file mode 100644
index 22852b646..000000000
--- a/collectors/python.d.plugin/hddtemp/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += hddtemp/hddtemp.chart.py
-dist_pythonconfig_DATA += hddtemp/hddtemp.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += hddtemp/README.md hddtemp/Makefile.inc
-
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
deleted file mode 120000
index 95c7593f8..000000000
--- a/collectors/python.d.plugin/hddtemp/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/hdd_temperature.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py b/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
deleted file mode 100644
index 6427aa180..000000000
--- a/collectors/python.d.plugin/hddtemp/hddtemp.chart.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: hddtemp netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-import re
-from copy import deepcopy
-
-from bases.FrameworkServices.SocketService import SocketService
-
-ORDER = [
- 'temperatures',
-]
-
-CHARTS = {
- 'temperatures': {
- 'options': ['disks_temp', 'Disks Temperatures', 'Celsius', 'temperatures', 'hddtemp.temperatures', 'line'],
- 'lines': [
- # lines are created dynamically in `check()` method
- ]}}
-
-RE = re.compile(r'\/dev\/([^|]+)\|([^|]+)\|([0-9]+|SLP|UNK)\|')
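-# example daemon response matched by the regex: '|/dev/sda|SAMSUNG HD103UJ|32|C|'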
-
-
-class Disk:
- def __init__(self, id_, name, temp):
- self.id = id_.split('/')[-1]
- self.name = name.replace(' ', '_')
- self.temp = temp if temp.isdigit() else None
-
- def __repr__(self):
- return self.id
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = deepcopy(CHARTS)
- self.do_only = self.configuration.get('devices')
- self._keep_alive = False
- self.request = ""
- self.host = "127.0.0.1"
- self.port = 7634
-
- def get_disks(self):
- r = self._get_raw_data()
-
- if not r:
- return None
-
- m = RE.findall(r)
-
- if not m:
- self.error("received data doesn't have needed records")
- return None
-
- rv = [Disk(*d) for d in m]
- self.debug('available disks: {0}'.format(rv))
-
- if self.do_only:
- return [v for v in rv if v.id in self.do_only]
- return rv
-
- def get_data(self):
- """
- Get data from TCP/IP socket
- :return: dict
- """
-
- disks = self.get_disks()
-
- if not disks:
- return None
-
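-        # temp is None for disks reporting SLP (sleeping) or UNK (unknown)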
- return dict((d.id, d.temp) for d in disks)
-
- def check(self):
- """
- Parse configuration, check if hddtemp is available, and dynamically create chart lines data
- :return: boolean
- """
- self._parse_config()
- disks = self.get_disks()
-
- if not disks:
- return False
-
- for d in disks:
- dim = [d.id]
- self.definitions['temperatures']['lines'].append(dim)
-
- return True
-
- @staticmethod
- def _check_raw_data(data):
- return not bool(data)
diff --git a/collectors/python.d.plugin/hddtemp/hddtemp.conf b/collectors/python.d.plugin/hddtemp/hddtemp.conf
deleted file mode 100644
index b2d7aef63..000000000
--- a/collectors/python.d.plugin/hddtemp/hddtemp.conf
+++ /dev/null
@@ -1,95 +0,0 @@
-# netdata python.d.plugin configuration for hddtemp
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, hddtemp also supports the following:
-#
-# host: 'IP or HOSTNAME' # the host to connect to
-# port: PORT # the port to connect to
-#
-
-# By default this module will try to autodetect disks
-# (autodetection works only for disks whose names start with "sd").
-# However, this can be overridden by setting the `devices` option to
-# an array of desired disks. Example for two disks:
-#
-# devices:
-# - sda
-# - sdb
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name: 'local'
- host: 'localhost'
- port: 7634
-
-localipv4:
- name: 'local'
- host: '127.0.0.1'
- port: 7634
-
-localipv6:
- name: 'local'
- host: '::1'
- port: 7634
diff --git a/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md b/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md
deleted file mode 100644
index 4a1504f07..000000000
--- a/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md
+++ /dev/null
@@ -1,217 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hddtemp/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hddtemp/metadata.yaml"
-sidebar_label: "HDD temperature"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Hardware Devices and Sensors"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# HDD temperature
-
-
-<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: hddtemp
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors disk temperatures.
-
-
-It uses the `hddtemp` daemon to gather the metrics.
-
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per HDD temperature instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| hddtemp.temperatures | a dimension per disk | Celsius |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Run `hddtemp` in daemon mode
-
-You can execute `hddtemp` in TCP/IP daemon mode by using the `-d` argument.
-
-Running `hddtemp -d` starts the daemon, which listens on port 7634 by default.
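-
-As a quick check that the daemon is reachable, you can read one report from it directly (a minimal sketch, independent of Netdata; the default address `127.0.0.1:7634` is assumed):
-
-```python
-import socket
-
-# Read one full report from a running `hddtemp -d` daemon; the daemon
-# writes its records and then closes the connection.
-sock = socket.create_connection(("127.0.0.1", 7634), timeout=5)
-raw = b""
-while True:
-    chunk = sock.recv(4096)
-    if not chunk:
-        break
-    raw += chunk
-sock.close()
-print(raw.decode())  # e.g. |/dev/sda|ST2000DM001|34|C|
-```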
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/hddtemp.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/hddtemp.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-By default this collector will try to autodetect disks (autodetection works only for disks whose names start with "sd"). However, this can be overridden by setting the `devices` option to an array of desired disks.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |
-| devices | Array of desired disks to detect, in case their names don't start with `sd`. | | no |
-| host | The IP or HOSTNAME to connect to. | localhost | yes |
-| port | The port to connect to. | 7634 | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-```yaml
-localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 7634
-
-```
-##### Custom disk names
-
-An example defining the disk names to detect.
-
-<details><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 7634
- devices:
- - customdisk1
- - customdisk2
-
-```
-</details>
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 7634
-
-remote_job:
- name : 'remote'
-  host : '192.0.2.1'
-  port : 7634
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `hddtemp` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin hddtemp debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/hddtemp/metadata.yaml b/collectors/python.d.plugin/hddtemp/metadata.yaml
deleted file mode 100644
index d8b56fc66..000000000
--- a/collectors/python.d.plugin/hddtemp/metadata.yaml
+++ /dev/null
@@ -1,163 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: hddtemp
- monitored_instance:
- name: HDD temperature
- link: https://linux.die.net/man/8/hddtemp
- categories:
- - data-collection.hardware-devices-and-sensors
- icon_filename: "hard-drive.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - hardware
- - hdd temperature
- - disk temperature
- - temperature
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors disk temperatures.
- method_description: |
- It uses the `hddtemp` daemon to gather the metrics.
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: By default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Run `hddtemp` in daemon mode
- description: |
- You can execute `hddtemp` in TCP/IP daemon mode by using the `-d` argument.
-
-              Running `hddtemp -d` starts the daemon, which listens on port 7634 by default.
- configuration:
- file:
- name: "python.d/hddtemp.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-            By default this collector will try to autodetect disks (autodetection works only for disks whose names start with "sd"). However, this can be overridden by setting the `devices` option to an array of desired disks.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: >
-              Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: "local"
- required: false
- - name: devices
-              description: Array of desired disks to detect, in case their names don't start with `sd`.
- default_value: ""
- required: false
- - name: host
- description: The IP or HOSTNAME to connect to.
- default_value: "localhost"
- required: true
- - name: port
- description: The port to connect to.
- default_value: 7634
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic example configuration.
- folding:
- enabled: false
- config: |
- localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 7634
- - name: Custom disk names
- description: An example defining the disk names to detect.
- config: |
- localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 7634
- devices:
- - customdisk1
- - customdisk2
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 7634
-
- remote_job:
- name : 'remote'
-                host : '192.0.2.1'
-                port : 7634
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: hddtemp.temperatures
- description: Disk Temperatures
- unit: "Celsius"
- chart_type: line
- dimensions:
- - name: a dimension per disk
diff --git a/collectors/python.d.plugin/hpssa/Makefile.inc b/collectors/python.d.plugin/hpssa/Makefile.inc
deleted file mode 100644
index 1c04aa49c..000000000
--- a/collectors/python.d.plugin/hpssa/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += hpssa/hpssa.chart.py
-dist_pythonconfig_DATA += hpssa/hpssa.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += hpssa/README.md hpssa/Makefile.inc
-
diff --git a/collectors/python.d.plugin/hpssa/README.md b/collectors/python.d.plugin/hpssa/README.md
deleted file mode 120000
index 82802d8b4..000000000
--- a/collectors/python.d.plugin/hpssa/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/hp_smart_storage_arrays.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/hpssa/hpssa.chart.py b/collectors/python.d.plugin/hpssa/hpssa.chart.py
deleted file mode 100644
index 66be00837..000000000
--- a/collectors/python.d.plugin/hpssa/hpssa.chart.py
+++ /dev/null
@@ -1,396 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: hpssa netdata python.d module
-# Author: Peter Gnodde (gnoddep)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-import re
-from copy import deepcopy
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-from bases.collection import find_binary
-
-disabled_by_default = True
-update_every = 5
-
-ORDER = [
- 'ctrl_status',
- 'ctrl_temperature',
- 'ld_status',
- 'pd_status',
- 'pd_temperature',
-]
-
-CHARTS = {
- 'ctrl_status': {
- 'options': [
- None,
- 'Status 1 is OK, Status 0 is not OK',
- 'Status',
- 'Controller',
- 'hpssa.ctrl_status',
- 'line'
- ],
- 'lines': []
- },
- 'ctrl_temperature': {
- 'options': [
- None,
- 'Temperature',
- 'Celsius',
- 'Controller',
- 'hpssa.ctrl_temperature',
- 'line'
- ],
- 'lines': []
- },
- 'ld_status': {
- 'options': [
- None,
- 'Status 1 is OK, Status 0 is not OK',
- 'Status',
- 'Logical drives',
- 'hpssa.ld_status',
- 'line'
- ],
- 'lines': []
- },
- 'pd_status': {
- 'options': [
- None,
- 'Status 1 is OK, Status 0 is not OK',
- 'Status',
- 'Physical drives',
- 'hpssa.pd_status',
- 'line'
- ],
- 'lines': []
- },
- 'pd_temperature': {
- 'options': [
- None,
- 'Temperature',
- 'Celsius',
- 'Physical drives',
- 'hpssa.pd_temperature',
- 'line'
- ],
- 'lines': []
- }
-}
-
-adapter_regex = re.compile(r'^(?P<adapter_type>.+) in Slot (?P<slot>\d+)')
-ignored_sections_regex = re.compile(
- r'''
- ^
- Physical[ ]Drives
- | None[ ]attached
- | (?:Expander|Enclosure|SEP|Port[ ]Name:)[ ].+
- | .+[ ]at[ ]Port[ ]\S+,[ ]Box[ ]\d+,[ ].+
- | Mirror[ ]Group[ ]\d+:
- $
- ''',
- re.X
-)
-mirror_group_regex = re.compile(r'^Mirror Group \d+:$')
-disk_partition_regex = re.compile(r'^Disk Partition Information$')
-array_regex = re.compile(r'^Array: (?P<id>[A-Z]+)$')
-drive_regex = re.compile(
- r'''
- ^
- Logical[ ]Drive:[ ](?P<logical_drive_id>\d+)
- | physicaldrive[ ](?P<fqn>[^:]+:\d+:\d+)
- $
- ''',
- re.X
-)
-key_value_regex = re.compile(r'^(?P<key>[^:]+): ?(?P<value>.*)$')
-ld_status_regex = re.compile(r'^Status: (?P<status>[^,]+)(?:, (?P<percentage>[0-9.]+)% complete)?$')
-error_match = re.compile(r'Error:')
-
-
-class HPSSAException(Exception):
- pass
-
-
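-# Line-oriented parser for `ssacli ctrl all show config detail` output.
-# The object iterates over its own stripped lines; rewind() provides one
-# line of lookback so a section parser can push back the line that ended
-# its section for the caller to re-examine.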
-class HPSSA(object):
- def __init__(self, lines):
- self.lines = [line.strip() for line in lines if line.strip()]
- self.current_line = 0
- self.adapters = []
- self.parse()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self.current_line == len(self.lines):
- raise StopIteration
-
- line = self.lines[self.current_line]
- self.current_line += 1
-
- return line
-
- def next(self):
- """
- This is for Python 2.7 compatibility
- """
- return self.__next__()
-
- def rewind(self):
- self.current_line = max(self.current_line - 1, 0)
-
- @staticmethod
- def match_any(line, *regexes):
- return any([regex.match(line) for regex in regexes])
-
- def parse(self):
- for line in self:
- match = adapter_regex.match(line)
- if match:
- self.adapters.append(self.parse_adapter(**match.groupdict()))
-
- def parse_adapter(self, slot, adapter_type):
- adapter = {
- 'slot': int(slot),
- 'type': adapter_type,
-
- 'controller': {
- 'status': None,
- 'temperature': None,
- },
- 'cache': {
- 'present': False,
- 'status': None,
- 'temperature': None,
- },
- 'battery': {
- 'status': None,
- 'count': 0,
- },
-
- 'logical_drives': [],
- 'physical_drives': [],
- }
-
- for line in self:
- if error_match.match(line):
- raise HPSSAException('Error: {}'.format(line))
- elif adapter_regex.match(line):
- self.rewind()
- break
- elif array_regex.match(line):
- self.parse_array(adapter)
- elif line in ('Unassigned', 'unassigned') or line == 'HBA Drives':
- self.parse_physical_drives(adapter)
- elif ignored_sections_regex.match(line):
- self.parse_ignored_section()
- else:
- match = key_value_regex.match(line)
- if match:
- key, value = match.group('key', 'value')
- if key == 'Controller Status':
- adapter['controller']['status'] = value == 'OK'
- elif key == 'Controller Temperature (C)':
- adapter['controller']['temperature'] = int(value)
- elif key == 'Cache Board Present':
- adapter['cache']['present'] = value == 'True'
- elif key == 'Cache Status':
- adapter['cache']['status'] = value == 'OK'
- elif key == 'Cache Module Temperature (C)':
- adapter['cache']['temperature'] = int(value)
- elif key == 'Battery/Capacitor Count':
- adapter['battery']['count'] = int(value)
- elif key == 'Battery/Capacitor Status':
- adapter['battery']['status'] = value == 'OK'
- else:
- raise HPSSAException('Cannot parse line: {}'.format(line))
-
- return adapter
-
- def parse_array(self, adapter):
- for line in self:
- if HPSSA.match_any(line, adapter_regex, array_regex, ignored_sections_regex):
- self.rewind()
- break
-
- match = drive_regex.match(line)
- if match:
- data = match.groupdict()
- if data['logical_drive_id']:
- self.parse_logical_drive(adapter, int(data['logical_drive_id']))
- else:
- self.parse_physical_drive(adapter, data['fqn'])
- elif not key_value_regex.match(line):
- self.rewind()
- break
-
- def parse_physical_drives(self, adapter):
- for line in self:
- match = drive_regex.match(line)
- if match:
- self.parse_physical_drive(adapter, match.group('fqn'))
- else:
- self.rewind()
- break
-
- def parse_logical_drive(self, adapter, logical_drive_id):
- ld = {
- 'id': logical_drive_id,
- 'status': None,
- 'status_complete': None,
- }
-
- for line in self:
- if HPSSA.match_any(line, mirror_group_regex, disk_partition_regex):
- self.parse_ignored_section()
- continue
-
- match = ld_status_regex.match(line)
- if match:
- ld['status'] = match.group('status') == 'OK'
-
- if match.group('percentage'):
- ld['status_complete'] = float(match.group('percentage')) / 100
- elif HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex) \
- or not key_value_regex.match(line):
- self.rewind()
- break
-
- adapter['logical_drives'].append(ld)
-
- def parse_physical_drive(self, adapter, fqn):
- pd = {
- 'fqn': fqn,
- 'status': None,
- 'temperature': None,
- }
-
- for line in self:
- if HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex):
- self.rewind()
- break
-
- match = key_value_regex.match(line)
- if match:
- key, value = match.group('key', 'value')
- if key == 'Status':
- pd['status'] = value == 'OK'
- elif key == 'Current Temperature (C)':
- pd['temperature'] = int(value)
- else:
- self.rewind()
- break
-
- adapter['physical_drives'].append(pd)
-
- def parse_ignored_section(self):
- for line in self:
- if HPSSA.match_any(line, adapter_regex, array_regex, drive_regex, ignored_sections_regex) \
- or not key_value_regex.match(line):
- self.rewind()
- break
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- super(Service, self).__init__(configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = deepcopy(CHARTS)
- self.ssacli_path = self.configuration.get('ssacli_path', 'ssacli')
- self.use_sudo = self.configuration.get('use_sudo', True)
- self.cmd = []
-
- def get_adapters(self):
- try:
- adapters = HPSSA(self._get_raw_data(command=self.cmd)).adapters
- if not adapters:
- # If no adapters are returned, run the command again but capture stderr
- err = self._get_raw_data(command=self.cmd, stderr=True)
- if err:
- raise HPSSAException('Error executing cmd {}: {}'.format(' '.join(self.cmd), '\n'.join(err)))
- return adapters
- except HPSSAException as ex:
- self.error(ex)
- return []
-
- def check(self):
- if not os.path.isfile(self.ssacli_path):
- ssacli_path = find_binary(self.ssacli_path)
- if ssacli_path:
- self.ssacli_path = ssacli_path
- else:
- self.error('Cannot locate "{}" binary'.format(self.ssacli_path))
- return False
-
- if self.use_sudo:
- sudo = find_binary('sudo')
- if not sudo:
- self.error('Cannot locate "{}" binary'.format('sudo'))
- return False
-
- allowed = self._get_raw_data(command=[sudo, '-n', '-l', self.ssacli_path])
- if not allowed or allowed[0].strip() != os.path.realpath(self.ssacli_path):
- self.error('Not allowed to run sudo for command {}'.format(self.ssacli_path))
- return False
-
- self.cmd = [sudo, '-n']
-
- self.cmd.extend([self.ssacli_path, 'ctrl', 'all', 'show', 'config', 'detail'])
- self.info('Command: {}'.format(self.cmd))
-
- adapters = self.get_adapters()
-
- self.info('Discovered adapters: {}'.format([adapter['type'] for adapter in adapters]))
- if not adapters:
- self.error('No adapters discovered')
- return False
-
- return True
-
- def get_data(self):
- netdata = {}
-
- for adapter in self.get_adapters():
- status_key = '{}_status'.format(adapter['slot'])
- temperature_key = '{}_temperature'.format(adapter['slot'])
- ld_key = 'ld_{}_'.format(adapter['slot'])
-
- data = {
- 'ctrl_status': {
- 'ctrl_' + status_key: adapter['controller']['status'],
- 'cache_' + status_key: adapter['cache']['present'] and adapter['cache']['status'],
- 'battery_' + status_key:
- adapter['battery']['status'] if adapter['battery']['count'] > 0 else None
- },
-
- 'ctrl_temperature': {
- 'ctrl_' + temperature_key: adapter['controller']['temperature'],
- 'cache_' + temperature_key: adapter['cache']['temperature'],
- },
-
- 'ld_status': {
- ld_key + '{}_status'.format(ld['id']): ld['status'] for ld in adapter['logical_drives']
- },
-
- 'pd_status': {},
- 'pd_temperature': {},
- }
-
- for pd in adapter['physical_drives']:
- pd_key = 'pd_{}_{}'.format(adapter['slot'], pd['fqn'])
- data['pd_status'][pd_key + '_status'] = pd['status']
- data['pd_temperature'][pd_key + '_temperature'] = pd['temperature']
-
- for chart, dimension_data in data.items():
- for dimension_id, value in dimension_data.items():
- if value is None:
- continue
-
- if dimension_id not in self.charts[chart]:
- self.charts[chart].add_dimension([dimension_id])
-
- netdata[dimension_id] = value
-
- return netdata
diff --git a/collectors/python.d.plugin/hpssa/hpssa.conf b/collectors/python.d.plugin/hpssa/hpssa.conf
deleted file mode 100644
index cc50c9836..000000000
--- a/collectors/python.d.plugin/hpssa/hpssa.conf
+++ /dev/null
@@ -1,61 +0,0 @@
-# netdata python.d.plugin configuration for hpssa
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 5
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 5 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, hpssa also supports the following:
-#
-# ssacli_path: /usr/sbin/ssacli # The path to the ssacli executable
-# use_sudo: True # Whether to use sudo or not
-# ----------------------------------------------------------------------
-
-# ssacli_path: /usr/sbin/ssacli
-# use_sudo: True
diff --git a/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md b/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md
deleted file mode 100644
index d46cc9065..000000000
--- a/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md
+++ /dev/null
@@ -1,205 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hpssa/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hpssa/metadata.yaml"
-sidebar_label: "HP Smart Storage Arrays"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# HP Smart Storage Arrays
-
-
-<img src="https://netdata.cloud/img/hp.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: hpssa
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors HP Smart Storage Arrays metrics about operational statuses and temperatures.
-
-It uses the command line tool `ssacli`. The exact command used is `sudo -n ssacli ctrl all show config detail`
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If no configuration is provided, the collector will try to execute the `ssacli` binary.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per HP Smart Storage Arrays instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| hpssa.ctrl_status | ctrl_{adapter slot}_status, cache_{adapter slot}_status, battery_{adapter slot}_status per adapter | Status |
-| hpssa.ctrl_temperature | ctrl_{adapter slot}_temperature, cache_{adapter slot}_temperature per adapter | Celsius |
-| hpssa.ld_status | a dimension per logical drive | Status |
-| hpssa.pd_status | a dimension per physical drive | Status |
-| hpssa.pd_temperature | a dimension per physical drive | Celsius |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Enable the hpssa collector
-
-The `hpssa` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d.conf
-```
-
-Change the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
-
-
-#### Allow user netdata to execute `ssacli` as root.
-
-This module uses `ssacli`, which can only be executed by root. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `ssacli` as root without a password.
-
-- Add to your `/etc/sudoers` file (`which ssacli` shows the full path to the binary; a verification sketch follows this list):
-
-```bash
-netdata ALL=(root) NOPASSWD: /path/to/ssacli
-```
-
-- Reset Netdata's systemd
- unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux
- distributions with systemd)
-
-The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not optimal, but it is the next-best solution, since without it the agent cannot execute `ssacli` via `sudo`.
-
-As the `root` user, do the following:
-
-```bash
-mkdir /etc/systemd/system/netdata.service.d
-echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
-systemctl daemon-reload
-systemctl restart netdata.service
-```
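-
-You can verify the sudoers entry the same way the collector does at startup (a minimal sketch mirroring the module's probe; `/usr/sbin/ssacli` is an assumed path):
-
-```python
-import os
-import subprocess
-
-# Ask sudo non-interactively whether ssacli may be run as root --
-# the same check the module performs before collecting data.
-ssacli = "/usr/sbin/ssacli"
-out = subprocess.run(["sudo", "-n", "-l", ssacli],
-                     capture_output=True, text=True).stdout.strip()
-print("allowed" if out == os.path.realpath(ssacli) else "not allowed")
-```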
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/hpssa.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/hpssa.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| ssacli_path | Path to the `ssacli` command line utility. Configure this if `ssacli` is not in the $PATH | | no |
-| use_sudo | Whether or not to use `sudo` to execute `ssacli` | True | no |
-
-</details>
-
-#### Examples
-
-##### Local simple config
-
-A basic configuration, specifying the path to `ssacli`
-
-```yaml
-local:
- ssacli_path: /usr/sbin/ssacli
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `hpssa` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin hpssa debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/hpssa/metadata.yaml b/collectors/python.d.plugin/hpssa/metadata.yaml
deleted file mode 100644
index 7871cc276..000000000
--- a/collectors/python.d.plugin/hpssa/metadata.yaml
+++ /dev/null
@@ -1,185 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: hpssa
- monitored_instance:
- name: HP Smart Storage Arrays
- link: 'https://buy.hpe.com/us/en/software/server-management-software/server-management-software/smart-array-management-software/hpe-smart-storage-administrator/p/5409020'
- categories:
- - data-collection.storage-mount-points-and-filesystems
- icon_filename: 'hp.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - storage
- - hp
- - hpssa
- - array
- most_popular: false
- overview:
- data_collection:
- metrics_description: 'This collector monitors HP Smart Storage Arrays metrics about operational statuses and temperatures.'
- method_description: 'It uses the command line tool `ssacli`. The exact command used is `sudo -n ssacli ctrl all show config detail`'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: 'If no configuration is provided, the collector will try to execute the `ssacli` binary.'
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'Enable the hpssa collector'
- description: |
- The `hpssa` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
-
- ```bash
- cd /etc/netdata # Replace this path with your Netdata config directory, if different
- sudo ./edit-config python.d.conf
- ```
-
- Change the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
- - title: 'Allow user netdata to execute `ssacli` as root.'
- description: |
- This module uses `ssacli`, which can only be executed by root. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `ssacli` as root without a password.
-
-            - Add to your `/etc/sudoers` file (`which ssacli` shows the full path to the binary):
-
- ```bash
- netdata ALL=(root) NOPASSWD: /path/to/ssacli
- ```
-
- - Reset Netdata's systemd
- unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux
- distributions with systemd)
-
-              The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not optimal, but it is the next-best solution, since without it the agent cannot execute `ssacli` via `sudo`.
-
- As the `root` user, do the following:
-
-              ```bash
- mkdir /etc/systemd/system/netdata.service.d
- echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
- systemctl daemon-reload
- systemctl restart netdata.service
- ```
- configuration:
- file:
- name: python.d/hpssa.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
-            description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- - name: ssacli_path
- description: Path to the `ssacli` command line utility. Configure this if `ssacli` is not in the $PATH
- default_value: ''
- required: false
- - name: use_sudo
- description: Whether or not to use `sudo` to execute `ssacli`
- default_value: 'True'
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Local simple config
-          description: A basic configuration, specifying the path to `ssacli`
- folding:
- enabled: false
- config: |
- local:
- ssacli_path: /usr/sbin/ssacli
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: hpssa.ctrl_status
- description: Status 1 is OK, Status 0 is not OK
- unit: "Status"
- chart_type: line
- dimensions:
- - name: ctrl_{adapter slot}_status
- - name: cache_{adapter slot}_status
- - name: battery_{adapter slot}_status per adapter
- - name: hpssa.ctrl_temperature
- description: Temperature
- unit: "Celsius"
- chart_type: line
- dimensions:
- - name: ctrl_{adapter slot}_temperature
- - name: cache_{adapter slot}_temperature per adapter
- - name: hpssa.ld_status
- description: Status 1 is OK, Status 0 is not OK
- unit: "Status"
- chart_type: line
- dimensions:
- - name: a dimension per logical drive
- - name: hpssa.pd_status
- description: Status 1 is OK, Status 0 is not OK
- unit: "Status"
- chart_type: line
- dimensions:
- - name: a dimension per physical drive
- - name: hpssa.pd_temperature
- description: Temperature
- unit: "Celsius"
- chart_type: line
- dimensions:
- - name: a dimension per physical drive
diff --git a/collectors/python.d.plugin/icecast/Makefile.inc b/collectors/python.d.plugin/icecast/Makefile.inc
deleted file mode 100644
index cb7c6fa0e..000000000
--- a/collectors/python.d.plugin/icecast/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += icecast/icecast.chart.py
-dist_pythonconfig_DATA += icecast/icecast.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += icecast/README.md icecast/Makefile.inc
-
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
deleted file mode 120000
index db3c1b572..000000000
--- a/collectors/python.d.plugin/icecast/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/icecast.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/icecast/icecast.chart.py b/collectors/python.d.plugin/icecast/icecast.chart.py
deleted file mode 100644
index a967d1779..000000000
--- a/collectors/python.d.plugin/icecast/icecast.chart.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: icecast netdata python.d module
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = [
- 'listeners',
-]
-
-CHARTS = {
- 'listeners': {
- 'options': [None, 'Number Of Listeners', 'listeners', 'listeners', 'icecast.listeners', 'line'],
- 'lines': [
- ]
- }
-}
-
-
-class Source:
- def __init__(self, idx, data):
- self.name = 'source_{0}'.format(idx)
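-        # a source counts as active only when the daemon reports both a
-        # stream start time and a server name for it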
- self.is_active = data.get('stream_start') and data.get('server_name')
- self.listeners = data['listeners']
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.url = self.configuration.get('url')
- self._manager = self._build_manager()
-
- def check(self):
- """
- Add active sources to the "listeners" chart
- :return: bool
- """
- sources = self.get_sources()
- if not sources:
-            return False
-
- active_sources = 0
- for idx, raw_source in enumerate(sources):
- if Source(idx, raw_source).is_active:
- active_sources += 1
- dim_id = 'source_{0}'.format(idx)
- dim = 'source {0}'.format(idx)
- self.definitions['listeners']['lines'].append([dim_id, dim])
-
- return bool(active_sources)
-
- def _get_data(self):
- """
- Get number of listeners for every source
- :return: dict
- """
- sources = self.get_sources()
- if not sources:
- return None
-
- data = dict()
-
- for idx, raw_source in enumerate(sources):
- source = Source(idx, raw_source)
- data[source.name] = source.listeners
-
- return data
-
- def get_sources(self):
- """
- Format data received from http request and return list of sources
- :return: list
- """
-
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- try:
- data = json.loads(raw_data)
- except ValueError as error:
- self.error('JSON decode error:', error)
- return None
-
- sources = data['icestats'].get('source')
- if not sources:
- return None
-
- return sources if isinstance(sources, list) else [sources]
diff --git a/collectors/python.d.plugin/icecast/icecast.conf b/collectors/python.d.plugin/icecast/icecast.conf
deleted file mode 100644
index a33074aef..000000000
--- a/collectors/python.d.plugin/icecast/icecast.conf
+++ /dev/null
@@ -1,81 +0,0 @@
-# netdata python.d.plugin configuration for icecast
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, icecast also supports the following:
-#
-# url: 'URL' # the URL to fetch icecast's stats
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- url : 'http://localhost:8443/status-json.xsl'
-
-localipv4:
- name : 'local'
- url : 'http://127.0.0.1:8443/status-json.xsl' \ No newline at end of file
diff --git a/collectors/python.d.plugin/icecast/integrations/icecast.md b/collectors/python.d.plugin/icecast/integrations/icecast.md
deleted file mode 100644
index 12d7d59ee..000000000
--- a/collectors/python.d.plugin/icecast/integrations/icecast.md
+++ /dev/null
@@ -1,166 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/icecast/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/icecast/metadata.yaml"
-sidebar_label: "Icecast"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Media Services"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Icecast
-
-
-<img src="https://netdata.cloud/img/icecast.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: icecast
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Icecast listener counts.
-
-It connects to an icecast URL and uses the `status-json.xsl` endpoint to retrieve statistics.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-Without configuration, the collector attempts to connect to http://localhost:8443/status-json.xsl
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Icecast instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| icecast.listeners | a dimension for each active source | listeners |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Icecast minimum version
-
-Requires Icecast version 2.4.0 or later.
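-
-If in doubt, you can query the endpoint the collector relies on (a minimal sketch; the default URL `http://localhost:8443/status-json.xsl` is assumed):
-
-```python
-import json
-from urllib.request import urlopen
-
-# Fetch Icecast's JSON status page and print the listener count per source.
-with urlopen("http://localhost:8443/status-json.xsl", timeout=5) as resp:
-    stats = json.load(resp)
-
-sources = stats["icestats"].get("source") or []
-if isinstance(sources, dict):  # a single source is reported as an object
-    sources = [sources]
-for idx, src in enumerate(sources):
-    print("source_{0}: {1} listeners".format(idx, src.get("listeners", 0)))
-```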
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/icecast.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/icecast.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| url | The URL (and port) to the icecast server. Needs to also include `/status-json.xsl` | http://localhost:8443/status-json.xsl | no |
-| user | Username to use to connect to `url` if it's password protected. | | no |
-| pass | Password to use to connect to `url` if it's password protected. | | no |
-
-</details>
-
-#### Examples
-
-##### Remote Icecast server
-
-Configure a remote icecast server
-
-```yaml
-remote:
- url: 'http://1.2.3.4:8443/status-json.xsl'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `icecast` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin icecast debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/icecast/metadata.yaml b/collectors/python.d.plugin/icecast/metadata.yaml
deleted file mode 100644
index 4bcf5e39f..000000000
--- a/collectors/python.d.plugin/icecast/metadata.yaml
+++ /dev/null
@@ -1,127 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: icecast
- monitored_instance:
- name: Icecast
- link: 'https://icecast.org/'
- categories:
- - data-collection.media-streaming-servers
- icon_filename: 'icecast.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - icecast
- - streaming
- - media
- most_popular: false
- overview:
- data_collection:
- metrics_description: 'This collector monitors Icecast listener counts.'
- method_description: 'It connects to an icecast URL and uses the `status-json.xsl` endpoint to retrieve statistics.'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: 'Without configuration, the collector attempts to connect to http://localhost:8443/status-json.xsl'
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'Icecast minimum version'
-          description: 'Requires Icecast version 2.4.0 or later.'
- configuration:
- file:
- name: python.d/icecast.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
-            description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- - name: url
- description: The URL (and port) to the icecast server. Needs to also include `/status-json.xsl`
- default_value: 'http://localhost:8443/status-json.xsl'
- required: false
- - name: user
- description: Username to use to connect to `url` if it's password protected.
- default_value: ''
- required: false
- - name: pass
- description: Password to use to connect to `url` if it's password protected.
- default_value: ''
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Remote Icecast server
- description: Configure a remote icecast server
- folding:
- enabled: false
- config: |
- remote:
- url: 'http://1.2.3.4:8443/status-json.xsl'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: icecast.listeners
- description: Number Of Listeners
- unit: "listeners"
- chart_type: line
- dimensions:
- - name: a dimension for each active source
diff --git a/collectors/python.d.plugin/ipfs/Makefile.inc b/collectors/python.d.plugin/ipfs/Makefile.inc
deleted file mode 100644
index 68458cb38..000000000
--- a/collectors/python.d.plugin/ipfs/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += ipfs/ipfs.chart.py
-dist_pythonconfig_DATA += ipfs/ipfs.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += ipfs/README.md ipfs/Makefile.inc
-
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
deleted file mode 120000
index eee6a07b2..000000000
--- a/collectors/python.d.plugin/ipfs/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/ipfs.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/ipfs/integrations/ipfs.md b/collectors/python.d.plugin/ipfs/integrations/ipfs.md
deleted file mode 100644
index 77dc745aa..000000000
--- a/collectors/python.d.plugin/ipfs/integrations/ipfs.md
+++ /dev/null
@@ -1,203 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ipfs/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ipfs/metadata.yaml"
-sidebar_label: "IPFS"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# IPFS
-
-
-<img src="https://netdata.cloud/img/ipfs.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: ipfs
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors IPFS server metrics related to quality and performance.
-
-It connects to an HTTP endpoint of the IPFS server to collect the metrics.
-
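-The snippet below is a minimal, illustrative sketch of that request pattern (not the module itself), assuming the default API endpoint at `localhost:5001`:
-
-```python
-import json
-import urllib.request
-
-# The IPFS HTTP API expects POST requests.
-req = urllib.request.Request('http://localhost:5001/api/v0/stats/bw', method='POST')
-with urllib.request.urlopen(req) as resp:
-    stats = json.load(resp)
-
-# RateIn/RateOut are bytes/s; the collector charts them as kilobits/s.
-print(int(stats['RateIn']) * 8 / 1000.0, int(stats['RateOut']) * 8 / 1000.0)
-```
-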
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If the endpoint is accessible by the Agent, netdata will autodetect it.
-
-#### Limits
-
-Calls to the following endpoints are disabled due to IPFS bugs:
-
-/api/v0/stats/repo (https://github.com/ipfs/go-ipfs/issues/7528)
-/api/v0/pin/ls (https://github.com/ipfs/go-ipfs/issues/3874)
-
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per IPFS instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipfs.bandwidth | in, out | kilobits/s |
-| ipfs.peers | peers | peers |
-| ipfs.repo_size | avail, size | GiB |
-| ipfs.repo_objects | objects, pinned, recursive_pins | objects |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/ipfs.conf) | ipfs.repo_size | IPFS datastore utilization |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/ipfs.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/ipfs.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | no |
-| url | URL to the IPFS API | http://localhost:5001 | yes |
-| repoapi | Collect repo metrics. | no | no |
-| pinapi | Set status of IPFS pinned object polling. | no | no |
-
-</details>
-
-#### Examples
-
-##### Basic (default out-of-the-box)
-
-A basic example configuration; only one job will run at a time. The auto-detection mechanism uses it by default.
-
-```yaml
-localhost:
- name: 'local'
- url: 'http://localhost:5001'
- repoapi: no
- pinapi: no
-
-```
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local'
- url: 'http://localhost:5001'
- repoapi: no
- pinapi: no
-
-remote_host:
- name: 'remote'
- url: 'http://192.0.2.1:5001'
- repoapi: no
- pinapi: no
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `ipfs` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin ipfs debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/ipfs/ipfs.chart.py b/collectors/python.d.plugin/ipfs/ipfs.chart.py
deleted file mode 100644
index abfc9c492..000000000
--- a/collectors/python.d.plugin/ipfs/ipfs.chart.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: IPFS netdata python.d module
-# Authors: davidak
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = [
- 'bandwidth',
- 'peers',
- 'repo_size',
- 'repo_objects',
-]
-
-CHARTS = {
- 'bandwidth': {
- 'options': [None, 'IPFS Bandwidth', 'kilobits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
- 'lines': [
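-            # RateIn/RateOut are bytes/s: x8 -> bits/s, /1000 -> kilobits/s ('out' is negated to draw below the axis)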
- ['in', None, 'absolute', 8, 1000],
- ['out', None, 'absolute', -8, 1000]
- ]
- },
- 'peers': {
- 'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
- 'lines': [
- ['peers', None, 'absolute']
- ]
- },
- 'repo_size': {
- 'options': [None, 'IPFS Repo Size', 'GiB', 'Size', 'ipfs.repo_size', 'area'],
- 'lines': [
- ['avail', None, 'absolute', 1, 1 << 30],
- ['size', None, 'absolute', 1, 1 << 30],
- ]
- },
- 'repo_objects': {
- 'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'],
- 'lines': [
- ['objects', None, 'absolute', 1, 1],
- ['pinned', None, 'absolute', 1, 1],
- ['recursive_pins', None, 'absolute', 1, 1]
- ]
- }
-}
-
-SI_zeroes = {
- 'k': 3,
- 'm': 6,
- 'g': 9,
- 't': 12,
- 'p': 15,
- 'e': 18,
- 'z': 21,
- 'y': 24
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.baseurl = self.configuration.get('url', 'http://localhost:5001')
- self.method = "POST"
- self.do_pinapi = self.configuration.get('pinapi')
- self.do_repoapi = self.configuration.get('repoapi')
- self.__storage_max = None
-
- def _get_json(self, sub_url):
- """
- :return: json decoding of the specified url
- """
- self.url = self.baseurl + sub_url
- try:
- return json.loads(self._get_raw_data())
- except (TypeError, ValueError):
- return dict()
-
- @staticmethod
- def _recursive_pins(keys):
-        # JSON decoding yields str values, so compare against a str (not bytes)
-        return sum(1 for k in keys if keys[k]['Type'] == 'recursive')
-
- @staticmethod
- def _dehumanize(store_max):
- # convert from '10Gb' to 10000000000
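-        # e.g. '512mb' -> 512000000; values that are already ints pass through unchanged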
- if not isinstance(store_max, int):
- store_max = store_max.lower()
- if store_max.endswith('b'):
- val, units = store_max[:-2], store_max[-2]
- if units in SI_zeroes:
- val += '0' * SI_zeroes[units]
- store_max = val
- try:
- store_max = int(store_max)
- except (TypeError, ValueError):
- store_max = None
- return store_max
-
- def _storagemax(self, store_cfg):
- if self.__storage_max is None:
- self.__storage_max = self._dehumanize(store_cfg)
- return self.__storage_max
-
- def _get_data(self):
- """
- Get data from API
- :return: dict
- """
- # suburl : List of (result-key, original-key, transform-func)
- cfg = {
- '/api/v0/stats/bw':
- [
- ('in', 'RateIn', int),
- ('out', 'RateOut', int),
- ],
- '/api/v0/swarm/peers':
- [
- ('peers', 'Peers', len),
- ],
- }
- if self.do_repoapi:
- cfg.update({
- '/api/v0/stats/repo':
- [
- ('size', 'RepoSize', int),
- ('objects', 'NumObjects', int),
- ('avail', 'StorageMax', self._storagemax),
- ],
- })
-
- if self.do_pinapi:
- cfg.update({
- '/api/v0/pin/ls':
- [
- ('pinned', 'Keys', len),
- ('recursive_pins', 'Keys', self._recursive_pins),
- ]
- })
- r = dict()
- for suburl in cfg:
- in_json = self._get_json(suburl)
- for new_key, orig_key, xmute in cfg[suburl]:
- try:
- r[new_key] = xmute(in_json[orig_key])
- except Exception as error:
- self.debug(error)
- return r or None
diff --git a/collectors/python.d.plugin/ipfs/ipfs.conf b/collectors/python.d.plugin/ipfs/ipfs.conf
deleted file mode 100644
index 8b167b399..000000000
--- a/collectors/python.d.plugin/ipfs/ipfs.conf
+++ /dev/null
@@ -1,82 +0,0 @@
-# netdata python.d.plugin configuration for ipfs
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, ipfs also supports the following:
-#
-# url: 'URL' # URL to the IPFS API
-# repoapi: no # Collect repo metrics
-# # Currently defaults to disabled due to IPFS Bug
-# # https://github.com/ipfs/go-ipfs/issues/7528
-# # resulting in very high CPU Usage
-# pinapi: no # Set status of IPFS pinned object polling
-# # Currently defaults to disabled due to IPFS Bug
-# # https://github.com/ipfs/go-ipfs/issues/3874
-# # resulting in very high CPU Usage
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name: 'local'
- url: 'http://localhost:5001'
- repoapi: no
- pinapi: no
diff --git a/collectors/python.d.plugin/ipfs/metadata.yaml b/collectors/python.d.plugin/ipfs/metadata.yaml
deleted file mode 100644
index dbc421c90..000000000
--- a/collectors/python.d.plugin/ipfs/metadata.yaml
+++ /dev/null
@@ -1,172 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: ipfs
- monitored_instance:
- name: IPFS
- link: "https://ipfs.tech/"
- categories:
- - data-collection.storage-mount-points-and-filesystems
- icon_filename: "ipfs.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
-        metrics_description: "This collector monitors IPFS server metrics related to quality and performance."
-        method_description: "It connects to an HTTP endpoint of the IPFS server to collect the metrics."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "If the endpoint is accessible by the Agent, netdata will autodetect it"
- limits:
- description: |
- Calls to the following endpoints are disabled due to IPFS bugs:
-
-            /api/v0/stats/repo (https://github.com/ipfs/go-ipfs/issues/7528)
-            /api/v0/pin/ls (https://github.com/ipfs/go-ipfs/issues/3874)
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "python.d/ipfs.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: ""
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: The JOB's name as it will appear at the dashboard (by default is the job_name)
- default_value: job_name
- required: false
- - name: url
- description: URL to the IPFS API
-                default_value: "http://localhost:5001"
- required: true
- - name: repoapi
- description: Collect repo metrics.
- default_value: no
- required: false
- - name: pinapi
- description: Set status of IPFS pinned object polling.
- default_value: no
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic (default out-of-the-box)
-            description: A basic example configuration; only one job will run at a time. The auto-detection mechanism uses it by default.
- folding:
- enabled: false
- config: |
- localhost:
- name: 'local'
- url: 'http://localhost:5001'
- repoapi: no
- pinapi: no
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- localhost:
- name: 'local'
- url: 'http://localhost:5001'
- repoapi: no
- pinapi: no
-
- remote_host:
- name: 'remote'
- url: 'http://192.0.2.1:5001'
- repoapi: no
- pinapi: no
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: ipfs_datastore_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/ipfs.conf
- metric: ipfs.repo_size
- info: IPFS datastore utilization
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: ipfs.bandwidth
- description: IPFS Bandwidth
- unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: in
- - name: out
- - name: ipfs.peers
- description: IPFS Peers
- unit: "peers"
- chart_type: line
- dimensions:
- - name: peers
- - name: ipfs.repo_size
- description: IPFS Repo Size
- unit: "GiB"
- chart_type: area
- dimensions:
- - name: avail
- - name: size
- - name: ipfs.repo_objects
- description: IPFS Repo Objects
- unit: "objects"
- chart_type: line
- dimensions:
- - name: objects
- - name: pinned
- - name: recursive_pins
diff --git a/collectors/python.d.plugin/litespeed/Makefile.inc b/collectors/python.d.plugin/litespeed/Makefile.inc
deleted file mode 100644
index 5dd645020..000000000
--- a/collectors/python.d.plugin/litespeed/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += litespeed/litespeed.chart.py
-dist_pythonconfig_DATA += litespeed/litespeed.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += litespeed/README.md litespeed/Makefile.inc
-
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
deleted file mode 120000
index e7418b3dc..000000000
--- a/collectors/python.d.plugin/litespeed/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/litespeed.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/litespeed/integrations/litespeed.md b/collectors/python.d.plugin/litespeed/integrations/litespeed.md
deleted file mode 100644
index 87f2d0b12..000000000
--- a/collectors/python.d.plugin/litespeed/integrations/litespeed.md
+++ /dev/null
@@ -1,170 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/litespeed/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/litespeed/metadata.yaml"
-sidebar_label: "Litespeed"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Web Servers and Web Proxies"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Litespeed
-
-
-<img src="https://netdata.cloud/img/litespeed.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: litespeed
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Examine Litespeed metrics for insights into web server operations. Analyze request rates, response times, and error rates for efficient web service delivery.
-
-The collector uses the statistics under /tmp/lshttpd to gather the metrics.
-
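-A rough, illustrative sketch of that approach (assuming the default `/tmp/lshttpd/` report location):
-
-```python
-import glob
-import re
-
-# Each .rtreport* file holds "KEY: value" pairs, e.g. "BPS_IN: 0, BPS_OUT: 0, ...".
-RE = re.compile(r'([A-Z_]+): ([0-9.]+)')
-
-for path in glob.glob('/tmp/lshttpd/.rtreport*'):
-    with open(path) as report:
-        for line in report:
-            pairs = dict(RE.findall(line))
-            if pairs:
-                print(path, pairs)
-```
-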
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If no configuration is present, the collector will attempt to read files under /tmp/lshttpd/.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Litespeed instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| litespeed.net_throughput | in, out | kilobits/s |
-| litespeed.net_throughput | in, out | kilobits/s |
-| litespeed.connections | free, used | conns |
-| litespeed.connections | free, used | conns |
-| litespeed.requests | requests | requests/s |
-| litespeed.requests_processing | processing | requests |
-| litespeed.cache | hits | hits/s |
-| litespeed.cache | hits | hits/s |
-| litespeed.static | hits | hits/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/litespeed.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/litespeed.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| path | Use a different path than the default, where the litespeed stats files reside. | /tmp/lshttpd/ | no |
-
-</details>
-
-#### Examples
-
-##### Set the path to statistics
-
-Change the path for the litespeed stats files
-
-```yaml
-localhost:
- name: 'local'
- path: '/tmp/lshttpd'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `litespeed` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin litespeed debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/litespeed/litespeed.chart.py b/collectors/python.d.plugin/litespeed/litespeed.chart.py
deleted file mode 100644
index 7ef8189ea..000000000
--- a/collectors/python.d.plugin/litespeed/litespeed.chart.py
+++ /dev/null
@@ -1,188 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: litespeed netdata python.d module
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import glob
-import os
-import re
-from collections import namedtuple
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-update_every = 10
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = [
- 'net_throughput_http', # net throughput
- 'net_throughput_https', # net throughput
- 'connections_http', # connections
- 'connections_https', # connections
- 'requests', # requests
- 'requests_processing', # requests
- 'pub_cache_hits', # cache
- 'private_cache_hits', # cache
- 'static_hits', # static
-]
-
-CHARTS = {
- 'net_throughput_http': {
- 'options': [None, 'Network Throughput HTTP', 'kilobits/s', 'net throughput',
- 'litespeed.net_throughput', 'area'],
- 'lines': [
- ['bps_in', 'in', 'absolute'],
- ['bps_out', 'out', 'absolute', -1]
- ]
- },
- 'net_throughput_https': {
- 'options': [None, 'Network Throughput HTTPS', 'kilobits/s', 'net throughput',
- 'litespeed.net_throughput', 'area'],
- 'lines': [
- ['ssl_bps_in', 'in', 'absolute'],
- ['ssl_bps_out', 'out', 'absolute', -1]
- ]
- },
- 'connections_http': {
- 'options': [None, 'Connections HTTP', 'conns', 'connections', 'litespeed.connections', 'stacked'],
- 'lines': [
- ['conn_free', 'free', 'absolute'],
- ['conn_used', 'used', 'absolute']
- ]
- },
- 'connections_https': {
- 'options': [None, 'Connections HTTPS', 'conns', 'connections', 'litespeed.connections', 'stacked'],
- 'lines': [
- ['ssl_conn_free', 'free', 'absolute'],
- ['ssl_conn_used', 'used', 'absolute']
- ]
- },
- 'requests': {
- 'options': [None, 'Requests', 'requests/s', 'requests', 'litespeed.requests', 'line'],
- 'lines': [
- ['requests', None, 'absolute', 1, 100]
- ]
- },
- 'requests_processing': {
- 'options': [None, 'Requests In Processing', 'requests', 'requests', 'litespeed.requests_processing', 'line'],
- 'lines': [
- ['requests_processing', 'processing', 'absolute']
- ]
- },
- 'pub_cache_hits': {
- 'options': [None, 'Public Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
- 'lines': [
- ['pub_cache_hits', 'hits', 'absolute', 1, 100]
- ]
- },
- 'private_cache_hits': {
- 'options': [None, 'Private Cache Hits', 'hits/s', 'cache', 'litespeed.cache', 'line'],
- 'lines': [
- ['private_cache_hits', 'hits', 'absolute', 1, 100]
- ]
- },
- 'static_hits': {
- 'options': [None, 'Static Hits', 'hits/s', 'static', 'litespeed.static', 'line'],
- 'lines': [
- ['static_hits', 'hits', 'absolute', 1, 100]
- ]
- }
-}
-
-t = namedtuple('T', ['key', 'id', 'mul'])
-
-T = [
- t('BPS_IN', 'bps_in', 8),
- t('BPS_OUT', 'bps_out', 8),
- t('SSL_BPS_IN', 'ssl_bps_in', 8),
- t('SSL_BPS_OUT', 'ssl_bps_out', 8),
- t('REQ_PER_SEC', 'requests', 100),
- t('REQ_PROCESSING', 'requests_processing', 1),
- t('PUB_CACHE_HITS_PER_SEC', 'pub_cache_hits', 100),
- t('PRIVATE_CACHE_HITS_PER_SEC', 'private_cache_hits', 100),
- t('STATIC_HITS_PER_SEC', 'static_hits', 100),
- t('PLAINCONN', 'conn_used', 1),
- t('AVAILCONN', 'conn_free', 1),
- t('SSLCONN', 'ssl_conn_used', 1),
- t('AVAILSSL', 'ssl_conn_free', 1),
-]
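-
-# REQ_PER_SEC and the *_PER_SEC hit values are floats: they are scaled by 100
-# here and divided back by 100 in CHARTS, so two decimal places survive the
-# integer pipeline.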
-
-RE = re.compile(r'([A-Z_]+): ([0-9.]+)')
-
-ZERO_DATA = {
- 'bps_in': 0,
- 'bps_out': 0,
- 'ssl_bps_in': 0,
- 'ssl_bps_out': 0,
- 'requests': 0,
- 'requests_processing': 0,
- 'pub_cache_hits': 0,
- 'private_cache_hits': 0,
- 'static_hits': 0,
- 'conn_used': 0,
- 'conn_free': 0,
- 'ssl_conn_used': 0,
- 'ssl_conn_free': 0,
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.path = self.configuration.get('path', '/tmp/lshttpd/')
- self.files = list()
-
- def check(self):
- if not self.path:
- self.error('"path" not specified')
- return False
-
- fs = glob.glob(os.path.join(self.path, '.rtreport*'))
-
- if not fs:
- self.error('"{0}" has no "rtreport" files or dir is not readable'.format(self.path))
-            return False
-
- self.debug('stats files:', fs)
-
- for f in fs:
- if not is_readable_file(f):
- self.error('{0} is not readable'.format(f))
- continue
- self.files.append(f)
-
- return bool(self.files)
-
- def get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- data = dict(ZERO_DATA)
-
- for f in self.files:
- try:
- with open(f) as b:
- lines = b.readlines()
- except (OSError, IOError) as err:
- self.error(err)
- return None
- else:
- parse_file(data, lines)
-
- return data
-
-
-def parse_file(data, lines):
- for line in lines:
- if not line.startswith(('BPS_IN:', 'MAXCONN:', 'PLAINCONN:', 'REQ_RATE []:')):
- continue
- m = dict(RE.findall(line))
- for v in T:
- if v.key in m:
- data[v.id] += float(m[v.key]) * v.mul
-
-
-def is_readable_file(v):
- return os.path.isfile(v) and os.access(v, os.R_OK)
diff --git a/collectors/python.d.plugin/litespeed/litespeed.conf b/collectors/python.d.plugin/litespeed/litespeed.conf
deleted file mode 100644
index a326e184e..000000000
--- a/collectors/python.d.plugin/litespeed/litespeed.conf
+++ /dev/null
@@ -1,72 +0,0 @@
-# netdata python.d.plugin configuration for litespeed
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, litespeed also supports the following:
-#
-# path: 'PATH' # path to litespeed stats files directory
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- path : '/tmp/lshttpd/'
diff --git a/collectors/python.d.plugin/litespeed/metadata.yaml b/collectors/python.d.plugin/litespeed/metadata.yaml
deleted file mode 100644
index 400f3a7f6..000000000
--- a/collectors/python.d.plugin/litespeed/metadata.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: litespeed
- monitored_instance:
- name: Litespeed
- link: "https://www.litespeedtech.com/products/litespeed-web-server"
- categories:
- - data-collection.web-servers-and-web-proxies
- icon_filename: "litespeed.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - litespeed
- - web
- - server
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Examine Litespeed metrics for insights into web server operations. Analyze request rates, response times, and error rates for efficient web service delivery."
- method_description: "The collector uses the statistics under /tmp/lshttpd to gather the metrics."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "If no configuration is present, the collector will attempt to read files under /tmp/lshttpd/."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: python.d/litespeed.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: path
-                description: Use a different path than the default, where the litespeed stats files reside.
- default_value: "/tmp/lshttpd/"
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Set the path to statistics
- description: Change the path for the litespeed stats files
- config: |
- localhost:
- name: 'local'
- path: '/tmp/lshttpd'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: litespeed.net_throughput
- description: Network Throughput HTTP
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: in
- - name: out
- - name: litespeed.net_throughput
- description: Network Throughput HTTPS
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: in
- - name: out
- - name: litespeed.connections
- description: Connections HTTP
- unit: "conns"
- chart_type: stacked
- dimensions:
- - name: free
- - name: used
- - name: litespeed.connections
- description: Connections HTTPS
- unit: "conns"
- chart_type: stacked
- dimensions:
- - name: free
- - name: used
- - name: litespeed.requests
- description: Requests
- unit: "requests/s"
- chart_type: line
- dimensions:
- - name: requests
- - name: litespeed.requests_processing
- description: Requests In Processing
- unit: "requests"
- chart_type: line
- dimensions:
- - name: processing
- - name: litespeed.cache
- description: Public Cache Hits
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: hits
- - name: litespeed.cache
- description: Private Cache Hits
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: hits
- - name: litespeed.static
- description: Static Hits
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: hits
diff --git a/collectors/python.d.plugin/megacli/Makefile.inc b/collectors/python.d.plugin/megacli/Makefile.inc
deleted file mode 100644
index 83680d723..000000000
--- a/collectors/python.d.plugin/megacli/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += megacli/megacli.chart.py
-dist_pythonconfig_DATA += megacli/megacli.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += megacli/README.md megacli/Makefile.inc
-
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
deleted file mode 120000
index e5df4d41d..000000000
--- a/collectors/python.d.plugin/megacli/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/megacli.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/megacli/integrations/megacli.md b/collectors/python.d.plugin/megacli/integrations/megacli.md
deleted file mode 100644
index 0c4af78a9..000000000
--- a/collectors/python.d.plugin/megacli/integrations/megacli.md
+++ /dev/null
@@ -1,220 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/megacli/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/megacli/metadata.yaml"
-sidebar_label: "MegaCLI"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# MegaCLI
-
-
-<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: megacli
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Examine MegaCLI metrics with Netdata for insights into RAID controller performance. Improve your RAID controller efficiency with real-time MegaCLI metrics.
-
-Collects adapter, physical drive, and battery stats using the megacli command-line tool.
-
-Executed commands:
-
- - `sudo -n megacli -LDPDInfo -aAll`
- - `sudo -n megacli -AdpBbuCmd -a0`
-
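-A minimal, illustrative sketch of the first call and the adapter-state parsing (assuming `megacli` is in the PATH and the sudoers rule from the prerequisites below is in place):
-
-```python
-import re
-import subprocess
-
-# Matches e.g. "Adapter #0 State: Optimal" once the output is flattened.
-RE_ADAPTER = re.compile(r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z ]+)')
-
-out = subprocess.run(
-    ['sudo', '-n', 'megacli', '-LDPDInfo', '-aAll', '-NoLog'],
-    capture_output=True, text=True,
-).stdout
-
-# Keep only the lines carrying adapter state, then pair them up with the regex.
-flat = ' '.join(line.strip() for line in out.splitlines()
-                if line.startswith(('Adapter #', 'State')))
-for adapter_id, state in RE_ADAPTER.findall(flat):
-    print(adapter_id, state.strip() in ('Partially Degraded', 'Degraded', 'Failed'))
-```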
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-The module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.
-
-### Default Behavior
-
-#### Auto-Detection
-
-After all the permissions are satisfied, netdata should be able to execute commands via the megacli command-line utility.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per MegaCLI instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| megacli.adapter_degraded | a dimension per adapter | is degraded |
-| megacli.pd_media_error | a dimension per physical drive | errors/s |
-| megacli.pd_predictive_failure | a dimension per physical drive | failures/s |
-
-### Per battery
-
-Metrics related to Battery Backup Units; each BBU provides its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| megacli.bbu_relative_charge | adapter {battery id} | percentage |
-| megacli.bbu_cycle_count | adapter {battery id} | cycle count |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ megacli_adapter_state ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.adapter_degraded | adapter is in the degraded state (0: false, 1: true) |
-| [ megacli_pd_media_errors ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.pd_media_error | number of physical drive media errors |
-| [ megacli_pd_predictive_failures ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.pd_predictive_failure | number of physical drive predictive failures |
-| [ megacli_bbu_relative_charge ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.bbu_relative_charge | average battery backup unit (BBU) relative state of charge over the last 10 seconds |
-| [ megacli_bbu_cycle_count ](https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf) | megacli.bbu_cycle_count | average battery backup unit (BBU) charge cycles count over the last 10 seconds |
-
-
-## Setup
-
-### Prerequisites
-
-#### Grant permissions for netdata to run megacli via sudo
-
-The module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.
-
-Add the following to your `/etc/sudoers` file (`which megacli` shows the full path to the binary):
-
-```bash
-netdata ALL=(root) NOPASSWD: /path/to/megacli
-```
-
-
-#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)
-
-The default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute megacli using sudo.
-
-As root user, do the following:
-
-```bash
-mkdir /etc/systemd/system/netdata.service.d
-echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
-systemctl daemon-reload
-systemctl restart netdata.service
-```
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/megacli.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/megacli.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| do_battery | Whether to collect battery backup unit (BBU) stats (adds an additional `megacli -AdpBbuCmd -a0` call). | no | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration per job
-
-```yaml
-job_name:
- name: myname
- update_every: 1
- priority: 60000
- penalty: yes
- autodetection_retry: 0
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `megacli` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin megacli debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py
deleted file mode 100644
index 8222092a8..000000000
--- a/collectors/python.d.plugin/megacli/megacli.chart.py
+++ /dev/null
@@ -1,278 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: megacli netdata python.d module
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-import re
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-from bases.collection import find_binary
-
-disabled_by_default = True
-
-update_every = 5
-
-
-def adapter_charts(ads):
- order = [
- 'adapter_degraded',
- ]
-
- def dims(ad):
- return [['adapter_{0}_degraded'.format(a.id), 'adapter {0}'.format(a.id)] for a in ad]
-
- charts = {
- 'adapter_degraded': {
- 'options': [None, 'Adapter State', 'is degraded', 'adapter', 'megacli.adapter_degraded', 'line'],
- 'lines': dims(ads)
- },
- }
-
- return order, charts
-
-
-def pd_charts(pds):
- order = [
- 'pd_media_error',
- 'pd_predictive_failure',
- ]
-
- def dims(k, pd):
- return [['slot_{0}_{1}'.format(p.id, k), 'slot {0}'.format(p.id), 'incremental'] for p in pd]
-
- charts = {
- 'pd_media_error': {
- 'options': [None, 'Physical Drives Media Errors', 'errors/s', 'pd', 'megacli.pd_media_error', 'line'],
- 'lines': dims('media_error', pds)
- },
- 'pd_predictive_failure': {
- 'options': [None, 'Physical Drives Predictive Failures', 'failures/s', 'pd',
- 'megacli.pd_predictive_failure', 'line'],
- 'lines': dims('predictive_failure', pds)
- }
- }
-
- return order, charts
-
-
-def battery_charts(bats):
- order = list()
- charts = dict()
-
- for b in bats:
- order.append('bbu_{0}_relative_charge'.format(b.id))
- charts.update(
- {
- 'bbu_{0}_relative_charge'.format(b.id): {
- 'options': [None, 'Relative State of Charge', 'percentage', 'battery',
- 'megacli.bbu_relative_charge', 'line'],
- 'lines': [
- ['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)],
- ]
- }
- }
- )
-
- for b in bats:
- order.append('bbu_{0}_cycle_count'.format(b.id))
- charts.update(
- {
- 'bbu_{0}_cycle_count'.format(b.id): {
- 'options': [None, 'Cycle Count', 'cycle count', 'battery', 'megacli.bbu_cycle_count', 'line'],
- 'lines': [
- ['bbu_{0}_cycle_count'.format(b.id), 'adapter {0}'.format(b.id)],
- ]
- }
- }
- )
-
- return order, charts
-
-
-RE_ADAPTER = re.compile(
- r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z ]+)'
-)
-
-RE_PD = re.compile(
- r'Slot Number: ([0-9]+) Media Error Count: ([0-9]+) Predictive Failure Count: ([0-9]+)'
-)
-
-RE_BATTERY = re.compile(
- r'BBU Capacity Info for Adapter: ([0-9]+) Relative State of Charge: ([0-9]+) % Cycle Count: ([0-9]+)'
-)
-
-
-def find_adapters(d):
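-    # Keep only the "Adapter #N" and "State ..." lines, flatten them into one
-    # string, and let a single regex pair each adapter with its state.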
- keys = ('Adapter #', 'State')
- d = ' '.join(v.strip() for v in d if v.startswith(keys))
- return [Adapter(*v) for v in RE_ADAPTER.findall(d)]
-
-
-def find_pds(d):
- keys = ('Slot Number', 'Media Error Count', 'Predictive Failure Count')
- d = ' '.join(v.strip() for v in d if v.startswith(keys))
-    return [PD(*v) for v in RE_PD.findall(d)]
-
-
-def find_batteries(d):
- keys = ('BBU Capacity Info for Adapter', 'Relative State of Charge', 'Cycle Count')
- d = ' '.join(v.strip() for v in d if v.strip().startswith(keys))
- return [Battery(*v) for v in RE_BATTERY.findall(d)]
-
-
-class Adapter:
- def __init__(self, n, state):
- self.id = n
- # TODO: Rewrite all of this
- self.state = int(state in ("Partially Degraded", "Degraded", "Failed"))
-
- def data(self):
- return {
- 'adapter_{0}_degraded'.format(self.id): self.state,
- }
-
-
-class PD:
- def __init__(self, n, media_err, predict_fail):
- self.id = n
- self.media_err = media_err
- self.predict_fail = predict_fail
-
- def data(self):
- return {
- 'slot_{0}_media_error'.format(self.id): self.media_err,
- 'slot_{0}_predictive_failure'.format(self.id): self.predict_fail,
- }
-
-
-class Battery:
- def __init__(self, adapt_id, rel_charge, cycle_count):
- self.id = adapt_id
- self.rel_charge = rel_charge
- self.cycle_count = cycle_count
-
- def data(self):
- return {
- 'bbu_{0}_relative_charge'.format(self.id): self.rel_charge,
- 'bbu_{0}_cycle_count'.format(self.id): self.cycle_count,
- }
-
-
-# TODO: hardcoded sudo...
-class Megacli:
- def __init__(self):
- self.s = find_binary('sudo')
- self.m = find_binary('megacli') or find_binary('MegaCli') # Binary on FreeBSD is MegaCli
- self.sudo_check = [self.s, '-n', '-l']
- self.disk_info = [self.s, '-n', self.m, '-LDPDInfo', '-aAll', '-NoLog']
- self.battery_info = [self.s, '-n', self.m, '-AdpBbuCmd', '-a0', '-NoLog']
-
- def __bool__(self):
- return bool(self.s and self.m)
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = list()
- self.definitions = dict()
- self.do_battery = self.configuration.get('do_battery')
- self.megacli = Megacli()
-
- def check_sudo(self):
- err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True)
- if err:
- self.error(''.join(err))
- return False
- return True
-
- def check_disk_info(self):
- d = self._get_raw_data(command=self.megacli.disk_info)
- if not d:
- return False
-
- ads = find_adapters(d)
- pds = find_pds(d)
-
- if not (ads and pds):
- self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.disk_info)))
- return False
-
- o, c = adapter_charts(ads)
- self.order.extend(o)
- self.definitions.update(c)
-
- o, c = pd_charts(pds)
- self.order.extend(o)
- self.definitions.update(c)
-
- return True
-
- def check_battery(self):
- d = self._get_raw_data(command=self.megacli.battery_info)
- if not d:
- return False
-
- bats = find_batteries(d)
-
- if not bats:
- self.error('failed to parse "{0}" output'.format(' '.join(self.megacli.battery_info)))
- return False
-
- o, c = battery_charts(bats)
- self.order.extend(o)
- self.definitions.update(c)
- return True
-
- def check(self):
- if not self.megacli:
- self.error('can\'t locate "sudo" or "megacli" binary')
-            return False
-
- if not (self.check_sudo() and self.check_disk_info()):
- return False
-
- if self.do_battery:
- self.do_battery = self.check_battery()
-
- return True
-
- def get_data(self):
- data = dict()
-
- data.update(self.get_adapter_pd_data())
-
- if self.do_battery:
- data.update(self.get_battery_data())
-
- return data or None
-
- def get_adapter_pd_data(self):
- raw = self._get_raw_data(command=self.megacli.disk_info)
- data = dict()
-
- if not raw:
- return data
-
- for a in find_adapters(raw):
- data.update(a.data())
-
- for p in find_pds(raw):
- data.update(p.data())
-
- return data
-
- def get_battery_data(self):
- raw = self._get_raw_data(command=self.megacli.battery_info)
- data = dict()
-
- if not raw:
- return data
-
- for b in find_batteries(raw):
- data.update(b.data())
-
- return data
diff --git a/collectors/python.d.plugin/megacli/megacli.conf b/collectors/python.d.plugin/megacli/megacli.conf
deleted file mode 100644
index 1af4292d9..000000000
--- a/collectors/python.d.plugin/megacli/megacli.conf
+++ /dev/null
@@ -1,60 +0,0 @@
-# netdata python.d.plugin configuration for megacli
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, megacli also supports the following:
-#
-# do_battery: yes/no # default is no. Battery stats (adds additional call to megacli `megacli -AdpBbuCmd -a0`).
-#
-# ----------------------------------------------------------------------
-# uncomment the line below to collect battery statistics
-# do_battery: yes
diff --git a/collectors/python.d.plugin/megacli/metadata.yaml b/collectors/python.d.plugin/megacli/metadata.yaml
deleted file mode 100644
index 4a2ba43ee..000000000
--- a/collectors/python.d.plugin/megacli/metadata.yaml
+++ /dev/null
@@ -1,193 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: megacli
- monitored_instance:
- name: MegaCLI
- link: "https://wikitech.wikimedia.org/wiki/MegaCli"
- categories:
- - data-collection.storage-mount-points-and-filesystems
- icon_filename: "hard-drive.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - storage
- - raid-controller
- - manage-disks
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Examine MegaCLI metrics with Netdata for insights into RAID controller performance. Improve your RAID controller efficiency with real-time MegaCLI metrics."
- method_description: |
-        Collects adapter, physical drive, and battery stats using the megacli command-line tool
-
- Executed commands:
-
- - `sudo -n megacli -LDPDInfo -aAll`
- - `sudo -n megacli -AdpBbuCmd -a0`
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: "The module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password."
- default_behavior:
- auto_detection:
-          description: "After all the permissions are satisfied, netdata should be able to execute commands via the megacli command-line utility."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
-      - title: Grant permissions for netdata to run megacli as a sudoer
- description: |
- The module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.
-
-          Add to your `/etc/sudoers` file (`which megacli` shows the full path to the binary):
-
- ```bash
- netdata ALL=(root) NOPASSWD: /path/to/megacli
- ```
- - title: "Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)"
- description: |
-          The default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute megacli using sudo.
-
- As root user, do the following:
-
- ```bash
- mkdir /etc/systemd/system/netdata.service.d
- echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
- systemctl daemon-reload
- systemctl restart netdata.service
- ```
- configuration:
- file:
- name: "python.d/megacli.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: do_battery
-            description: Whether to collect battery stats (adds an additional `megacli -AdpBbuCmd -a0` call). The default is no.
- default_value: no
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- folding:
- enabled: false
- description: A basic example configuration per job
- config: |
- job_name:
- name: myname
- update_every: 1
- priority: 60000
- penalty: yes
- autodetection_retry: 0
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: megacli_adapter_state
- link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
- metric: megacli.adapter_degraded
- info: "adapter is in the degraded state (0: false, 1: true)"
- - name: megacli_pd_media_errors
- link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
- metric: megacli.pd_media_error
- info: number of physical drive media errors
- - name: megacli_pd_predictive_failures
- link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
- metric: megacli.pd_predictive_failure
- info: number of physical drive predictive failures
- - name: megacli_bbu_relative_charge
- link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
- metric: megacli.bbu_relative_charge
- info: average battery backup unit (BBU) relative state of charge over the last 10 seconds
- - name: megacli_bbu_cycle_count
- link: https://github.com/netdata/netdata/blob/master/health/health.d/megacli.conf
- metric: megacli.bbu_cycle_count
- info: average battery backup unit (BBU) charge cycles count over the last 10 seconds
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: megacli.adapter_degraded
- description: Adapter State
- unit: "is degraded"
- chart_type: line
- dimensions:
- - name: a dimension per adapter
- - name: megacli.pd_media_error
- description: Physical Drives Media Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: a dimension per physical drive
- - name: megacli.pd_predictive_failure
- description: Physical Drives Predictive Failures
- unit: "failures/s"
- chart_type: line
- dimensions:
- - name: a dimension per physical drive
- - name: battery
-        description: "Metrics related to Battery Backup Units. Each BBU provides its own set of the following metrics."
- labels: []
- metrics:
- - name: megacli.bbu_relative_charge
- description: Relative State of Charge
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: adapter {battery id}
- - name: megacli.bbu_cycle_count
- description: Cycle Count
- unit: "cycle count"
- chart_type: line
- dimensions:
- - name: adapter {battery id}
diff --git a/collectors/python.d.plugin/memcached/Makefile.inc b/collectors/python.d.plugin/memcached/Makefile.inc
deleted file mode 100644
index e60357161..000000000
--- a/collectors/python.d.plugin/memcached/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += memcached/memcached.chart.py
-dist_pythonconfig_DATA += memcached/memcached.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += memcached/README.md memcached/Makefile.inc
-
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
deleted file mode 120000
index 2cb76d33c..000000000
--- a/collectors/python.d.plugin/memcached/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/memcached.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/memcached/integrations/memcached.md b/collectors/python.d.plugin/memcached/integrations/memcached.md
deleted file mode 100644
index 113b86c8c..000000000
--- a/collectors/python.d.plugin/memcached/integrations/memcached.md
+++ /dev/null
@@ -1,215 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/memcached/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/memcached/metadata.yaml"
-sidebar_label: "Memcached"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Databases"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Memcached
-
-
-<img src="https://netdata.cloud/img/memcached.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: memcached
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Memcached metrics for proficient in-memory key-value store operations. Track cache hits, misses, and memory usage for efficient data caching.
-
-It reads the server's response to the `stats` command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats)).
-
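-For illustration, a minimal sketch of that exchange (a sketch only, assuming a reachable memcached on the default port; the collector itself performs this through the python.d framework's `SocketService`, see `memcached.chart.py`):
-
-```python
-import socket
-
-# connect, send the plain-text "stats" command, and read until the "END" terminator
-sock = socket.create_connection(('127.0.0.1', 11211), timeout=1)
-sock.sendall(b'stats\r\n')
-raw = b''
-while not raw.endswith(b'END\r\n'):
-    chunk = sock.recv(4096)
-    if not chunk:
-        break
-    raw += chunk
-sock.close()
-
-# each statistic arrives as a "STAT <name> <value>" line
-stats = {}
-for line in raw.decode().splitlines():
-    if line.startswith('STAT'):
-        name, _, value = line[5:].partition(' ')
-        stats[name] = value
-
-print(stats.get('curr_connections'))
-```
-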
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If no configuration is given, the collector will attempt to connect to a memcached instance at `127.0.0.1:11211`.
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Memcached instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| memcached.cache | available, used | MiB |
-| memcached.net | in, out | kilobits/s |
-| memcached.connections | current, rejected, total | connections/s |
-| memcached.items | current, total | items |
-| memcached.evicted_reclaimed | reclaimed, evicted | items |
-| memcached.get | hits, misses | requests |
-| memcached.get_rate | rate | requests/s |
-| memcached.set_rate | rate | requests/s |
-| memcached.delete | hits, misses | requests |
-| memcached.cas | hits, misses, bad value | requests |
-| memcached.increment | hits, misses | requests |
-| memcached.decrement | hits, misses | requests |
-| memcached.touch | hits, misses | requests |
-| memcached.touch_rate | rate | requests/s |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ memcached_cache_memory_usage ](https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf) | memcached.cache | cache memory utilization |
-| [ memcached_cache_fill_rate ](https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf) | memcached.cache | average rate the cache fills up (positive), or frees up (negative) space over the last hour |
-| [ memcached_out_of_cache_space_time ](https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf) | memcached.cache | estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/memcached.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/memcached.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| host | the host to connect to. | 127.0.0.1 | no |
-| port | the port to connect to. | 11211 | no |
-| update_every | Sets the default data collection frequency. | 10 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
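-The `penalty` option above enables the framework's failure backoff: as documented in the stock python.d configuration comments, the penalty grows every 5 consecutive failed updates and is capped at 10 minutes. As a rough illustration only (the exact formula lives in the python.d framework, not in this module), the effective interval behaves like:
-
-```python
-def effective_update_every(update_every, consecutive_failures, max_penalty=600):
-    # illustrative sketch only: grow the interval once per 5 consecutive
-    # failures, never exceeding the documented 10-minute (600 s) cap
-    backoff_steps = consecutive_failures // 5
-    return min(update_every * (1 + backoff_steps), max_penalty)
-```
-
-Under this sketch, a job with `update_every: 10` that fails 30 times in a row would be polled every 70 seconds until it recovers.
-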
-#### Examples
-
-##### localhost
-
-An example configuration for localhost.
-
-```yaml
-localhost:
- name: 'local'
- host: 'localhost'
- port: 11211
-
-```
-##### localipv4
-
-An example configuration for localipv4.
-
-<details><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 11211
-
-```
-</details>
-
-##### localipv6
-
-An example configuration for localipv6.
-
-<details><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local'
- host: '::1'
- port: 11211
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `memcached` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin memcached debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/memcached/memcached.chart.py b/collectors/python.d.plugin/memcached/memcached.chart.py
deleted file mode 100644
index adb9560b7..000000000
--- a/collectors/python.d.plugin/memcached/memcached.chart.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: memcached netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.SocketService import SocketService
-
-ORDER = [
- 'cache',
- 'net',
- 'connections',
- 'items',
- 'evicted_reclaimed',
- 'get',
- 'get_rate',
- 'set_rate',
- 'cas',
- 'delete',
- 'increment',
- 'decrement',
- 'touch',
- 'touch_rate',
-]
-
-CHARTS = {
- 'cache': {
- 'options': [None, 'Cache Size', 'MiB', 'cache', 'memcached.cache', 'stacked'],
- 'lines': [
- ['avail', 'available', 'absolute', 1, 1 << 20],
- ['used', 'used', 'absolute', 1, 1 << 20]
- ]
- },
- 'net': {
- 'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
- 'lines': [
- ['bytes_read', 'in', 'incremental', 8, 1000],
- ['bytes_written', 'out', 'incremental', -8, 1000],
- ]
- },
- 'connections': {
- 'options': [None, 'Connections', 'connections/s', 'connections', 'memcached.connections', 'line'],
- 'lines': [
- ['curr_connections', 'current', 'incremental'],
- ['rejected_connections', 'rejected', 'incremental'],
- ['total_connections', 'total', 'incremental']
- ]
- },
- 'items': {
- 'options': [None, 'Items', 'items', 'items', 'memcached.items', 'line'],
- 'lines': [
- ['curr_items', 'current', 'absolute'],
- ['total_items', 'total', 'absolute']
- ]
- },
- 'evicted_reclaimed': {
- 'options': [None, 'Evicted and Reclaimed Items', 'items', 'items', 'memcached.evicted_reclaimed', 'line'],
- 'lines': [
- ['reclaimed', 'reclaimed', 'absolute'],
- ['evictions', 'evicted', 'absolute']
- ]
- },
-    # the 'percent-of-absolute-row' algorithm used by the stacked charts below
-    # renders each dimension as a percentage of the row's total
-    'get': {
- 'options': [None, 'Get Requests', 'requests', 'get ops', 'memcached.get', 'stacked'],
- 'lines': [
- ['get_hits', 'hits', 'percent-of-absolute-row'],
- ['get_misses', 'misses', 'percent-of-absolute-row']
- ]
- },
- 'get_rate': {
- 'options': [None, 'Get Request Rate', 'requests/s', 'get ops', 'memcached.get_rate', 'line'],
- 'lines': [
- ['cmd_get', 'rate', 'incremental']
- ]
- },
- 'set_rate': {
- 'options': [None, 'Set Request Rate', 'requests/s', 'set ops', 'memcached.set_rate', 'line'],
- 'lines': [
- ['cmd_set', 'rate', 'incremental']
- ]
- },
- 'delete': {
- 'options': [None, 'Delete Requests', 'requests', 'delete ops', 'memcached.delete', 'stacked'],
- 'lines': [
- ['delete_hits', 'hits', 'percent-of-absolute-row'],
- ['delete_misses', 'misses', 'percent-of-absolute-row'],
- ]
- },
- 'cas': {
- 'options': [None, 'Check and Set Requests', 'requests', 'check and set ops', 'memcached.cas', 'stacked'],
- 'lines': [
- ['cas_hits', 'hits', 'percent-of-absolute-row'],
- ['cas_misses', 'misses', 'percent-of-absolute-row'],
- ['cas_badval', 'bad value', 'percent-of-absolute-row']
- ]
- },
- 'increment': {
- 'options': [None, 'Increment Requests', 'requests', 'increment ops', 'memcached.increment', 'stacked'],
- 'lines': [
- ['incr_hits', 'hits', 'percent-of-absolute-row'],
- ['incr_misses', 'misses', 'percent-of-absolute-row']
- ]
- },
- 'decrement': {
- 'options': [None, 'Decrement Requests', 'requests', 'decrement ops', 'memcached.decrement', 'stacked'],
- 'lines': [
- ['decr_hits', 'hits', 'percent-of-absolute-row'],
- ['decr_misses', 'misses', 'percent-of-absolute-row']
- ]
- },
- 'touch': {
- 'options': [None, 'Touch Requests', 'requests', 'touch ops', 'memcached.touch', 'stacked'],
- 'lines': [
- ['touch_hits', 'hits', 'percent-of-absolute-row'],
- ['touch_misses', 'misses', 'percent-of-absolute-row']
- ]
- },
- 'touch_rate': {
- 'options': [None, 'Touch Request Rate', 'requests/s', 'touch ops', 'memcached.touch_rate', 'line'],
- 'lines': [
- ['cmd_touch', 'rate', 'incremental']
- ]
- }
-}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.request = 'stats\r\n'
- self.host = 'localhost'
- self.port = 11211
- self._keep_alive = True
- self.unix_socket = None
-
- def _get_data(self):
- """
- Get data from socket
- :return: dict
- """
- response = self._get_raw_data()
- if response is None:
- # error has already been logged
- return None
-
- if response.startswith('ERROR'):
- self.error('received ERROR')
- return None
-
- try:
- parsed = response.split('\n')
- except AttributeError:
- self.error('response is invalid/empty')
- return None
-
- # split the response
- data = {}
- for line in parsed:
- if line.startswith('STAT'):
- try:
- t = line[5:].split(' ')
- data[t[0]] = t[1]
- except (IndexError, ValueError):
- self.debug('invalid line received: ' + str(line))
-
- if not data:
- self.error("received data doesn't have any records")
- return None
-
-        # custom calculations: limit_maxbytes is the configured cache size,
-        # bytes is the memory currently used to store items
- try:
- data['avail'] = int(data['limit_maxbytes']) - int(data['bytes'])
- data['used'] = int(data['bytes'])
- except (KeyError, ValueError, TypeError):
- pass
-
- return data
-
- def _check_raw_data(self, data):
- if data.endswith('END\r\n'):
- self.debug('received full response from memcached')
- return True
-
- self.debug('waiting more data from memcached')
- return False
-
- def check(self):
- """
- Parse configuration, check if memcached is available
- :return: boolean
- """
- self._parse_config()
- data = self._get_data()
- if data is None:
- return False
- return True
diff --git a/collectors/python.d.plugin/memcached/memcached.conf b/collectors/python.d.plugin/memcached/memcached.conf
deleted file mode 100644
index 3286b4623..000000000
--- a/collectors/python.d.plugin/memcached/memcached.conf
+++ /dev/null
@@ -1,90 +0,0 @@
-# netdata python.d.plugin configuration for memcached
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply a penalty to update_every in case of failures.
-# The penalty increases after every 5 consecutive failed updates, up to a maximum of 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, memcached also supports the following:
-#
-# socket: 'path/to/memcached.sock'
-#
-# or
-# host: 'IP or HOSTNAME' # the host to connect to
-# port: PORT # the port to connect to
-#
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 11211
-
-localipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 11211
-
-localipv6:
- name : 'local'
- host : '::1'
- port : 11211
-
diff --git a/collectors/python.d.plugin/memcached/metadata.yaml b/collectors/python.d.plugin/memcached/metadata.yaml
deleted file mode 100644
index 38c9f6853..000000000
--- a/collectors/python.d.plugin/memcached/metadata.yaml
+++ /dev/null
@@ -1,247 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: memcached
- monitored_instance:
- name: Memcached
- link: https://memcached.org/
- categories:
- - data-collection.database-servers
- icon_filename: "memcached.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - memcached
- - memcache
- - cache
- - database
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor Memcached metrics for proficient in-memory key-value store operations. Track cache hits, misses, and memory usage for efficient data caching."
-      method_description: "It reads the server's response to the `stats` command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats))."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: |
-            If no configuration is given, the collector will attempt to connect to a memcached instance at `127.0.0.1:11211`.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: python.d/memcached.conf
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: Config options
- enabled: true
- list:
- - name: host
- description: the host to connect to.
- default_value: "127.0.0.1"
- required: false
- - name: port
- description: the port to connect to.
- default_value: "11211"
- required: false
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 10
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: localhost
- description: An example configuration for localhost.
- folding:
- enabled: false
- config: |
- localhost:
- name: 'local'
- host: 'localhost'
- port: 11211
- - name: localipv4
- description: An example configuration for localipv4.
- folding:
- enabled: true
- config: |
- localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 11211
- - name: localipv6
- description: An example configuration for localipv6.
- folding:
- enabled: true
- config: |
- localhost:
- name: 'local'
- host: '::1'
- port: 11211
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: memcached_cache_memory_usage
- link: https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf
- metric: memcached.cache
- info: cache memory utilization
- - name: memcached_cache_fill_rate
- link: https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf
- metric: memcached.cache
- info: average rate the cache fills up (positive), or frees up (negative) space over the last hour
- - name: memcached_out_of_cache_space_time
- link: https://github.com/netdata/netdata/blob/master/health/health.d/memcached.conf
- metric: memcached.cache
- info: estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: memcached.cache
- description: Cache Size
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: available
- - name: used
- - name: memcached.net
- description: Network
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: in
- - name: out
- - name: memcached.connections
- description: Connections
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: current
- - name: rejected
- - name: total
- - name: memcached.items
- description: Items
- unit: "items"
- chart_type: line
- dimensions:
- - name: current
- - name: total
- - name: memcached.evicted_reclaimed
- description: Evicted and Reclaimed Items
- unit: "items"
- chart_type: line
- dimensions:
- - name: reclaimed
- - name: evicted
- - name: memcached.get
- description: Get Requests
- unit: "requests"
- chart_type: stacked
- dimensions:
-            - name: hits
- - name: misses
- - name: memcached.get_rate
- description: Get Request Rate
- unit: "requests/s"
- chart_type: line
- dimensions:
- - name: rate
- - name: memcached.set_rate
- description: Set Request Rate
- unit: "requests/s"
- chart_type: line
- dimensions:
- - name: rate
- - name: memcached.delete
- description: Delete Requests
- unit: "requests"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: memcached.cas
- description: Check and Set Requests
- unit: "requests"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: bad value
- - name: memcached.increment
- description: Increment Requests
- unit: "requests"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: memcached.decrement
- description: Decrement Requests
- unit: "requests"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: memcached.touch
- description: Touch Requests
- unit: "requests"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: misses
- - name: memcached.touch_rate
- description: Touch Request Rate
- unit: "requests/s"
- chart_type: line
- dimensions:
- - name: rate
diff --git a/collectors/python.d.plugin/monit/Makefile.inc b/collectors/python.d.plugin/monit/Makefile.inc
deleted file mode 100644
index 4a3673fd5..000000000
--- a/collectors/python.d.plugin/monit/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += monit/monit.chart.py
-dist_pythonconfig_DATA += monit/monit.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += monit/README.md monit/Makefile.inc
-
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
deleted file mode 120000
index ac69496f4..000000000
--- a/collectors/python.d.plugin/monit/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/monit.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/monit/integrations/monit.md b/collectors/python.d.plugin/monit/integrations/monit.md
deleted file mode 100644
index 18219141d..000000000
--- a/collectors/python.d.plugin/monit/integrations/monit.md
+++ /dev/null
@@ -1,214 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/monit/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/monit/metadata.yaml"
-sidebar_label: "Monit"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Synthetic Checks"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Monit
-
-
-<img src="https://netdata.cloud/img/monit.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: monit
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Monit targets such as filesystems, directories, files, FIFO pipes and more.
-
-
-It gathers data from Monit's XML interface.
-
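-For illustration, a minimal sketch of that exchange (a sketch only, assuming an unauthenticated Monit instance on `localhost:2812`; the collector itself performs this through the python.d framework's `UrlService`, see `monit.chart.py`):
-
-```python
-import urllib.request
-import xml.etree.ElementTree as ET
-
-# request the full status report in XML, as the collector does
-url = 'http://localhost:2812/_status?format=xml&level=full'
-root = ET.fromstring(urllib.request.urlopen(url, timeout=5).read())
-
-# every monitored target is a <service> element; per monit.h, type 3 is a process
-for svc in root.findall("./service[@type='3']"):
-    name = svc.find('name').text
-    status = svc.find('status').text  # '0' means no errors
-    print(name, 'ok' if status == '0' else 'failed')
-```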
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, this collector will attempt to connect to Monit at `http://localhost:2812`.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Monit instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| monit.filesystems | a dimension per target | filesystems |
-| monit.directories | a dimension per target | directories |
-| monit.files | a dimension per target | files |
-| monit.fifos | a dimension per target | pipes |
-| monit.programs | a dimension per target | programs |
-| monit.services | a dimension per target | processes |
-| monit.process_uptime | a dimension per target | seconds |
-| monit.process_threads | a dimension per target | threads |
-| monit.process_childrens | a dimension per target | children |
-| monit.hosts | a dimension per target | hosts |
-| monit.host_latency | a dimension per target | milliseconds |
-| monit.networks | a dimension per target | interfaces |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/monit.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/monit.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |
-| url | The URL to fetch Monit's metrics. | http://localhost:2812 | yes |
-| user | Username in case the URL is password protected. | | no |
-| pass | Password in case the URL is password protected. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic configuration example.
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://localhost:2812'
-
-```
-##### Basic Authentication
-
-Example using basic username and password in order to authenticate.
-
-<details><summary>Config</summary>
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://localhost:2812'
- user: 'foo'
- pass: 'bar'
-
-```
-</details>
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local'
- url: 'http://localhost:2812'
-
-remote_job:
- name: 'remote'
- url: 'http://192.0.2.1:2812'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `monit` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin monit debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/monit/metadata.yaml b/collectors/python.d.plugin/monit/metadata.yaml
deleted file mode 100644
index b51273188..000000000
--- a/collectors/python.d.plugin/monit/metadata.yaml
+++ /dev/null
@@ -1,217 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: monit
- monitored_instance:
- name: Monit
- link: https://mmonit.com/monit/
- categories:
- - data-collection.synthetic-checks
- icon_filename: "monit.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - monit
- - mmonit
- - supervision tool
- - monitrc
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors Monit targets such as filesystems, directories, files, FIFO pipes and more.
- method_description: |
- It gathers data from Monit's XML interface.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
-          description: By default, this collector will attempt to connect to Monit at `http://localhost:2812`.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "python.d/monit.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: >
- Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: "local"
- required: false
- - name: url
- description: The URL to fetch Monit's metrics.
- default_value: http://localhost:2812
- required: true
- - name: user
- description: Username in case the URL is password protected.
- default_value: ""
- required: false
- - name: pass
- description: Password in case the URL is password protected.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic configuration example.
- folding:
- enabled: false
- config: |
- localhost:
- name : 'local'
- url : 'http://localhost:2812'
- - name: Basic Authentication
- description: Example using basic username and password in order to authenticate.
- config: |
- localhost:
- name : 'local'
- url : 'http://localhost:2812'
- user: 'foo'
- pass: 'bar'
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- localhost:
- name: 'local'
- url: 'http://localhost:2812'
-
- remote_job:
- name: 'remote'
- url: 'http://192.0.2.1:2812'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: monit.filesystems
- description: Filesystems
- unit: "filesystems"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.directories
- description: Directories
- unit: "directories"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.files
- description: Files
- unit: "files"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.fifos
- description: Pipes (fifo)
- unit: "pipes"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.programs
- description: Programs statuses
- unit: "programs"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.services
- description: Processes statuses
- unit: "processes"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.process_uptime
- description: Processes uptime
- unit: "seconds"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.process_threads
- description: Processes threads
- unit: "threads"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.process_childrens
- description: Child processes
- unit: "children"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.hosts
- description: Hosts
- unit: "hosts"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.host_latency
- description: Hosts latency
- unit: "milliseconds"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.networks
- description: Network interfaces and addresses
- unit: "interfaces"
- chart_type: line
- dimensions:
- - name: a dimension per target
diff --git a/collectors/python.d.plugin/monit/monit.chart.py b/collectors/python.d.plugin/monit/monit.chart.py
deleted file mode 100644
index 5d926961b..000000000
--- a/collectors/python.d.plugin/monit/monit.chart.py
+++ /dev/null
@@ -1,360 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: monit netdata python.d module
-# Author: Evgeniy K. (n0guest)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import xml.etree.ElementTree as ET
-from collections import namedtuple
-
-from bases.FrameworkServices.UrlService import UrlService
-
-MonitType = namedtuple('MonitType', ('index', 'name'))
-
-# see enum Service_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
-# typedef enum {
-# Service_Filesystem = 0,
-# Service_Directory,
-# Service_File,
-# Service_Process,
-# Service_Host,
-# Service_System,
-# Service_Fifo,
-# Service_Program,
-# Service_Net,
-# Service_Last = Service_Net
-# } __attribute__((__packed__)) Service_Type;
-
-TYPE_FILESYSTEM = MonitType(0, 'filesystem')
-TYPE_DIRECTORY = MonitType(1, 'directory')
-TYPE_FILE = MonitType(2, 'file')
-TYPE_PROCESS = MonitType(3, 'process')
-TYPE_HOST = MonitType(4, 'host')
-TYPE_SYSTEM = MonitType(5, 'system')
-TYPE_FIFO = MonitType(6, 'fifo')
-TYPE_PROGRAM = MonitType(7, 'program')
-TYPE_NET = MonitType(8, 'net')
-
-TYPES = (
- TYPE_FILESYSTEM,
- TYPE_DIRECTORY,
- TYPE_FILE,
- TYPE_PROCESS,
- TYPE_HOST,
- TYPE_SYSTEM,
- TYPE_FIFO,
- TYPE_PROGRAM,
- TYPE_NET,
-)
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = [
- 'filesystem',
- 'directory',
- 'file',
- 'process',
- 'process_uptime',
- 'process_threads',
- 'process_children',
- 'host',
- 'host_latency',
- 'system',
- 'fifo',
- 'program',
- 'net'
-]
-
-CHARTS = {
- 'filesystem': {
- 'options': ['filesystems', 'Filesystems', 'filesystems', 'filesystem', 'monit.filesystems', 'line'],
- 'lines': []
- },
- 'directory': {
- 'options': ['directories', 'Directories', 'directories', 'filesystem', 'monit.directories', 'line'],
- 'lines': []
- },
- 'file': {
- 'options': ['files', 'Files', 'files', 'filesystem', 'monit.files', 'line'],
- 'lines': []
- },
- 'fifo': {
- 'options': ['fifos', 'Pipes (fifo)', 'pipes', 'filesystem', 'monit.fifos', 'line'],
- 'lines': []
- },
- 'program': {
- 'options': ['programs', 'Programs statuses', 'programs', 'applications', 'monit.programs', 'line'],
- 'lines': []
- },
- 'process': {
- 'options': ['processes', 'Processes statuses', 'processes', 'applications', 'monit.services', 'line'],
- 'lines': []
- },
- 'process_uptime': {
- 'options': ['processes uptime', 'Processes uptime', 'seconds', 'applications',
- 'monit.process_uptime', 'line', 'hidden'],
- 'lines': []
- },
- 'process_threads': {
- 'options': ['processes threads', 'Processes threads', 'threads', 'applications',
- 'monit.process_threads', 'line'],
- 'lines': []
- },
- 'process_children': {
- 'options': ['processes childrens', 'Child processes', 'children', 'applications',
- 'monit.process_childrens', 'line'],
- 'lines': []
- },
- 'host': {
- 'options': ['hosts', 'Hosts', 'hosts', 'network', 'monit.hosts', 'line'],
- 'lines': []
- },
- 'host_latency': {
- 'options': ['hosts latency', 'Hosts latency', 'milliseconds', 'network', 'monit.host_latency', 'line'],
- 'lines': []
- },
- 'net': {
- 'options': ['interfaces', 'Network interfaces and addresses', 'interfaces', 'network',
- 'monit.networks', 'line'],
- 'lines': []
- },
-}
-
-
-class BaseMonitService(object):
- def __init__(self, typ, name, status, monitor):
- self.type = typ
- self.name = name
- self.status = status
- self.monitor = monitor
-
- def __repr__(self):
- return 'MonitService({0}:{1})'.format(self.type.name, self.name)
-
- def __eq__(self, other):
- if not isinstance(other, BaseMonitService):
- return False
- return self.type == other.type and self.name == other.name
-
- def __ne__(self, other):
- return not self == other
-
- def __hash__(self):
- return hash(repr(self))
-
- def is_running(self):
-        # monit reports status '0' for "no errors" and monitor '1' for "actively monitored"
-        return self.status == '0' and self.monitor == '1'
-
- def key(self):
- return '{0}_{1}'.format(self.type.name, self.name)
-
- def data(self):
- return {self.key(): int(self.is_running())}
-
-
-class ProcessMonitService(BaseMonitService):
- def __init__(self, typ, name, status, monitor):
- super(ProcessMonitService, self).__init__(typ, name, status, monitor)
- self.uptime = None
- self.threads = None
- self.children = None
-
- def __eq__(self, other):
- return super(ProcessMonitService, self).__eq__(other)
-
- def __ne__(self, other):
- return super(ProcessMonitService, self).__ne__(other)
-
- def __hash__(self):
- return super(ProcessMonitService, self).__hash__()
-
- def uptime_key(self):
- return 'process_uptime_{0}'.format(self.name)
-
- def threads_key(self):
- return 'process_threads_{0}'.format(self.name)
-
- def children_key(self):
- return 'process_children_{0}'.format(self.name)
-
- def data(self):
- base_data = super(ProcessMonitService, self).data()
- # skipping bugged metrics with negative uptime (monit before v5.16)
- uptime = self.uptime if self.uptime and int(self.uptime) >= 0 else None
- data = {
- self.uptime_key(): uptime,
- self.threads_key(): self.threads,
- self.children_key(): self.children,
- }
- data.update(base_data)
-
- return data
-
-
-class HostMonitService(BaseMonitService):
- def __init__(self, typ, name, status, monitor):
- super(HostMonitService, self).__init__(typ, name, status, monitor)
- self.latency = None
-
- def __eq__(self, other):
- return super(HostMonitService, self).__eq__(other)
-
- def __ne__(self, other):
- return super(HostMonitService, self).__ne__(other)
-
- def __hash__(self):
- return super(HostMonitService, self).__hash__()
-
- def latency_key(self):
- return 'host_latency_{0}'.format(self.name)
-
- def data(self):
- base_data = super(HostMonitService, self).data()
-        # monit reports ICMP response time in seconds; scale to microseconds here so the
-        # chart dimension (multiplier 1000, divisor 1000000) can display milliseconds
-        latency = float(self.latency) * 1000000 if self.latency else None
- data = {self.latency_key(): latency}
- data.update(base_data)
-
- return data
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- base_url = self.configuration.get('url', "http://localhost:2812")
- self.url = '{0}/_status?format=xml&level=full'.format(base_url)
- self.active_services = list()
-
- def parse(self, raw):
- try:
- root = ET.fromstring(raw)
- except ET.ParseError:
- self.error("URL {0} didn't return a valid XML page. Please check your settings.".format(self.url))
- return None
- return root
-
- def _get_data(self):
- raw = self._get_raw_data()
- if not raw:
- return None
-
- root = self.parse(raw)
- if root is None:
- return None
-
- services = self.get_services(root)
- if not services:
- return None
-
- if len(self.charts) > 0:
- self.update_charts(services)
-
- data = dict()
-
- for svc in services:
- data.update(svc.data())
-
- return data
-
- def get_services(self, root):
- services = list()
-
- for typ in TYPES:
- if typ == TYPE_SYSTEM:
- self.debug("skipping service from '{0}' category, it's useless in graphs".format(TYPE_SYSTEM.name))
- continue
-
- xpath_query = "./service[@type='{0}']".format(typ.index)
- self.debug('Searching for {0} as {1}'.format(typ.name, xpath_query))
-
- for svc_root in root.findall(xpath_query):
- svc = create_service(svc_root, typ)
- self.debug('=> found {0} with type={1}, status={2}, monitoring={3}'.format(
- svc.name, svc.type.name, svc.status, svc.monitor))
-
- services.append(svc)
-
- return services
-
- def update_charts(self, services):
- remove = [svc for svc in self.active_services if svc not in services]
- add = [svc for svc in services if svc not in self.active_services]
-
- self.remove_services_from_charts(remove)
- self.add_services_to_charts(add)
-
- self.active_services = services
-
- def add_services_to_charts(self, services):
- for svc in services:
- if svc.type == TYPE_HOST:
- self.charts['host_latency'].add_dimension([svc.latency_key(), svc.name, 'absolute', 1000, 1000000])
- if svc.type == TYPE_PROCESS:
- self.charts['process_uptime'].add_dimension([svc.uptime_key(), svc.name])
- self.charts['process_threads'].add_dimension([svc.threads_key(), svc.name])
- self.charts['process_children'].add_dimension([svc.children_key(), svc.name])
- self.charts[svc.type.name].add_dimension([svc.key(), svc.name])
-
- def remove_services_from_charts(self, services):
- for svc in services:
- if svc.type == TYPE_HOST:
- self.charts['host_latency'].del_dimension(svc.latency_key(), False)
- if svc.type == TYPE_PROCESS:
- self.charts['process_uptime'].del_dimension(svc.uptime_key(), False)
- self.charts['process_threads'].del_dimension(svc.threads_key(), False)
- self.charts['process_children'].del_dimension(svc.children_key(), False)
- self.charts[svc.type.name].del_dimension(svc.key(), False)
-
-
-def create_service(root, typ):
- if typ == TYPE_HOST:
- return create_host_service(root)
- elif typ == TYPE_PROCESS:
- return create_process_service(root)
- return create_base_service(root, typ)
-
-
-def create_host_service(root):
- svc = HostMonitService(
- TYPE_HOST,
- root.find('name').text,
- root.find('status').text,
- root.find('monitor').text,
- )
-
- latency = root.find('./icmp/responsetime')
- if latency is not None:
- svc.latency = latency.text
-
- return svc
-
-
-def create_process_service(root):
- svc = ProcessMonitService(
- TYPE_PROCESS,
- root.find('name').text,
- root.find('status').text,
- root.find('monitor').text,
- )
-
- uptime = root.find('uptime')
- if uptime is not None:
- svc.uptime = uptime.text
-
- threads = root.find('threads')
- if threads is not None:
- svc.threads = threads.text
-
- children = root.find('children')
- if children is not None:
- svc.children = children.text
-
- return svc
-
-
-def create_base_service(root, typ):
- return BaseMonitService(
- typ,
- root.find('name').text,
- root.find('status').text,
- root.find('monitor').text,
- )
diff --git a/collectors/python.d.plugin/monit/monit.conf b/collectors/python.d.plugin/monit/monit.conf
deleted file mode 100644
index 9a3fb6938..000000000
--- a/collectors/python.d.plugin/monit/monit.conf
+++ /dev/null
@@ -1,86 +0,0 @@
-# netdata python.d.plugin configuration for monit
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply a penalty to update_every in case of failures.
-# The penalty increases after every 5 consecutive failed updates, up to a maximum of 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, this plugin also supports the following:
-#
-# url: 'URL' # the URL to fetch monit's status stats
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-# Example
-#
-# local:
-# name : 'Local Monit'
-# url : 'http://localhost:2812'
-#
-# "local" will show up in Netdata logs. "Local Monit" will show up in the menu
-# in the monit section.
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- url : 'http://localhost:2812'
diff --git a/collectors/python.d.plugin/nsd/Makefile.inc b/collectors/python.d.plugin/nsd/Makefile.inc
deleted file mode 100644
index 58e9fd67d..000000000
--- a/collectors/python.d.plugin/nsd/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += nsd/nsd.chart.py
-dist_pythonconfig_DATA += nsd/nsd.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += nsd/README.md nsd/Makefile.inc
-
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
deleted file mode 120000
index 59fcfe491..000000000
--- a/collectors/python.d.plugin/nsd/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/name_server_daemon.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md b/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md
deleted file mode 100644
index 0e66c44eb..000000000
--- a/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md
+++ /dev/null
@@ -1,199 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nsd/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nsd/metadata.yaml"
-sidebar_label: "Name Server Daemon"
-learn_status: "Published"
-learn_rel_path: "Data Collection/DNS and DHCP Servers"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Name Server Daemon
-
-
-<img src="https://netdata.cloud/img/nsd.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: nsd
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors NSD statistics like queries, zones, protocols, query types and more.
-
-
-It uses the `nsd-control stats_noreset` command to gather metrics.
-
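-For reference, `nsd-control stats_noreset` prints one `key=value` pair per line, which the collector parses. A hypothetical excerpt (real key names, illustrative values):
-
-```
-num.queries=3437
-num.type.A=2210
-num.rcode.NOERROR=3301
-zone.master=2
-zone.slave=1
-```
-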
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If permissions are satisfied, the collector will be able to run `nsd-control stats_noreset`, thus collecting metrics.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Name Server Daemon instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| nsd.queries | queries | queries/s |
-| nsd.zones | master, slave | zones |
-| nsd.protocols | udp, udp6, tcp, tcp6 | queries/s |
-| nsd.type | A, NS, CNAME, SOA, PTR, HINFO, MX, NAPTR, TXT, AAAA, SRV, ANY | queries/s |
-| nsd.transfer | NOTIFY, AXFR | queries/s |
-| nsd.rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN | queries/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### NSD version
-
-The version of `nsd` must be 4.0+.
-
-
-#### Provide Netdata the permissions to run the command
-
-Netdata must have permissions to run the `nsd-control stats_noreset` command.
-
-You can:
-
-- Add "netdata" user to "nsd" group:
- ```
- usermod -aG nsd netdata
- ```
-- Add Netdata to sudoers
- 1. Edit the sudoers file:
- ```
- visudo -f /etc/sudoers.d/netdata
- ```
- 2. Add the entry:
- ```
- Defaults:netdata !requiretty
- netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
- ```
-
- > Note that you will need to set the `command` option to `sudo /usr/sbin/nsd-control stats_noreset` if you use this method.
-
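-For example, with the sudoers method the job definition in `python.d/nsd.conf` might look like this:
-
-```yaml
-local:
-  update_every: 30
-  command: 'sudo /usr/sbin/nsd-control stats_noreset'
-```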
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/nsd.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/nsd.conf
-```
-#### Options
-
-This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 30 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| command | The command to run. | nsd-control stats_noreset | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic configuration example.
-
-```yaml
-local:
- name: 'nsd_local'
- command: 'nsd-control stats_noreset'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `nsd` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin nsd debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/nsd/metadata.yaml b/collectors/python.d.plugin/nsd/metadata.yaml
deleted file mode 100644
index f5e2c46b0..000000000
--- a/collectors/python.d.plugin/nsd/metadata.yaml
+++ /dev/null
@@ -1,201 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: nsd
- monitored_instance:
- name: Name Server Daemon
- link: https://nsd.docs.nlnetlabs.nl/en/latest/#
- categories:
- - data-collection.dns-and-dhcp-servers
- icon_filename: "nsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - nsd
- - name server daemon
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors NSD statistics like queries, zones, protocols, query types and more.
- method_description: |
- It uses the `nsd-control stats_noreset` command to gather metrics.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: If permissions are satisfied, the collector will be able to run `nsd-control stats_noreset`, thus collecting metrics.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: NSD version
- description: |
- The version of `nsd` must be 4.0+.
- - title: Provide Netdata the permissions to run the command
- description: |
- Netdata must have permissions to run the `nsd-control stats_noreset` command.
-
- You can:
-
- - Add "netdata" user to "nsd" group:
- ```
- usermod -aG nsd netdata
- ```
- - Add Netdata to sudoers
- 1. Edit the sudoers file:
- ```
- visudo -f /etc/sudoers.d/netdata
- ```
- 2. Add the entry:
- ```
- Defaults:netdata !requiretty
- netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
- ```
-
- > Note that you will need to set the `command` option to `sudo /usr/sbin/nsd-control stats_noreset` if you use this method.
-
- configuration:
- file:
- name: "python.d/nsd.conf"
- options:
- description: |
-        This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
-
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 30
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: >
- Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed
- running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: command
- description: The command to run
- default_value: "nsd-control stats_noreset"
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic configuration example.
- folding:
- enabled: false
- config: |
- local:
- name: 'nsd_local'
- command: 'nsd-control stats_noreset'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: nsd.queries
- description: queries
- unit: "queries/s"
- chart_type: line
- dimensions:
- - name: queries
- - name: nsd.zones
- description: zones
- unit: "zones"
- chart_type: stacked
- dimensions:
- - name: master
- - name: slave
- - name: nsd.protocols
- description: protocol
- unit: "queries/s"
- chart_type: stacked
- dimensions:
- - name: udp
- - name: udp6
- - name: tcp
- - name: tcp6
- - name: nsd.type
- description: query type
- unit: "queries/s"
- chart_type: stacked
- dimensions:
- - name: A
- - name: NS
- - name: CNAME
- - name: SOA
- - name: PTR
- - name: HINFO
- - name: MX
- - name: NAPTR
- - name: TXT
- - name: AAAA
- - name: SRV
- - name: ANY
- - name: nsd.transfer
- description: transfer
- unit: "queries/s"
- chart_type: stacked
- dimensions:
- - name: NOTIFY
- - name: AXFR
- - name: nsd.rcode
- description: return code
- unit: "queries/s"
- chart_type: stacked
- dimensions:
- - name: NOERROR
- - name: FORMERR
- - name: SERVFAIL
- - name: NXDOMAIN
- - name: NOTIMP
- - name: REFUSED
- - name: YXDOMAIN
diff --git a/collectors/python.d.plugin/nsd/nsd.chart.py b/collectors/python.d.plugin/nsd/nsd.chart.py
deleted file mode 100644
index 6f9b2cec8..000000000
--- a/collectors/python.d.plugin/nsd/nsd.chart.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: NSD `nsd-control stats_noreset` netdata python.d module
-# Author: <383c57 at gmail.com>
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import re
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-update_every = 30
-
-NSD_CONTROL_COMMAND = 'nsd-control stats_noreset'
-# stats come as one 'key=value' pair per line, e.g. 'num.queries=123'
-REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
-
-ORDER = [
- 'queries',
- 'zones',
- 'protocol',
- 'type',
- 'transfer',
- 'rcode',
-]
-
-CHARTS = {
- 'queries': {
- 'options': [None, 'queries', 'queries/s', 'queries', 'nsd.queries', 'line'],
- 'lines': [
- ['num_queries', 'queries', 'incremental']
- ]
- },
- 'zones': {
- 'options': [None, 'zones', 'zones', 'zones', 'nsd.zones', 'stacked'],
- 'lines': [
- ['zone_master', 'master', 'absolute'],
- ['zone_slave', 'slave', 'absolute']
- ]
- },
- 'protocol': {
- 'options': [None, 'protocol', 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
- 'lines': [
- ['num_udp', 'udp', 'incremental'],
- ['num_udp6', 'udp6', 'incremental'],
- ['num_tcp', 'tcp', 'incremental'],
- ['num_tcp6', 'tcp6', 'incremental']
- ]
- },
- 'type': {
- 'options': [None, 'query type', 'queries/s', 'query type', 'nsd.type', 'stacked'],
- 'lines': [
- ['num_type_A', 'A', 'incremental'],
- ['num_type_NS', 'NS', 'incremental'],
- ['num_type_CNAME', 'CNAME', 'incremental'],
- ['num_type_SOA', 'SOA', 'incremental'],
- ['num_type_PTR', 'PTR', 'incremental'],
- ['num_type_HINFO', 'HINFO', 'incremental'],
- ['num_type_MX', 'MX', 'incremental'],
- ['num_type_NAPTR', 'NAPTR', 'incremental'],
- ['num_type_TXT', 'TXT', 'incremental'],
- ['num_type_AAAA', 'AAAA', 'incremental'],
- ['num_type_SRV', 'SRV', 'incremental'],
- ['num_type_TYPE255', 'ANY', 'incremental']
- ]
- },
- 'transfer': {
- 'options': [None, 'transfer', 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
- 'lines': [
- ['num_opcode_NOTIFY', 'NOTIFY', 'incremental'],
- ['num_type_TYPE252', 'AXFR', 'incremental']
- ]
- },
- 'rcode': {
- 'options': [None, 'return code', 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
- 'lines': [
- ['num_rcode_NOERROR', 'NOERROR', 'incremental'],
- ['num_rcode_FORMERR', 'FORMERR', 'incremental'],
- ['num_rcode_SERVFAIL', 'SERVFAIL', 'incremental'],
- ['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'],
- ['num_rcode_NOTIMP', 'NOTIMP', 'incremental'],
- ['num_rcode_REFUSED', 'REFUSED', 'incremental'],
- ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental']
- ]
- }
-}
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.command = NSD_CONTROL_COMMAND
-
-    def _get_data(self):
-        lines = self._get_raw_data()
-        if not lines:
-            return None
-
-        # turn 'num.queries=123' style lines into {'num_queries': 123},
-        # matching the dimension ids used in CHARTS
-        stats = dict(
-            (k.replace('.', '_'), int(v)) for k, v in REGEX.findall(''.join(lines))
-        )
-        # these counters may be absent from the output, so default them
-        # to 0 to keep their dimensions populated
-        stats.setdefault('num_opcode_NOTIFY', 0)
-        stats.setdefault('num_type_TYPE252', 0)
-        stats.setdefault('num_type_TYPE255', 0)
-
-        return stats
diff --git a/collectors/python.d.plugin/nsd/nsd.conf b/collectors/python.d.plugin/nsd/nsd.conf
deleted file mode 100644
index 77a8a3177..000000000
--- a/collectors/python.d.plugin/nsd/nsd.conf
+++ /dev/null
@@ -1,91 +0,0 @@
-# netdata python.d.plugin configuration for nsd
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# nsd-control is slow, so once every 30 seconds
-# update_every: 30
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, nsd also supports the following:
-#
-# command: 'nsd-control stats_noreset' # the command to run
-#
-
-# ----------------------------------------------------------------------
-# IMPORTANT Information
-#
-# Netdata must have permissions to run `nsd-control stats_noreset` command
-#
-# - Example-1 (use "sudo")
-# 1. sudoers (e.g. visudo -f /etc/sudoers.d/netdata)
-# Defaults:netdata !requiretty
-# netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
-# 2. etc/netdata/python.d/nsd.conf
-# local:
-# update_every: 30
-# command: 'sudo /usr/sbin/nsd-control stats_noreset'
-#
-# - Example-2 (add "netdata" user to "nsd" group)
-# usermod -aG nsd netdata
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-
-local:
- update_every: 30
- command: 'nsd-control stats_noreset'
diff --git a/collectors/python.d.plugin/nvidia_smi/Makefile.inc b/collectors/python.d.plugin/nvidia_smi/Makefile.inc
deleted file mode 100644
index 52fb25a68..000000000
--- a/collectors/python.d.plugin/nvidia_smi/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += nvidia_smi/nvidia_smi.chart.py
-dist_pythonconfig_DATA += nvidia_smi/nvidia_smi.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += nvidia_smi/README.md nvidia_smi/Makefile.inc
-
diff --git a/collectors/python.d.plugin/nvidia_smi/README.md b/collectors/python.d.plugin/nvidia_smi/README.md
deleted file mode 100644
index 7d45289a4..000000000
--- a/collectors/python.d.plugin/nvidia_smi/README.md
+++ /dev/null
@@ -1,157 +0,0 @@
-<!--
-title: "Nvidia GPU monitoring with Netdata"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nvidia_smi/README.md"
-sidebar_label: "nvidia_smi-python.d.plugin"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/Devices"
--->
-
-# Nvidia GPU collector
-
-Monitors performance metrics (memory usage, fan speed, PCIe bandwidth utilization, temperature, etc.) using the `nvidia-smi` CLI tool.
-
-## Requirements and Notes
-
-- You must have the `nvidia-smi` tool installed and your NVIDIA GPU(s) must support it (mostly the newer high-end models used for AI/ML and crypto workloads, or the professional range). Read more about [nvidia_smi](https://developer.nvidia.com/nvidia-system-management-interface).
-- You must enable this plugin, as it's disabled by default due to minor performance issues:
- ```bash
- cd /etc/netdata # Replace this path with your Netdata config directory, if different
- sudo ./edit-config python.d.conf
- ```
- Remove the '#' before nvidia_smi so it reads: `nvidia_smi: yes`.
-
-- On some systems, when the GPU is idle the `nvidia-smi` tool unloads, adding latency again when it is next queried. If you are running GPUs under constant workload, this isn't likely to be an issue.
-- Currently the `nvidia-smi` tool is queried via the CLI. Updating the plugin to use the NVIDIA C/C++ API directly should resolve this issue. See the discussion here: <https://github.com/netdata/netdata/pull/4357>
-- Contributions are welcome.
-- Make sure the `netdata` user can execute `/usr/bin/nvidia-smi`, or wherever your binary is.
-- If the `nvidia-smi` process [is not killed after netdata restart](https://github.com/netdata/netdata/issues/7143), you need to disable `loop_mode` (see the snippet below).
-- `poll_seconds` is an integer that sets how often, in seconds, the tool is polled.
-
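-For example, a minimal sketch of `python.d/nvidia_smi.conf` that disables loop mode:
-
-```yaml
-loop_mode: no
-```
-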
-## Charts
-
-It produces the following charts:
-
-- PCI Express Bandwidth Utilization in `KiB/s`
-- Fan Speed in `percentage`
-- GPU Utilization in `percentage`
-- Memory Bandwidth Utilization in `percentage`
-- Encoder/Decoder Utilization in `percentage`
-- Memory Usage in `MiB`
-- Temperature in `celsius`
-- Clock Frequencies in `MHz`
-- Power Utilization in `Watts`
-- Memory Used by Each Process in `MiB`
-- Memory Used by Each User in `MiB`
-- Number of Users on GPU in `num`
-
-## Configuration
-
-Edit the `python.d/nvidia_smi.conf` configuration file using `edit-config` from the Netdata [config
-directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/nvidia_smi.conf
-```
-
-Sample:
-
-```yaml
-loop_mode : yes
-poll_seconds : 1
-exclude_zero_memory_users : yes
-```
-
-
-### Troubleshooting
-
-To troubleshoot issues with the `nvidia_smi` module, run the `python.d.plugin` with the debug option enabled. The
-output will give you the output of the data collection job or error messages on why the collector isn't working.
-
-First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's
-not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the
-plugin's directory, switch to the `netdata` user.
-
-```bash
-cd /usr/libexec/netdata/plugins.d/
-sudo su -s /bin/bash netdata
-```
-
-Now you can manually run the `nvidia_smi` module in debug mode:
-
-```bash
-./python.d.plugin nvidia_smi debug trace
-```
-
-## Docker
-
-GPU monitoring in a Docker container is possible with [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) installed on the host system, and `gcompat` added to the `NETDATA_EXTRA_APK_PACKAGES` environment variable.
-
-Sample `docker-compose.yml`
-```yaml
-version: '3'
-services:
- netdata:
- image: netdata/netdata
- container_name: netdata
- hostname: example.com # set to fqdn of host
- ports:
- - 19999:19999
- restart: unless-stopped
- cap_add:
- - SYS_PTRACE
- security_opt:
- - apparmor:unconfined
- environment:
- - NETDATA_EXTRA_APK_PACKAGES=gcompat
- volumes:
- - netdataconfig:/etc/netdata
- - netdatalib:/var/lib/netdata
- - netdatacache:/var/cache/netdata
- - /etc/passwd:/host/etc/passwd:ro
- - /etc/group:/host/etc/group:ro
- - /proc:/host/proc:ro
- - /sys:/host/sys:ro
- - /etc/os-release:/host/etc/os-release:ro
- deploy:
- resources:
- reservations:
- devices:
- - driver: nvidia
- count: all
- capabilities: [gpu]
-
-volumes:
- netdataconfig:
- netdatalib:
- netdatacache:
-```
-
-Sample `docker run`
-```bash
-docker run -d --name=netdata \
- -p 19999:19999 \
- -e NETDATA_EXTRA_APK_PACKAGES=gcompat \
- -v netdataconfig:/etc/netdata \
- -v netdatalib:/var/lib/netdata \
- -v netdatacache:/var/cache/netdata \
- -v /etc/passwd:/host/etc/passwd:ro \
- -v /etc/group:/host/etc/group:ro \
- -v /proc:/host/proc:ro \
- -v /sys:/host/sys:ro \
- -v /etc/os-release:/host/etc/os-release:ro \
- --restart unless-stopped \
- --cap-add SYS_PTRACE \
- --security-opt apparmor=unconfined \
- --gpus all \
- netdata/netdata
-```
-
-### Docker Troubleshooting
-To troubleshoot `nvidia-smi` in a Docker container, first confirm that `nvidia-smi` is working on the host system. If that is working correctly, run `docker exec -it netdata nvidia-smi` to confirm it's working within the Docker container. If `nvidia-smi` is functioning both inside and outside of the container, confirm that `nvidia_smi: yes` is uncommented in `python.d.conf`.
-```bash
-docker exec -it netdata bash
-cd /etc/netdata
-./edit-config python.d.conf
-```
diff --git a/collectors/python.d.plugin/nvidia_smi/metadata.yaml b/collectors/python.d.plugin/nvidia_smi/metadata.yaml
deleted file mode 100644
index 9bf1e6ca7..000000000
--- a/collectors/python.d.plugin/nvidia_smi/metadata.yaml
+++ /dev/null
@@ -1,166 +0,0 @@
-# This collector will not appear in documentation, as the go version is preferred,
-# https://github.com/netdata/go.d.plugin/blob/master/modules/nvidia_smi/README.md
-#
-# meta:
-# plugin_name: python.d.plugin
-# module_name: nvidia_smi
-# monitored_instance:
-# name: python.d nvidia_smi
-# link: ''
-# categories: []
-# icon_filename: ''
-# related_resources:
-# integrations:
-# list: []
-# info_provided_to_referring_integrations:
-# description: ''
-# keywords: []
-# most_popular: false
-# overview:
-# data_collection:
-# metrics_description: ''
-# method_description: ''
-# supported_platforms:
-# include: []
-# exclude: []
-# multi_instance: true
-# additional_permissions:
-# description: ''
-# default_behavior:
-# auto_detection:
-# description: ''
-# limits:
-# description: ''
-# performance_impact:
-# description: ''
-# setup:
-# prerequisites:
-# list: []
-# configuration:
-# file:
-# name: ''
-# description: ''
-# options:
-# description: ''
-# folding:
-# title: ''
-# enabled: true
-# list: []
-# examples:
-# folding:
-# enabled: true
-# title: ''
-# list: []
-# troubleshooting:
-# problems:
-# list: []
-# alerts: []
-# metrics:
-# folding:
-# title: Metrics
-# enabled: false
-# description: ""
-# availability: []
-# scopes:
-# - name: GPU
-# description: ""
-# labels: []
-# metrics:
-# - name: nvidia_smi.pci_bandwidth
-# description: PCI Express Bandwidth Utilization
-# unit: "KiB/s"
-# chart_type: area
-# dimensions:
-# - name: rx
-# - name: tx
-# - name: nvidia_smi.pci_bandwidth_percent
-# description: PCI Express Bandwidth Percent
-# unit: "percentage"
-# chart_type: area
-# dimensions:
-# - name: rx_percent
-# - name: tx_percent
-# - name: nvidia_smi.fan_speed
-# description: Fan Speed
-# unit: "percentage"
-# chart_type: line
-# dimensions:
-# - name: speed
-# - name: nvidia_smi.gpu_utilization
-# description: GPU Utilization
-# unit: "percentage"
-# chart_type: line
-# dimensions:
-# - name: utilization
-# - name: nvidia_smi.mem_utilization
-# description: Memory Bandwidth Utilization
-# unit: "percentage"
-# chart_type: line
-# dimensions:
-# - name: utilization
-# - name: nvidia_smi.encoder_utilization
-# description: Encoder/Decoder Utilization
-# unit: "percentage"
-# chart_type: line
-# dimensions:
-# - name: encoder
-# - name: decoder
-# - name: nvidia_smi.memory_allocated
-# description: Memory Usage
-# unit: "MiB"
-# chart_type: stacked
-# dimensions:
-# - name: free
-# - name: used
-# - name: nvidia_smi.bar1_memory_usage
-# description: Bar1 Memory Usage
-# unit: "MiB"
-# chart_type: stacked
-# dimensions:
-# - name: free
-# - name: used
-# - name: nvidia_smi.temperature
-# description: Temperature
-# unit: "celsius"
-# chart_type: line
-# dimensions:
-# - name: temp
-# - name: nvidia_smi.clocks
-# description: Clock Frequencies
-# unit: "MHz"
-# chart_type: line
-# dimensions:
-# - name: graphics
-# - name: video
-# - name: sm
-# - name: mem
-# - name: nvidia_smi.power
-# description: Power Utilization
-# unit: "Watts"
-# chart_type: line
-# dimensions:
-# - name: power
-# - name: nvidia_smi.power_state
-# description: Power State
-# unit: "state"
-# chart_type: line
-# dimensions:
-# - name: a dimension per {power_state}
-# - name: nvidia_smi.processes_mem
-# description: Memory Used by Each Process
-# unit: "MiB"
-# chart_type: stacked
-# dimensions:
-# - name: a dimension per process
-# - name: nvidia_smi.user_mem
-# description: Memory Used by Each User
-# unit: "MiB"
-# chart_type: stacked
-# dimensions:
-# - name: a dimension per user
-# - name: nvidia_smi.user_num
-# description: Number of User on GPU
-# unit: "num"
-# chart_type: line
-# dimensions:
-# - name: users
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
deleted file mode 100644
index 556a61435..000000000
--- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
+++ /dev/null
@@ -1,651 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: nvidia-smi netdata python.d module
-# Original Author: Steven Noonan (tycho)
-# Author: Ilya Mashchenko (ilyam8)
-# User Memory Stat Author: Guido Scatena (scatenag)
-
-import os
-import pwd
-import subprocess
-import threading
-import xml.etree.ElementTree as et
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from bases.collection import find_binary
-
-disabled_by_default = True
-
-NVIDIA_SMI = 'nvidia-smi'
-
-NOT_AVAILABLE = 'N/A'
-
-EMPTY_ROW = ''
-EMPTY_ROW_LIMIT = 500
-POLLER_BREAK_ROW = '</nvidia_smi_log>'
-
-PCI_BANDWIDTH = 'pci_bandwidth'
-PCI_BANDWIDTH_PERCENT = 'pci_bandwidth_percent'
-FAN_SPEED = 'fan_speed'
-GPU_UTIL = 'gpu_utilization'
-MEM_UTIL = 'mem_utilization'
-ENCODER_UTIL = 'encoder_utilization'
-MEM_USAGE = 'mem_usage'
-BAR_USAGE = 'bar1_mem_usage'
-TEMPERATURE = 'temperature'
-CLOCKS = 'clocks'
-POWER = 'power'
-POWER_STATE = 'power_state'
-PROCESSES_MEM = 'processes_mem'
-USER_MEM = 'user_mem'
-USER_NUM = 'user_num'
-
-ORDER = [
- PCI_BANDWIDTH,
- PCI_BANDWIDTH_PERCENT,
- FAN_SPEED,
- GPU_UTIL,
- MEM_UTIL,
- ENCODER_UTIL,
- MEM_USAGE,
- BAR_USAGE,
- TEMPERATURE,
- CLOCKS,
- POWER,
- POWER_STATE,
- PROCESSES_MEM,
- USER_MEM,
- USER_NUM,
-]
-
-# https://docs.nvidia.com/gameworks/content/gameworkslibrary/coresdk/nvapi/group__gpupstate.html
-POWER_STATES = ['P' + str(i) for i in range(0, 16)]
-
-# PCI Transfer data rate in gigabits per second (Gb/s) per generation
-PCI_SPEED = {
- "1": 2.5,
- "2": 5,
- "3": 8,
- "4": 16,
- "5": 32
-}
-# PCI encoding overhead per generation: 8b/10b for gen1/2, 128b/130b for gen3+
-PCI_ENCODING = {
- "1": 2 / 10,
- "2": 2 / 10,
- "3": 2 / 130,
- "4": 2 / 130,
- "5": 2 / 130
-}
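-# Worked example (hypothetical link): gen3 x16 gives
-#   (8 * 16 * (1 - 2 / 130) - 1) * 1000 * 1000 / 8 ~= 15.6e6 kB/s (~15.6 GB/s),
-# which is the calculation GPU.pci_bw_max() performs below.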
-
-
-def gpu_charts(gpu):
- fam = gpu.full_name()
-
- charts = {
- PCI_BANDWIDTH: {
- 'options': [None, 'PCI Express Bandwidth Utilization', 'KiB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'],
- 'lines': [
- ['rx_util', 'rx', 'absolute', 1, 1],
- ['tx_util', 'tx', 'absolute', 1, -1],
- ]
- },
- PCI_BANDWIDTH_PERCENT: {
- 'options': [None, 'PCI Express Bandwidth Percent', 'percentage', fam, 'nvidia_smi.pci_bandwidth_percent',
- 'area'],
- 'lines': [
- ['rx_util_percent', 'rx_percent'],
- ['tx_util_percent', 'tx_percent'],
- ]
- },
- FAN_SPEED: {
- 'options': [None, 'Fan Speed', 'percentage', fam, 'nvidia_smi.fan_speed', 'line'],
- 'lines': [
- ['fan_speed', 'speed'],
- ]
- },
- GPU_UTIL: {
- 'options': [None, 'GPU Utilization', 'percentage', fam, 'nvidia_smi.gpu_utilization', 'line'],
- 'lines': [
- ['gpu_util', 'utilization'],
- ]
- },
- MEM_UTIL: {
- 'options': [None, 'Memory Bandwidth Utilization', 'percentage', fam, 'nvidia_smi.mem_utilization', 'line'],
- 'lines': [
- ['memory_util', 'utilization'],
- ]
- },
- ENCODER_UTIL: {
- 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization',
- 'line'],
- 'lines': [
- ['encoder_util', 'encoder'],
- ['decoder_util', 'decoder'],
- ]
- },
- MEM_USAGE: {
- 'options': [None, 'Memory Usage', 'MiB', fam, 'nvidia_smi.memory_allocated', 'stacked'],
- 'lines': [
- ['fb_memory_free', 'free'],
- ['fb_memory_used', 'used'],
- ]
- },
- BAR_USAGE: {
- 'options': [None, 'Bar1 Memory Usage', 'MiB', fam, 'nvidia_smi.bar1_memory_usage', 'stacked'],
- 'lines': [
- ['bar1_memory_free', 'free'],
- ['bar1_memory_used', 'used'],
- ]
- },
- TEMPERATURE: {
- 'options': [None, 'Temperature', 'celsius', fam, 'nvidia_smi.temperature', 'line'],
- 'lines': [
- ['gpu_temp', 'temp'],
- ]
- },
- CLOCKS: {
- 'options': [None, 'Clock Frequencies', 'MHz', fam, 'nvidia_smi.clocks', 'line'],
- 'lines': [
- ['graphics_clock', 'graphics'],
- ['video_clock', 'video'],
- ['sm_clock', 'sm'],
- ['mem_clock', 'mem'],
- ]
- },
- POWER: {
- 'options': [None, 'Power Utilization', 'Watts', fam, 'nvidia_smi.power', 'line'],
- 'lines': [
- ['power_draw', 'power', 'absolute', 1, 100],
- ]
- },
- POWER_STATE: {
- 'options': [None, 'Power State', 'state', fam, 'nvidia_smi.power_state', 'line'],
- 'lines': [['power_state_' + v.lower(), v, 'absolute'] for v in POWER_STATES]
- },
- PROCESSES_MEM: {
- 'options': [None, 'Memory Used by Each Process', 'MiB', fam, 'nvidia_smi.processes_mem', 'stacked'],
- 'lines': []
- },
- USER_MEM: {
- 'options': [None, 'Memory Used by Each User', 'MiB', fam, 'nvidia_smi.user_mem', 'stacked'],
- 'lines': []
- },
- USER_NUM: {
-            'options': [None, 'Number of Users on GPU', 'num', fam, 'nvidia_smi.user_num', 'line'],
- 'lines': [
- ['user_num', 'users'],
- ]
- },
- }
-
- idx = gpu.num
-
- order = ['gpu{0}_{1}'.format(idx, v) for v in ORDER]
- charts = dict(('gpu{0}_{1}'.format(idx, k), v) for k, v in charts.items())
-
- for chart in charts.values():
- for line in chart['lines']:
- line[0] = 'gpu{0}_{1}'.format(idx, line[0])
-
- return order, charts
-
-
-class NvidiaSMI:
- def __init__(self):
- self.command = find_binary(NVIDIA_SMI)
- self.active_proc = None
-
- def run_once(self):
- proc = subprocess.Popen([self.command, '-x', '-q'], stdout=subprocess.PIPE)
- stdout, _ = proc.communicate()
- return stdout
-
- def run_loop(self, interval):
- if self.active_proc:
- self.kill()
- proc = subprocess.Popen([self.command, '-x', '-q', '-l', str(interval)], stdout=subprocess.PIPE)
- self.active_proc = proc
- return proc.stdout
-
- def kill(self):
- if self.active_proc:
- self.active_proc.kill()
- self.active_proc = None
-
-
-class NvidiaSMIPoller(threading.Thread):
- def __init__(self, poll_interval):
- threading.Thread.__init__(self)
- self.daemon = True
-
- self.smi = NvidiaSMI()
- self.interval = poll_interval
-
- self.lock = threading.RLock()
- self.last_data = str()
- self.exit = False
- self.empty_rows = 0
- self.rows = list()
-
- def has_smi(self):
- return bool(self.smi.command)
-
- def run_once(self):
- return self.smi.run_once()
-
- def run(self):
- out = self.smi.run_loop(self.interval)
-
- for row in out:
- if self.exit or self.empty_rows > EMPTY_ROW_LIMIT:
- break
- self.process_row(row)
- self.smi.kill()
-
- def process_row(self, row):
- row = row.decode()
- self.empty_rows += (row == EMPTY_ROW)
- self.rows.append(row)
-
- if POLLER_BREAK_ROW in row:
- self.lock.acquire()
- self.last_data = '\n'.join(self.rows)
- self.lock.release()
-
- self.rows = list()
- self.empty_rows = 0
-
- def is_started(self):
- return self.ident is not None
-
- def shutdown(self):
- self.exit = True
-
- def data(self):
- self.lock.acquire()
- data = self.last_data
- self.lock.release()
- return data
-
-
-def handle_attr_error(method):
- def on_call(*args, **kwargs):
- try:
- return method(*args, **kwargs)
- except AttributeError:
- return None
-
- return on_call
-
-
-def handle_value_error(method):
- def on_call(*args, **kwargs):
- try:
- return method(*args, **kwargs)
- except ValueError:
- return None
-
- return on_call
-
-
-HOST_PREFIX = os.getenv('NETDATA_HOST_PREFIX')
-ETC_PASSWD_PATH = '/etc/passwd'
-PROC_PATH = '/proc'
-
-IS_INSIDE_DOCKER = False
-
-if HOST_PREFIX:
- ETC_PASSWD_PATH = os.path.join(HOST_PREFIX, ETC_PASSWD_PATH[1:])
- PROC_PATH = os.path.join(HOST_PREFIX, PROC_PATH[1:])
- IS_INSIDE_DOCKER = True
-
-
-def read_passwd_file():
- data = dict()
- with open(ETC_PASSWD_PATH, 'r') as f:
- for line in f:
- line = line.strip()
- if line.startswith("#"):
- continue
- fields = line.split(":")
- # name, passwd, uid, gid, comment, home_dir, shell
- if len(fields) != 7:
- continue
- # uid, guid
- fields[2], fields[3] = int(fields[2]), int(fields[3])
- data[fields[2]] = fields
- return data
-
-
-def read_passwd_file_safe():
- try:
- if IS_INSIDE_DOCKER:
- return read_passwd_file()
- return dict((k[2], k) for k in pwd.getpwall())
- except (OSError, IOError):
- return dict()
-
-
-def get_username_by_pid_safe(pid, passwd_file):
- path = os.path.join(PROC_PATH, pid)
- try:
- uid = os.stat(path).st_uid
- except (OSError, IOError):
- return ''
- try:
- if IS_INSIDE_DOCKER:
- return passwd_file[uid][0]
- return pwd.getpwuid(uid)[0]
- except KeyError:
- return str(uid)
-
-
-class GPU:
- def __init__(self, num, root, exclude_zero_memory_users=False):
- self.num = num
- self.root = root
- self.exclude_zero_memory_users = exclude_zero_memory_users
-
- def id(self):
- return self.root.get('id')
-
- def name(self):
- return self.root.find('product_name').text
-
- def full_name(self):
- return 'gpu{0} {1}'.format(self.num, self.name())
-
- @handle_attr_error
- def pci_link_gen(self):
- return self.root.find('pci').find('pci_gpu_link_info').find('pcie_gen').find('max_link_gen').text
-
- @handle_attr_error
- def pci_link_width(self):
- info = self.root.find('pci').find('pci_gpu_link_info')
- return info.find('link_widths').find('max_link_width').text.split('x')[0]
-
- def pci_bw_max(self):
- link_gen = self.pci_link_gen()
- link_width = int(self.pci_link_width())
- if link_gen not in PCI_SPEED or link_gen not in PCI_ENCODING or not link_width:
- return None
- # Maximum PCIe Bandwidth = SPEED * WIDTH * (1 - ENCODING) - 1Gb/s.
- # see details https://enterprise-support.nvidia.com/s/article/understanding-pcie-configuration-for-maximum-performance
- # return max bandwidth in kilobytes per second (kB/s)
- return (PCI_SPEED[link_gen] * link_width * (1 - PCI_ENCODING[link_gen]) - 1) * 1000 * 1000 / 8
-
- @handle_attr_error
- def rx_util(self):
- return self.root.find('pci').find('rx_util').text.split()[0]
-
- @handle_attr_error
- def tx_util(self):
- return self.root.find('pci').find('tx_util').text.split()[0]
-
- @handle_attr_error
- def fan_speed(self):
- return self.root.find('fan_speed').text.split()[0]
-
- @handle_attr_error
- def gpu_util(self):
- return self.root.find('utilization').find('gpu_util').text.split()[0]
-
- @handle_attr_error
- def memory_util(self):
- return self.root.find('utilization').find('memory_util').text.split()[0]
-
- @handle_attr_error
- def encoder_util(self):
- return self.root.find('utilization').find('encoder_util').text.split()[0]
-
- @handle_attr_error
- def decoder_util(self):
- return self.root.find('utilization').find('decoder_util').text.split()[0]
-
- @handle_attr_error
- def fb_memory_used(self):
- return self.root.find('fb_memory_usage').find('used').text.split()[0]
-
- @handle_attr_error
- def fb_memory_free(self):
- return self.root.find('fb_memory_usage').find('free').text.split()[0]
-
- @handle_attr_error
- def bar1_memory_used(self):
- return self.root.find('bar1_memory_usage').find('used').text.split()[0]
-
- @handle_attr_error
- def bar1_memory_free(self):
- return self.root.find('bar1_memory_usage').find('free').text.split()[0]
-
- @handle_attr_error
- def temperature(self):
- return self.root.find('temperature').find('gpu_temp').text.split()[0]
-
- @handle_attr_error
- def graphics_clock(self):
- return self.root.find('clocks').find('graphics_clock').text.split()[0]
-
- @handle_attr_error
- def video_clock(self):
- return self.root.find('clocks').find('video_clock').text.split()[0]
-
- @handle_attr_error
- def sm_clock(self):
- return self.root.find('clocks').find('sm_clock').text.split()[0]
-
- @handle_attr_error
- def mem_clock(self):
- return self.root.find('clocks').find('mem_clock').text.split()[0]
-
- @handle_attr_error
- def power_readings(self):
- elem = self.root.find('power_readings')
-        # find() returns None when the tag is absent; some newer drivers
-        # report 'gpu_power_readings' instead of 'power_readings'
-        return elem if elem is not None else self.root.find('gpu_power_readings')
-
- @handle_attr_error
- def power_state(self):
- return str(self.power_readings().find('power_state').text.split()[0])
-
- @handle_value_error
- @handle_attr_error
- def power_draw(self):
- return float(self.power_readings().find('power_draw').text.split()[0]) * 100
-
- @handle_attr_error
- def processes(self):
- processes_info = self.root.find('processes').findall('process_info')
- if not processes_info:
- return list()
-
- passwd_file = read_passwd_file_safe()
- processes = list()
-
- for info in processes_info:
- pid = info.find('pid').text
- processes.append({
- 'pid': int(pid),
- 'process_name': info.find('process_name').text,
- 'used_memory': int(info.find('used_memory').text.split()[0]),
- 'username': get_username_by_pid_safe(pid, passwd_file),
- })
- return processes
-
- def data(self):
- data = {
- 'rx_util': self.rx_util(),
- 'tx_util': self.tx_util(),
- 'fan_speed': self.fan_speed(),
- 'gpu_util': self.gpu_util(),
- 'memory_util': self.memory_util(),
- 'encoder_util': self.encoder_util(),
- 'decoder_util': self.decoder_util(),
- 'fb_memory_used': self.fb_memory_used(),
- 'fb_memory_free': self.fb_memory_free(),
- 'bar1_memory_used': self.bar1_memory_used(),
- 'bar1_memory_free': self.bar1_memory_free(),
- 'gpu_temp': self.temperature(),
- 'graphics_clock': self.graphics_clock(),
- 'video_clock': self.video_clock(),
- 'sm_clock': self.sm_clock(),
- 'mem_clock': self.mem_clock(),
- 'power_draw': self.power_draw(),
- }
-
- if self.rx_util() != NOT_AVAILABLE and self.tx_util() != NOT_AVAILABLE:
- pci_bw_max = self.pci_bw_max()
- if not pci_bw_max:
- data['rx_util_percent'] = 0
- data['tx_util_percent'] = 0
- else:
-                data['rx_util_percent'] = str(int(int(self.rx_util()) * 100 / pci_bw_max))
-                data['tx_util_percent'] = str(int(int(self.tx_util()) * 100 / pci_bw_max))
-
- for v in POWER_STATES:
- data['power_state_' + v.lower()] = 0
- p_state = self.power_state()
- if p_state:
- data['power_state_' + p_state.lower()] = 1
-
- processes = self.processes() or []
- users = set()
- for p in processes:
- data['process_mem_{0}'.format(p['pid'])] = p['used_memory']
- if p['username']:
- if self.exclude_zero_memory_users and p['used_memory'] == 0:
- continue
- users.add(p['username'])
- key = 'user_mem_{0}'.format(p['username'])
- if key in data:
- data[key] += p['used_memory']
- else:
- data[key] = p['used_memory']
- data['user_num'] = len(users)
-
- return dict(('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items())
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- super(Service, self).__init__(configuration=configuration, name=name)
- self.order = list()
- self.definitions = dict()
- self.loop_mode = configuration.get('loop_mode', True)
- poll = int(configuration.get('poll_seconds', self.get_update_every()))
- self.exclude_zero_memory_users = configuration.get('exclude_zero_memory_users', False)
- self.poller = NvidiaSMIPoller(poll)
-
- def get_data_loop_mode(self):
- if not self.poller.is_started():
- self.poller.start()
-
- if not self.poller.is_alive():
- self.debug('poller is off')
- return None
-
- return self.poller.data()
-
- def get_data_normal_mode(self):
- return self.poller.run_once()
-
- def get_data(self):
- if self.loop_mode:
- last_data = self.get_data_loop_mode()
- else:
- last_data = self.get_data_normal_mode()
-
- if not last_data:
- return None
-
- parsed = self.parse_xml(last_data)
- if parsed is None:
- return None
-
- data = dict()
- for idx, root in enumerate(parsed.findall('gpu')):
- gpu = GPU(idx, root, self.exclude_zero_memory_users)
- gpu_data = gpu.data()
- # self.debug(gpu_data)
- gpu_data = dict((k, v) for k, v in gpu_data.items() if is_gpu_data_value_valid(v))
- data.update(gpu_data)
- self.update_processes_mem_chart(gpu)
- self.update_processes_user_mem_chart(gpu)
-
- return data or None
-
- def update_processes_mem_chart(self, gpu):
- ps = gpu.processes()
- if not ps:
- return
- chart = self.charts['gpu{0}_{1}'.format(gpu.num, PROCESSES_MEM)]
- active_dim_ids = []
- for p in ps:
- dim_id = 'gpu{0}_process_mem_{1}'.format(gpu.num, p['pid'])
- active_dim_ids.append(dim_id)
- if dim_id not in chart:
- chart.add_dimension([dim_id, '{0} {1}'.format(p['pid'], p['process_name'])])
- for dim in chart:
- if dim.id not in active_dim_ids:
- chart.del_dimension(dim.id, hide=False)
-
- def update_processes_user_mem_chart(self, gpu):
- ps = gpu.processes()
- if not ps:
- return
- chart = self.charts['gpu{0}_{1}'.format(gpu.num, USER_MEM)]
- active_dim_ids = []
- for p in ps:
- if not p.get('username'):
- continue
- dim_id = 'gpu{0}_user_mem_{1}'.format(gpu.num, p['username'])
- active_dim_ids.append(dim_id)
- if dim_id not in chart:
- chart.add_dimension([dim_id, '{0}'.format(p['username'])])
-
- for dim in chart:
- if dim.id not in active_dim_ids:
- chart.del_dimension(dim.id, hide=False)
-
- def check(self):
- if not self.poller.has_smi():
- self.error("couldn't find '{0}' binary".format(NVIDIA_SMI))
- return False
-
- raw_data = self.poller.run_once()
- if not raw_data:
- self.error("failed to invoke '{0}' binary".format(NVIDIA_SMI))
- return False
-
- parsed = self.parse_xml(raw_data)
- if parsed is None:
- return False
-
- gpus = parsed.findall('gpu')
- if not gpus:
- return False
-
- self.create_charts(gpus)
-
- return True
-
- def parse_xml(self, data):
- try:
- return et.fromstring(data)
- except et.ParseError as error:
- self.error('xml parse failed: "{0}", error: {1}'.format(data, error))
-
- return None
-
- def create_charts(self, gpus):
- for idx, root in enumerate(gpus):
- order, charts = gpu_charts(GPU(idx, root))
- self.order.extend(order)
- self.definitions.update(charts)
-
-
-def is_gpu_data_value_valid(value):
- try:
- int(value)
- except (TypeError, ValueError):
- return False
- return True
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
deleted file mode 100644
index 3d2a30d41..000000000
--- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
+++ /dev/null
@@ -1,68 +0,0 @@
-# netdata python.d.plugin configuration for nvidia_smi
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, nvidia_smi also supports the following:
-#
-# loop_mode: yes/no # default is yes. If set to yes, `nvidia-smi` is executed in a separate thread using the `-l` option.
-# poll_seconds: SECONDS # default is 1. Sets how often, in seconds, the nvidia-smi tool is polled in loop mode.
-# exclude_zero_memory_users: yes/no # default is no. Whether to collect user metrics for processes with 0 MiB memory allocation.
-#
-# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/openldap/Makefile.inc b/collectors/python.d.plugin/openldap/Makefile.inc
deleted file mode 100644
index dc947e214..000000000
--- a/collectors/python.d.plugin/openldap/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += openldap/openldap.chart.py
-dist_pythonconfig_DATA += openldap/openldap.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += openldap/README.md openldap/Makefile.inc
-
diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md
deleted file mode 120000
index 45f36b9b9..000000000
--- a/collectors/python.d.plugin/openldap/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/openldap.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/openldap/integrations/openldap.md b/collectors/python.d.plugin/openldap/integrations/openldap.md
deleted file mode 100644
index a9480a490..000000000
--- a/collectors/python.d.plugin/openldap/integrations/openldap.md
+++ /dev/null
@@ -1,215 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/openldap/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/openldap/metadata.yaml"
-sidebar_label: "OpenLDAP"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Authentication and Authorization"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# OpenLDAP
-
-
-<img src="https://netdata.cloud/img/statsd.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: openldap
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors OpenLDAP metrics about connections, operations, referrals and more.
-
-Statistics are taken from the monitoring interface of an OpenLDAP (slapd) server.
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This collector doesn't work until all the prerequisites are met.
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per OpenLDAP instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| openldap.total_connections | connections | connections/s |
-| openldap.traffic_stats | sent | KiB/s |
-| openldap.operations_status | completed, initiated | ops/s |
-| openldap.referrals | sent | referrals/s |
-| openldap.entries | sent | entries/s |
-| openldap.ldap_operations | bind, search, unbind, add, delete, modify, compare | ops/s |
-| openldap.waiters | write, read | waiters/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Configure the OpenLDAP server to expose metrics to monitor it.
-
-Follow the instructions at https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface, as sketched below.
-
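-For example, with a `slapd.conf`-style setup the monitor backend can be enabled with something like the following (a sketch; `cn=config` deployments differ, and the bind DN is a placeholder):
-
-```
-database monitor
-access to dn.subtree="cn=Monitor"
-    by dn.exact="cn=netdata,dc=example,dc=com" read
-    by * none
-```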
-
-#### Install python-ldap module
-
-Install the python-ldap module.
-
-1. With the pip package manager
-
-```bash
-pip install python-ldap
-```
-
-2. With the apt package manager (on most deb-based distros)
-
-
-```bash
-apt-get install python-ldap
-```
-
-
-3. With the yum package manager (on most rpm-based distros)
-
-
-```bash
-yum install python-ldap
-```
-
-
-#### Insert credentials for Netdata to access the OpenLDAP server
-
-Use the `ldappasswd` utility to set a password for the username you will use.
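-
-For example (a sketch; the server URI and DNs are placeholders for your own values):
-
-```bash
-ldappasswd -H ldap://localhost -x -D "cn=admin,dc=example,dc=com" -W -S "cn=netdata,dc=example,dc=com"
-```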
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/openldap.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/openldap.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| username | The bind user with rights to access the monitor statistics. | | yes |
-| password | The password for the bind user. | | yes |
-| server | The listening address of the LDAP server. In case of TLS, use the hostname for which the certificate is issued. | | yes |
-| port | The listening port of the LDAP server. Change to port 636 in case of a TLS connection. | 389 | yes |
-| use_tls | Set to True if a TLS connection is used over ldaps://. | no | no |
-| use_start_tls | Set to True if a TLS connection is used over ldap://. | no | no |
-| cert_check | Set to False if you want to skip certificate verification. | True | yes |
-| timeout | Seconds to wait before timing out when no connection exists. | | yes |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-```yaml
-username: "cn=admin"
-password: "pass"
-server: "localhost"
-port: "389"
-cert_check: True
-timeout: 1
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin openldap debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/openldap/metadata.yaml b/collectors/python.d.plugin/openldap/metadata.yaml
deleted file mode 100644
index 3826b22c7..000000000
--- a/collectors/python.d.plugin/openldap/metadata.yaml
+++ /dev/null
@@ -1,225 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: openldap
- monitored_instance:
- name: OpenLDAP
- link: "https://www.openldap.org/"
- categories:
- - data-collection.authentication-and-authorization
- icon_filename: "statsd.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - openldap
- - RBAC
- - Directory access
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors OpenLDAP metrics about connections, operations, referrals and more."
- method_description: |
-        Statistics are taken from the monitoring interface of an OpenLDAP (slapd) server.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: |
-          This collector doesn't work until all the prerequisites are met.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
-          - title: Configure the OpenLDAP server to expose metrics to monitor it.
-            description: |
-              Follow the instructions at https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.
- - title: Install python-ldap module
- description: |
-            Install the python-ldap module:
-
-            1. From the pip package manager
-
-            ```bash
-            pip install python-ldap
-            ```
-
-            2. With the apt package manager (on most Debian-based distros)
-
-            ```bash
-            apt-get install python-ldap
-            ```
-
-            3. With the yum package manager (on most RPM-based distros)
-
-            ```bash
-            yum install python-ldap
-            ```
-        - title: Insert credentials for Netdata to access the OpenLDAP server
- description: |
- Use the `ldappasswd` utility to set a password for the username you will use.
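-
-            For example, an illustrative invocation (the DNs are placeholders):
-
-            ```bash
-            ldappasswd -H ldap://localhost -x -D "cn=admin,dc=example,dc=com" -W -S "cn=netdata,dc=example,dc=com"
-            ```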
- configuration:
- file:
- name: "python.d/openldap.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: username
-              description: The bind user with rights to access the monitor statistics.
- default_value: ""
- required: true
- - name: password
-              description: The password for the bind user.
- default_value: ""
- required: true
- - name: server
-              description: The listening address of the LDAP server. In case of TLS, use the hostname for which the certificate is issued.
- default_value: ""
- required: true
- - name: port
-              description: The listening port of the LDAP server. Change to 636 in case of a TLS connection.
- default_value: "389"
- required: true
- - name: use_tls
-              description: Set to True if a TLS connection is used over ldaps://.
- default_value: False
- required: false
- - name: use_start_tls
-              description: Set to True if a TLS connection is used over ldap://.
- default_value: False
- required: false
- - name: cert_check
-              description: Set to False to skip certificate verification.
- default_value: "True"
- required: true
- - name: timeout
-              description: Seconds before timing out if no connection exists.
- default_value: ""
- required: true
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic example configuration.
- folding:
- enabled: false
- config: |
- username: "cn=admin"
- password: "pass"
- server: "localhost"
- port: "389"
-                cert_check: True
- timeout: 1
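-            - name: TLS
-              description: An illustrative configuration for an encrypted connection over ldaps:// (server name and credentials are placeholders).
-              folding:
-                enabled: false
-              config: |
-                username: "cn=admin"
-                password: "pass"
-                server: "ldap.example.com"
-                port: "636"
-                use_tls: True
-                cert_check: True
-                timeout: 1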
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: openldap.total_connections
- description: Total Connections
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: connections
- - name: openldap.traffic_stats
- description: Traffic
- unit: "KiB/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: openldap.operations_status
- description: Operations Status
- unit: "ops/s"
- chart_type: line
- dimensions:
- - name: completed
- - name: initiated
- - name: openldap.referrals
- description: Referrals
- unit: "referrals/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: openldap.entries
- description: Entries
- unit: "entries/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: openldap.ldap_operations
- description: Operations
- unit: "ops/s"
- chart_type: line
- dimensions:
- - name: bind
- - name: search
- - name: unbind
- - name: add
- - name: delete
- - name: modify
- - name: compare
- - name: openldap.waiters
- description: Waiters
- unit: "waiters/s"
- chart_type: line
- dimensions:
- - name: write
- - name: read
diff --git a/collectors/python.d.plugin/openldap/openldap.chart.py b/collectors/python.d.plugin/openldap/openldap.chart.py
deleted file mode 100644
index aba143954..000000000
--- a/collectors/python.d.plugin/openldap/openldap.chart.py
+++ /dev/null
@@ -1,216 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: openldap netdata python.d module
-# Author: Manolis Kartsonakis (ekartsonakis)
-# SPDX-License-Identifier: GPL-3.0+
-
-try:
- import ldap
-
- HAS_LDAP = True
-except ImportError:
- HAS_LDAP = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-DEFAULT_SERVER = 'localhost'
-DEFAULT_PORT = '389'
-DEFAULT_TLS = False
-DEFAULT_CERT_CHECK = True
-DEFAULT_TIMEOUT = 1
-DEFAULT_START_TLS = False
-
-ORDER = [
- 'total_connections',
- 'bytes_sent',
- 'operations',
- 'referrals_sent',
- 'entries_sent',
- 'ldap_operations',
- 'waiters'
-]
-
-CHARTS = {
- 'total_connections': {
- 'options': [None, 'Total Connections', 'connections/s', 'ldap', 'openldap.total_connections', 'line'],
- 'lines': [
- ['total_connections', 'connections', 'incremental']
- ]
- },
- 'bytes_sent': {
- 'options': [None, 'Traffic', 'KiB/s', 'ldap', 'openldap.traffic_stats', 'line'],
- 'lines': [
- ['bytes_sent', 'sent', 'incremental', 1, 1024]
- ]
- },
- 'operations': {
- 'options': [None, 'Operations Status', 'ops/s', 'ldap', 'openldap.operations_status', 'line'],
- 'lines': [
- ['completed_operations', 'completed', 'incremental'],
- ['initiated_operations', 'initiated', 'incremental']
- ]
- },
- 'referrals_sent': {
- 'options': [None, 'Referrals', 'referrals/s', 'ldap', 'openldap.referrals', 'line'],
- 'lines': [
- ['referrals_sent', 'sent', 'incremental']
- ]
- },
- 'entries_sent': {
- 'options': [None, 'Entries', 'entries/s', 'ldap', 'openldap.entries', 'line'],
- 'lines': [
- ['entries_sent', 'sent', 'incremental']
- ]
- },
- 'ldap_operations': {
- 'options': [None, 'Operations', 'ops/s', 'ldap', 'openldap.ldap_operations', 'line'],
- 'lines': [
- ['bind_operations', 'bind', 'incremental'],
- ['search_operations', 'search', 'incremental'],
- ['unbind_operations', 'unbind', 'incremental'],
- ['add_operations', 'add', 'incremental'],
- ['delete_operations', 'delete', 'incremental'],
- ['modify_operations', 'modify', 'incremental'],
- ['compare_operations', 'compare', 'incremental']
- ]
- },
- 'waiters': {
- 'options': [None, 'Waiters', 'waiters/s', 'ldap', 'openldap.waiters', 'line'],
- 'lines': [
- ['write_waiters', 'write', 'incremental'],
- ['read_waiters', 'read', 'incremental']
- ]
- },
-}
-
-# Stuff to gather - tuples of (DN, attribute) to query
-SEARCH_LIST = {
- 'total_connections': (
- 'cn=Total,cn=Connections,cn=Monitor', 'monitorCounter',
- ),
- 'bytes_sent': (
- 'cn=Bytes,cn=Statistics,cn=Monitor', 'monitorCounter',
- ),
- 'completed_operations': (
- 'cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'initiated_operations': (
- 'cn=Operations,cn=Monitor', 'monitorOpInitiated',
- ),
- 'referrals_sent': (
- 'cn=Referrals,cn=Statistics,cn=Monitor', 'monitorCounter',
- ),
- 'entries_sent': (
- 'cn=Entries,cn=Statistics,cn=Monitor', 'monitorCounter',
- ),
- 'bind_operations': (
- 'cn=Bind,cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'unbind_operations': (
- 'cn=Unbind,cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'add_operations': (
- 'cn=Add,cn=Operations,cn=Monitor', 'monitorOpInitiated',
- ),
- 'delete_operations': (
- 'cn=Delete,cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'modify_operations': (
- 'cn=Modify,cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'compare_operations': (
- 'cn=Compare,cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'search_operations': (
- 'cn=Search,cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'write_waiters': (
- 'cn=Write,cn=Waiters,cn=Monitor', 'monitorCounter',
- ),
- 'read_waiters': (
- 'cn=Read,cn=Waiters,cn=Monitor', 'monitorCounter',
- ),
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.server = configuration.get('server', DEFAULT_SERVER)
- self.port = configuration.get('port', DEFAULT_PORT)
- self.username = configuration.get('username')
- self.password = configuration.get('password')
- self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT)
- self.use_tls = configuration.get('use_tls', DEFAULT_TLS)
- self.cert_check = configuration.get('cert_check', DEFAULT_CERT_CHECK)
- self.use_start_tls = configuration.get('use_start_tls', DEFAULT_START_TLS)
- self.alive = False
- self.conn = None
-
- def disconnect(self):
- if self.conn:
- self.conn.unbind()
- self.conn = None
- self.alive = False
-
- def connect(self):
- try:
- if self.use_tls:
- self.conn = ldap.initialize('ldaps://%s:%s' % (self.server, self.port))
- else:
- self.conn = ldap.initialize('ldap://%s:%s' % (self.server, self.port))
- self.conn.set_option(ldap.OPT_NETWORK_TIMEOUT, self.timeout)
- if (self.use_tls or self.use_start_tls) and not self.cert_check:
- self.conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
- if self.use_start_tls or self.use_tls:
- self.conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
- if self.use_start_tls:
- self.conn.protocol_version = ldap.VERSION3
- self.conn.start_tls_s()
- if self.username and self.password:
- self.conn.simple_bind(self.username, self.password)
- except ldap.LDAPError as error:
- self.error(error)
- return False
-
- self.alive = True
- return True
-
- def reconnect(self):
- self.disconnect()
- return self.connect()
-
- def check(self):
- if not HAS_LDAP:
- self.error("'python-ldap' package is needed")
- return None
-
- return self.connect() and self.get_data()
-
- def get_data(self):
- if not self.alive and not self.reconnect():
- return None
-
- data = dict()
- for key in SEARCH_LIST:
- dn = SEARCH_LIST[key][0]
- attr = SEARCH_LIST[key][1]
- try:
- num = self.conn.search(dn, ldap.SCOPE_BASE, 'objectClass=*', [attr, ])
- result_type, result_data = self.conn.result(num, 1)
- except ldap.LDAPError as error:
- self.error("Empty result. Check bind username/password. Message: ", error)
- self.alive = False
- return None
-
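-            # 101 == ldap.RES_SEARCH_RESULT; skip responses that are not a completed search result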
- if result_type != 101:
- continue
-
- try:
- data[key] = int(list(result_data[0][1].values())[0][0])
- except (ValueError, IndexError) as error:
- self.debug(error)
- continue
-
- return data
diff --git a/collectors/python.d.plugin/openldap/openldap.conf b/collectors/python.d.plugin/openldap/openldap.conf
deleted file mode 100644
index 5fd99a525..000000000
--- a/collectors/python.d.plugin/openldap/openldap.conf
+++ /dev/null
@@ -1,75 +0,0 @@
-# netdata python.d.plugin configuration for openldap
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# query the slapd monitoring interface once every 10 seconds
-update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# ----------------------------------------------------------------------
-# OPENLDAP EXTRA PARAMETERS
-
-# Set your LDAP connection settings here
-
-#username : "cn=admin,dc=example,dc=com" # The bind user with rights to access monitor statistics
-#password : "yourpass" # The password for the bind user
-#server : 'localhost' # The listening address of the LDAP server. In case of TLS, use the hostname for which the certificate is issued.
-#port : 389 # The listening port of the LDAP server. Change to 636 in case of a TLS connection
-#use_tls : False # Set to True if a TLS connection is used over ldaps://
-#use_start_tls: False # Set to True if a TLS connection is used over ldap://
-#cert_check : True # Set to False to skip certificate verification
-#timeout : 1 # Seconds before timing out if no connection exists
diff --git a/collectors/python.d.plugin/oracledb/Makefile.inc b/collectors/python.d.plugin/oracledb/Makefile.inc
deleted file mode 100644
index ea3a8240a..000000000
--- a/collectors/python.d.plugin/oracledb/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += oracledb/oracledb.chart.py
-dist_pythonconfig_DATA += oracledb/oracledb.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += oracledb/README.md oracledb/Makefile.inc
-
diff --git a/collectors/python.d.plugin/oracledb/README.md b/collectors/python.d.plugin/oracledb/README.md
deleted file mode 120000
index a75e3611e..000000000
--- a/collectors/python.d.plugin/oracledb/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/oracle_db.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/oracledb/integrations/oracle_db.md b/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
deleted file mode 100644
index 30557c021..000000000
--- a/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
+++ /dev/null
@@ -1,226 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/oracledb/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/oracledb/metadata.yaml"
-sidebar_label: "Oracle DB"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Databases"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Oracle DB
-
-
-<img src="https://netdata.cloud/img/oracle.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: oracledb
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors OracleDB database metrics about sessions, tables, memory and more.
-
-It collects the metrics via a supported database client library (python-oracledb, with fallback to cx_Oracle).
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-In order for this collector to work, it needs a read-only user `netdata` in the RDBMS.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-When the requirements are met, databases listening on the local host on port 1521 will be auto-detected.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-These metrics refer to the entire monitored application.
-
-### Per Oracle DB instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| oracledb.session_count | total, active | sessions |
-| oracledb.session_limit_usage | usage | % |
-| oracledb.logons | logons | events/s |
-| oracledb.physical_disk_read_writes | reads, writes | events/s |
-| oracledb.sorts_on_disks | sorts | events/s |
-| oracledb.full_table_scans | full table scans | events/s |
-| oracledb.database_wait_time_ratio | wait time ratio | % |
-| oracledb.shared_pool_free_memory | free memory | % |
-| oracledb.in_memory_sorts_ratio | in-memory sorts | % |
-| oracledb.sql_service_response_time | time | seconds |
-| oracledb.user_rollbacks | rollbacks | events/s |
-| oracledb.enqueue_timeouts | enqueue timeouts | events/s |
-| oracledb.cache_hit_ration | buffer, cursor, library, row | % |
-| oracledb.global_cache_blocks | corrupted, lost | events/s |
-| oracledb.activity | parse count, execute count, user commits, user rollbacks | events/s |
-| oracledb.wait_time | application, configuration, administrative, concurrency, commit, network, user I/O, system I/O, scheduler, other | ms |
-| oracledb.tablespace_size | a dimension per active tablespace | KiB |
-| oracledb.tablespace_usage | a dimension per active tablespace | KiB |
-| oracledb.tablespace_usage_in_percent | a dimension per active tablespace | % |
-| oracledb.allocated_size | a dimension per active tablespace | B |
-| oracledb.allocated_usage | a dimension per active tablespace | B |
-| oracledb.allocated_usage_in_percent | a dimension per active tablespace | % |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Install the python-oracledb package
-
-You can follow the official guide below to install the required package:
-
-Source: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html
-
-
-#### Create a read-only user for netdata
-
-Follow the official instructions for your Oracle RDBMS to create a read-only user for netdata. One possible approach:
-
-Connect to your Oracle database with an administrative user and execute:
-
-```sql
-CREATE USER netdata IDENTIFIED BY <PASSWORD>;
-
-GRANT CONNECT TO netdata;
-GRANT SELECT_CATALOG_ROLE TO netdata;
-```
-
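-For example, using SQL*Plus (an illustrative invocation; the connection details are placeholders):
-
-```bash
-sqlplus sys@//localhost:1521/XE as sysdba
-```
-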
-
-#### Edit the configuration
-
-Edit the configuration to:
-
-1. Provide a valid user for the netdata collector to access the database.
-2. Specify the network target the database is listening on.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/oracledb.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/oracledb.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| user | The username for the user account. | no | yes |
-| password | The password for the user account. | no | yes |
-| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes |
-| service | The Oracle Database service name. To view the services available on your server run this query, `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes |
-| protocol | One of the strings "tcp" or "tcps", indicating whether to use unencrypted or encrypted network traffic. | no | yes |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration, with two jobs defined for two databases.
-
-```yaml
-local:
- user: 'netdata'
- password: 'secret'
- server: 'localhost:1521'
- service: 'XE'
- protocol: 'tcps'
-
-remote:
- user: 'netdata'
- password: 'secret'
- server: '10.0.0.1:1521'
- service: 'XE'
- protocol: 'tcps'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin oracledb debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/oracledb/metadata.yaml b/collectors/python.d.plugin/oracledb/metadata.yaml
deleted file mode 100644
index f2ab8312b..000000000
--- a/collectors/python.d.plugin/oracledb/metadata.yaml
+++ /dev/null
@@ -1,309 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: oracledb
- monitored_instance:
- name: Oracle DB
- link: "https://docs.oracle.com/en/database/oracle/oracle-database/"
- categories:
- - data-collection.database-servers
- icon_filename: "oracle.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - database
- - oracle
- - data warehouse
- - SQL
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors OracleDB database metrics about sessions, tables, memory and more."
-        method_description: "It collects the metrics via a supported database client library (python-oracledb, with fallback to cx_Oracle)."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: |
- In order for this collector to work, it needs a read-only user `netdata` in the RDBMS.
- default_behavior:
- auto_detection:
-          description: "When the requirements are met, databases listening on the local host on port 1521 will be auto-detected."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Install the python-oracledb package
- description: |
- You can follow the official guide below to install the required package:
-
- Source: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html
-          - title: Create a read-only user for netdata
- description: |
-              Follow the official instructions for your Oracle RDBMS to create a read-only user for netdata. One possible approach:
-
- Connect to your Oracle database with an administrative user and execute:
-
-              ```sql
- CREATE USER netdata IDENTIFIED BY <PASSWORD>;
-
- GRANT CONNECT TO netdata;
- GRANT SELECT_CATALOG_ROLE TO netdata;
- ```
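-
-              For example, using SQL*Plus (an illustrative invocation; the connection details are placeholders):
-
-              ```bash
-              sqlplus sys@//localhost:1521/XE as sysdba
-              ```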
- - title: Edit the configuration
- description: |
-              Edit the configuration to:
-
-              1. Provide a valid user for the netdata collector to access the database.
-              2. Specify the network target the database is listening on.
- configuration:
- file:
- name: "python.d/oracledb.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: user
- description: The username for the user account.
- default_value: no
- required: true
- - name: password
- description: The password for the user account.
- default_value: no
- required: true
- - name: server
- description: The IP address or hostname (and port) of the Oracle Database Server.
- default_value: no
- required: true
- - name: service
- description: The Oracle Database service name. To view the services available on your server run this query, `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`.
- default_value: no
- required: true
- - name: protocol
-              description: One of the strings "tcp" or "tcps", indicating whether to use unencrypted or encrypted network traffic.
- default_value: no
- required: true
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- folding:
- enabled: false
-              description: A basic example configuration, with two jobs defined for two databases.
- config: |
- local:
- user: 'netdata'
- password: 'secret'
- server: 'localhost:1521'
- service: 'XE'
- protocol: 'tcps'
-
- remote:
- user: 'netdata'
- password: 'secret'
- server: '10.0.0.1:1521'
- service: 'XE'
- protocol: 'tcps'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: "These metrics refer to the entire monitored application."
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: oracledb.session_count
- description: Session Count
- unit: "sessions"
- chart_type: line
- dimensions:
- - name: total
- - name: active
- - name: oracledb.session_limit_usage
- description: Session Limit Usage
- unit: "%"
- chart_type: area
- dimensions:
- - name: usage
- - name: oracledb.logons
- description: Logons
- unit: "events/s"
- chart_type: area
- dimensions:
- - name: logons
- - name: oracledb.physical_disk_read_writes
- description: Physical Disk Reads/Writes
- unit: "events/s"
- chart_type: area
- dimensions:
- - name: reads
- - name: writes
- - name: oracledb.sorts_on_disks
- description: Sorts On Disk
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: sorts
- - name: oracledb.full_table_scans
- description: Full Table Scans
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: full table scans
- - name: oracledb.database_wait_time_ratio
- description: Database Wait Time Ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: wait time ratio
- - name: oracledb.shared_pool_free_memory
- description: Shared Pool Free Memory
- unit: "%"
- chart_type: line
- dimensions:
- - name: free memory
- - name: oracledb.in_memory_sorts_ratio
- description: In-Memory Sorts Ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: in-memory sorts
- - name: oracledb.sql_service_response_time
- description: SQL Service Response Time
- unit: "seconds"
- chart_type: line
- dimensions:
- - name: time
- - name: oracledb.user_rollbacks
- description: User Rollbacks
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: rollbacks
- - name: oracledb.enqueue_timeouts
- description: Enqueue Timeouts
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: enqueue timeouts
- - name: oracledb.cache_hit_ration
- description: Cache Hit Ratio
- unit: "%"
- chart_type: stacked
- dimensions:
- - name: buffer
- - name: cursor
- - name: library
- - name: row
- - name: oracledb.global_cache_blocks
- description: Global Cache Blocks Events
- unit: "events/s"
- chart_type: area
- dimensions:
- - name: corrupted
- - name: lost
- - name: oracledb.activity
- description: Activities
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: parse count
- - name: execute count
- - name: user commits
- - name: user rollbacks
- - name: oracledb.wait_time
- description: Wait Time
- unit: "ms"
- chart_type: stacked
- dimensions:
- - name: application
- - name: configuration
- - name: administrative
- - name: concurrency
- - name: commit
- - name: network
- - name: user I/O
- - name: system I/O
- - name: scheduler
- - name: other
- - name: oracledb.tablespace_size
- description: Size
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: a dimension per active tablespace
- - name: oracledb.tablespace_usage
- description: Usage
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: a dimension per active tablespace
- - name: oracledb.tablespace_usage_in_percent
- description: Usage
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per active tablespace
- - name: oracledb.allocated_size
- description: Size
- unit: "B"
- chart_type: line
- dimensions:
- - name: a dimension per active tablespace
- - name: oracledb.allocated_usage
- description: Usage
- unit: "B"
- chart_type: line
- dimensions:
- - name: a dimension per active tablespace
- - name: oracledb.allocated_usage_in_percent
- description: Usage
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per active tablespace
diff --git a/collectors/python.d.plugin/oracledb/oracledb.chart.py b/collectors/python.d.plugin/oracledb/oracledb.chart.py
deleted file mode 100644
index 455cf270e..000000000
--- a/collectors/python.d.plugin/oracledb/oracledb.chart.py
+++ /dev/null
@@ -1,846 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: oracledb netdata python.d module
-# Author: ilyam8 (Ilya Mashchenko)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from copy import deepcopy
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-try:
- import oracledb as cx_Oracle
-
- HAS_ORACLE_NEW = True
- HAS_ORACLE_OLD = False
-except ImportError:
- HAS_ORACLE_NEW = False
- try:
- import cx_Oracle
-
- HAS_ORACLE_OLD = True
- except ImportError:
- HAS_ORACLE_OLD = False
-
-ORDER = [
- 'session_count',
- 'session_limit_usage',
- 'logons',
- 'physical_disk_read_write',
- 'sorts_on_disk',
- 'full_table_scans',
- 'database_wait_time_ratio',
- 'shared_pool_free_memory',
- 'in_memory_sorts_ratio',
- 'sql_service_response_time',
- 'user_rollbacks',
- 'enqueue_timeouts',
- 'cache_hit_ratio',
- 'global_cache_blocks',
- 'activity',
- 'wait_time',
- 'tablespace_size',
- 'tablespace_usage',
- 'tablespace_usage_in_percent',
- 'allocated_size',
- 'allocated_usage',
- 'allocated_usage_in_percent',
-]
-
-CHARTS = {
- 'session_count': {
- 'options': [None, 'Session Count', 'sessions', 'session activity', 'oracledb.session_count', 'line'],
- 'lines': [
- ['session_count', 'total', 'absolute', 1, 1000],
- ['average_active_sessions', 'active', 'absolute', 1, 1000],
- ]
- },
- 'session_limit_usage': {
- 'options': [None, 'Session Limit Usage', '%', 'session activity', 'oracledb.session_limit_usage', 'area'],
- 'lines': [
- ['session_limit_percent', 'usage', 'absolute', 1, 1000],
- ]
- },
- 'logons': {
- 'options': [None, 'Logons', 'events/s', 'session activity', 'oracledb.logons', 'area'],
- 'lines': [
- ['logons_per_sec', 'logons', 'absolute', 1, 1000],
- ]
- },
- 'physical_disk_read_write': {
- 'options': [None, 'Physical Disk Reads/Writes', 'events/s', 'disk activity',
- 'oracledb.physical_disk_read_writes', 'area'],
- 'lines': [
- ['physical_reads_per_sec', 'reads', 'absolute', 1, 1000],
- ['physical_writes_per_sec', 'writes', 'absolute', -1, 1000],
- ]
- },
- 'sorts_on_disk': {
- 'options': [None, 'Sorts On Disk', 'events/s', 'disk activity', 'oracledb.sorts_on_disks', 'line'],
- 'lines': [
- ['disk_sort_per_sec', 'sorts', 'absolute', 1, 1000],
- ]
- },
- 'full_table_scans': {
- 'options': [None, 'Full Table Scans', 'events/s', 'disk activity', 'oracledb.full_table_scans', 'line'],
- 'lines': [
- ['long_table_scans_per_sec', 'full table scans', 'absolute', 1, 1000],
- ]
- },
- 'database_wait_time_ratio': {
- 'options': [None, 'Database Wait Time Ratio', '%', 'database and buffer activity',
- 'oracledb.database_wait_time_ratio', 'line'],
- 'lines': [
- ['database_wait_time_ratio', 'wait time ratio', 'absolute', 1, 1000],
- ]
- },
- 'shared_pool_free_memory': {
- 'options': [None, 'Shared Pool Free Memory', '%', 'database and buffer activity',
- 'oracledb.shared_pool_free_memory', 'line'],
- 'lines': [
- ['shared_pool_free_percent', 'free memory', 'absolute', 1, 1000],
- ]
- },
- 'in_memory_sorts_ratio': {
- 'options': [None, 'In-Memory Sorts Ratio', '%', 'database and buffer activity',
- 'oracledb.in_memory_sorts_ratio', 'line'],
- 'lines': [
- ['memory_sorts_ratio', 'in-memory sorts', 'absolute', 1, 1000],
- ]
- },
- 'sql_service_response_time': {
- 'options': [None, 'SQL Service Response Time', 'seconds', 'database and buffer activity',
- 'oracledb.sql_service_response_time', 'line'],
- 'lines': [
- ['sql_service_response_time', 'time', 'absolute', 1, 1000],
- ]
- },
- 'user_rollbacks': {
- 'options': [None, 'User Rollbacks', 'events/s', 'database and buffer activity',
- 'oracledb.user_rollbacks', 'line'],
- 'lines': [
- ['user_rollbacks_per_sec', 'rollbacks', 'absolute', 1, 1000],
- ]
- },
- 'enqueue_timeouts': {
- 'options': [None, 'Enqueue Timeouts', 'events/s', 'database and buffer activity',
- 'oracledb.enqueue_timeouts', 'line'],
- 'lines': [
- ['enqueue_timeouts_per_sec', 'enqueue timeouts', 'absolute', 1, 1000],
- ]
- },
- 'cache_hit_ratio': {
- 'options': [None, 'Cache Hit Ratio', '%', 'cache', 'oracledb.cache_hit_ration', 'stacked'],
- 'lines': [
- ['buffer_cache_hit_ratio', 'buffer', 'absolute', 1, 1000],
- ['cursor_cache_hit_ratio', 'cursor', 'absolute', 1, 1000],
- ['library_cache_hit_ratio', 'library', 'absolute', 1, 1000],
- ['row_cache_hit_ratio', 'row', 'absolute', 1, 1000],
- ]
- },
- 'global_cache_blocks': {
- 'options': [None, 'Global Cache Blocks Events', 'events/s', 'cache', 'oracledb.global_cache_blocks', 'area'],
- 'lines': [
- ['global_cache_blocks_corrupted', 'corrupted', 'incremental', 1, 1000],
- ['global_cache_blocks_lost', 'lost', 'incremental', 1, 1000],
- ]
- },
- 'activity': {
- 'options': [None, 'Activities', 'events/s', 'activities', 'oracledb.activity', 'stacked'],
- 'lines': [
- ['activity_parse_count_total', 'parse count', 'incremental', 1, 1000],
- ['activity_execute_count', 'execute count', 'incremental', 1, 1000],
- ['activity_user_commits', 'user commits', 'incremental', 1, 1000],
- ['activity_user_rollbacks', 'user rollbacks', 'incremental', 1, 1000],
- ]
- },
- 'wait_time': {
- 'options': [None, 'Wait Time', 'ms', 'wait time', 'oracledb.wait_time', 'stacked'],
- 'lines': [
- ['wait_time_application', 'application', 'absolute', 1, 1000],
- ['wait_time_configuration', 'configuration', 'absolute', 1, 1000],
- ['wait_time_administrative', 'administrative', 'absolute', 1, 1000],
- ['wait_time_concurrency', 'concurrency', 'absolute', 1, 1000],
- ['wait_time_commit', 'commit', 'absolute', 1, 1000],
- ['wait_time_network', 'network', 'absolute', 1, 1000],
- ['wait_time_user_io', 'user I/O', 'absolute', 1, 1000],
- ['wait_time_system_io', 'system I/O', 'absolute', 1, 1000],
- ['wait_time_scheduler', 'scheduler', 'absolute', 1, 1000],
- ['wait_time_other', 'other', 'absolute', 1, 1000],
- ]
- },
- 'tablespace_size': {
- 'options': [None, 'Size', 'KiB', 'tablespace', 'oracledb.tablespace_size', 'line'],
- 'lines': [],
- },
- 'tablespace_usage': {
- 'options': [None, 'Usage', 'KiB', 'tablespace', 'oracledb.tablespace_usage', 'line'],
- 'lines': [],
- },
- 'tablespace_usage_in_percent': {
- 'options': [None, 'Usage', '%', 'tablespace', 'oracledb.tablespace_usage_in_percent', 'line'],
- 'lines': [],
- },
- 'allocated_size': {
- 'options': [None, 'Size', 'B', 'tablespace', 'oracledb.allocated_size', 'line'],
- 'lines': [],
- },
- 'allocated_usage': {
- 'options': [None, 'Usage', 'B', 'tablespace', 'oracledb.allocated_usage', 'line'],
- 'lines': [],
- },
- 'allocated_usage_in_percent': {
- 'options': [None, 'Usage', '%', 'tablespace', 'oracledb.allocated_usage_in_percent', 'line'],
- 'lines': [],
- },
-}
-
-CX_CONNECT_STRING_OLD = "{0}/{1}@//{2}/{3}"
-
-QUERY_SYSTEM = '''
-SELECT
- metric_name,
- value
-FROM
- gv$sysmetric
-ORDER BY
- begin_time
-'''
-QUERY_TABLESPACE = '''
-SELECT
- m.tablespace_name,
- m.used_space * t.block_size AS used_bytes,
- m.tablespace_size * t.block_size AS max_bytes,
- m.used_percent
-FROM
- dba_tablespace_usage_metrics m
- JOIN dba_tablespaces t ON m.tablespace_name = t.tablespace_name
-'''
-QUERY_ALLOCATED = '''
-SELECT
- nvl(b.tablespace_name,nvl(a.tablespace_name,'UNKNOWN')) tablespace_name,
- bytes_alloc used_bytes,
- bytes_alloc-nvl(bytes_free,0) max_bytes,
- ((bytes_alloc-nvl(bytes_free,0))/ bytes_alloc)*100 used_percent
-FROM
- (SELECT
- sum(bytes) bytes_free,
- tablespace_name
- FROM sys.dba_free_space
- GROUP BY tablespace_name
- ) a,
- (SELECT
- sum(bytes) bytes_alloc,
- tablespace_name
- FROM sys.dba_data_files
- GROUP BY tablespace_name
- ) b
-WHERE a.tablespace_name (+) = b.tablespace_name
-'''
-QUERY_ACTIVITIES_COUNT = '''
-SELECT
- name,
- value
-FROM
- v$sysstat
-WHERE
- name IN (
- 'parse count (total)',
- 'execute count',
- 'user commits',
- 'user rollbacks'
- )
-'''
-QUERY_WAIT_TIME = '''
-SELECT
- n.wait_class,
- round(m.time_waited / m.INTSIZE_CSEC, 3)
-FROM
- v$waitclassmetric m,
- v$system_wait_class n
-WHERE
- m.wait_class_id = n.wait_class_id
- AND n.wait_class != 'Idle'
-'''
-# QUERY_SESSION_COUNT = '''
-# SELECT
-# status,
-# type
-# FROM
-# v$session
-# GROUP BY
-# status,
-# type
-# '''
-# QUERY_PROCESSES_COUNT = '''
-# SELECT
-# COUNT(*)
-# FROM
-# v$process
-# '''
-# QUERY_PROCESS = '''
-# SELECT
-# program,
-# pga_used_mem,
-# pga_alloc_mem,
-# pga_freeable_mem,
-# pga_max_mem
-# FROM
-# gv$process
-# '''
-
-# PROCESS_METRICS = [
-# 'pga_used_memory',
-# 'pga_allocated_memory',
-# 'pga_freeable_memory',
-# 'pga_maximum_memory',
-# ]
-
-
-SYS_METRICS = {
- 'Average Active Sessions': 'average_active_sessions',
- 'Session Count': 'session_count',
- 'Session Limit %': 'session_limit_percent',
- 'Logons Per Sec': 'logons_per_sec',
- 'Physical Reads Per Sec': 'physical_reads_per_sec',
- 'Physical Writes Per Sec': 'physical_writes_per_sec',
- 'Disk Sort Per Sec': 'disk_sort_per_sec',
- 'Long Table Scans Per Sec': 'long_table_scans_per_sec',
- 'Database Wait Time Ratio': 'database_wait_time_ratio',
- 'Shared Pool Free %': 'shared_pool_free_percent',
- 'Memory Sorts Ratio': 'memory_sorts_ratio',
- 'SQL Service Response Time': 'sql_service_response_time',
- 'User Rollbacks Per Sec': 'user_rollbacks_per_sec',
- 'Enqueue Timeouts Per Sec': 'enqueue_timeouts_per_sec',
- 'Buffer Cache Hit Ratio': 'buffer_cache_hit_ratio',
- 'Cursor Cache Hit Ratio': 'cursor_cache_hit_ratio',
- 'Library Cache Hit Ratio': 'library_cache_hit_ratio',
- 'Row Cache Hit Ratio': 'row_cache_hit_ratio',
- 'Global Cache Blocks Corrupted': 'global_cache_blocks_corrupted',
- 'Global Cache Blocks Lost': 'global_cache_blocks_lost',
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = deepcopy(CHARTS)
- self.user = configuration.get('user')
- self.password = configuration.get('password')
- self.server = configuration.get('server')
- self.service = configuration.get('service')
- self.protocol = configuration.get('protocol', 'tcps')
- self.alive = False
- self.conn = None
- self.active_tablespaces = set()
-
- def connect(self):
- if self.conn:
- self.conn.close()
- self.conn = None
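-        # the new python-oracledb driver accepts an EZConnect-style DSN
-        # (user/password@protocol://host:port/service); the legacy cx_Oracle
-        # driver uses the user/password@//host/service format below instead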
- if HAS_ORACLE_NEW:
- try:
- self.conn = cx_Oracle.connect(
- f'{self.user}/{self.password}@{self.protocol}://{self.server}/{self.service}')
- except cx_Oracle.DatabaseError as error:
- self.error(error)
- return False
- else:
- try:
- self.conn = cx_Oracle.connect(
- CX_CONNECT_STRING_OLD.format(
- self.user,
- self.password,
- self.server,
- self.service,
- ))
- except cx_Oracle.DatabaseError as error:
- self.error(error)
- return False
-
- self.alive = True
- return True
-
- def reconnect(self):
- return self.connect()
-
- def check(self):
- if not HAS_ORACLE_NEW and not HAS_ORACLE_OLD:
- self.error("'oracledb' package is needed to use oracledb module")
- return False
-
- if not all([
- self.user,
- self.password,
- self.server,
- self.service
- ]):
- self.error("one of these parameters is not specified: user, password, server, service")
- return False
-
- if not self.connect():
- return False
-
- return bool(self.get_data())
-
- def get_data(self):
- if not self.alive and not self.reconnect():
- return None
-
- data = dict()
-
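-        # values are stored as fixed-point integers (value * 1000); the
-        # matching divisors of 1000 defined in CHARTS undo this scaling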
- # SYSTEM
- try:
- rv = self.gather_system_metrics()
- except cx_Oracle.Error as error:
- self.error(error)
- self.alive = False
- return None
- else:
- for name, value in rv:
- if name not in SYS_METRICS:
- continue
- data[SYS_METRICS[name]] = int(float(value) * 1000)
-
- # ACTIVITIES COUNT
- try:
- rv = self.gather_activities_count()
- except cx_Oracle.Error as error:
- self.error(error)
- self.alive = False
- return None
- else:
- for name, amount in rv:
- cleaned = name.replace(' ', '_').replace('(', '').replace(')', '')
- new_name = 'activity_{0}'.format(cleaned)
- data[new_name] = int(float(amount) * 1000)
-
- # WAIT TIME
- try:
- rv = self.gather_wait_time_metrics()
- except cx_Oracle.Error as error:
- self.error(error)
- self.alive = False
- return None
- else:
- for name, amount in rv:
- cleaned = name.replace(' ', '_').replace('/', '').lower()
- new_name = 'wait_time_{0}'.format(cleaned)
- data[new_name] = amount
-
- # TABLESPACE
- try:
- rv = self.gather_tablespace_metrics()
- except cx_Oracle.Error as error:
- self.error(error)
- self.alive = False
- return None
- else:
- for name, offline, size, used, used_in_percent in rv:
- # TODO: skip offline?
- if not (not offline and self.charts):
- continue
- # TODO: remove inactive?
- if name not in self.active_tablespaces:
- self.active_tablespaces.add(name)
- self.add_tablespace_to_charts(name)
- data['{0}_tablespace_size'.format(name)] = int(size * 1000)
- data['{0}_tablespace_used'.format(name)] = int(used * 1000)
- data['{0}_tablespace_used_in_percent'.format(name)] = int(used_in_percent * 1000)
-
- # ALLOCATED SPACE
- try:
- rv = self.gather_allocated_metrics()
- except cx_Oracle.Error as error:
- self.error(error)
- self.alive = False
- return None
- else:
- for name, offline, size, used, used_in_percent in rv:
- # TODO: skip offline?
- if not (not offline and self.charts):
- continue
- # TODO: remove inactive?
- if name not in self.active_tablespaces:
- self.active_tablespaces.add(name)
- self.add_tablespace_to_charts(name)
- data['{0}_allocated_size'.format(name)] = int(size * 1000)
- data['{0}_allocated_used'.format(name)] = int(used * 1000)
- data['{0}_allocated_used_in_percent'.format(name)] = int(used_in_percent * 1000)
-
- return data or None
-
- def gather_system_metrics(self):
-
- """
- :return:
-
- [['Buffer Cache Hit Ratio', 100],
- ['Memory Sorts Ratio', 100],
- ['Redo Allocation Hit Ratio', 100],
- ['User Transaction Per Sec', 0],
- ['Physical Reads Per Sec', 0],
- ['Physical Reads Per Txn', 0],
- ['Physical Writes Per Sec', 0],
- ['Physical Writes Per Txn', 0],
- ['Physical Reads Direct Per Sec', 0],
- ['Physical Reads Direct Per Txn', 0],
- ['Physical Writes Direct Per Sec', 0],
- ['Physical Writes Direct Per Txn', 0],
- ['Physical Reads Direct Lobs Per Sec', 0],
- ['Physical Reads Direct Lobs Per Txn', 0],
- ['Physical Writes Direct Lobs Per Sec', 0],
- ['Physical Writes Direct Lobs Per Txn', 0],
- ['Redo Generated Per Sec', Decimal('4.66666666666667')],
- ['Redo Generated Per Txn', 280],
- ['Logons Per Sec', Decimal('0.0166666666666667')],
- ['Logons Per Txn', 1],
- ['Open Cursors Per Sec', 0.35],
- ['Open Cursors Per Txn', 21],
- ['User Commits Per Sec', 0],
- ['User Commits Percentage', 0],
- ['User Rollbacks Per Sec', 0],
- ['User Rollbacks Percentage', 0],
- ['User Calls Per Sec', Decimal('0.0333333333333333')],
- ['User Calls Per Txn', 2],
- ['Recursive Calls Per Sec', 14.15],
- ['Recursive Calls Per Txn', 849],
- ['Logical Reads Per Sec', Decimal('0.683333333333333')],
- ['Logical Reads Per Txn', 41],
- ['DBWR Checkpoints Per Sec', 0],
- ['Background Checkpoints Per Sec', 0],
- ['Redo Writes Per Sec', Decimal('0.0333333333333333')],
- ['Redo Writes Per Txn', 2],
- ['Long Table Scans Per Sec', 0],
- ['Long Table Scans Per Txn', 0],
- ['Total Table Scans Per Sec', Decimal('0.0166666666666667')],
- ['Total Table Scans Per Txn', 1],
- ['Full Index Scans Per Sec', 0],
- ['Full Index Scans Per Txn', 0],
- ['Total Index Scans Per Sec', Decimal('0.216666666666667')],
- ['Total Index Scans Per Txn', 13],
- ['Total Parse Count Per Sec', 0.35],
- ['Total Parse Count Per Txn', 21],
- ['Hard Parse Count Per Sec', 0],
- ['Hard Parse Count Per Txn', 0],
- ['Parse Failure Count Per Sec', 0],
- ['Parse Failure Count Per Txn', 0],
- ['Cursor Cache Hit Ratio', Decimal('52.3809523809524')],
- ['Disk Sort Per Sec', 0],
- ['Disk Sort Per Txn', 0],
- ['Rows Per Sort', 8.6],
- ['Execute Without Parse Ratio', Decimal('27.5862068965517')],
- ['Soft Parse Ratio', 100],
- ['User Calls Ratio', Decimal('0.235017626321974')],
- ['Host CPU Utilization (%)', Decimal('0.124311845142959')],
- ['Network Traffic Volume Per Sec', 0],
- ['Enqueue Timeouts Per Sec', 0],
- ['Enqueue Timeouts Per Txn', 0],
- ['Enqueue Waits Per Sec', 0],
- ['Enqueue Waits Per Txn', 0],
- ['Enqueue Deadlocks Per Sec', 0],
- ['Enqueue Deadlocks Per Txn', 0],
- ['Enqueue Requests Per Sec', Decimal('216.683333333333')],
- ['Enqueue Requests Per Txn', 13001],
- ['DB Block Gets Per Sec', 0],
- ['DB Block Gets Per Txn', 0],
- ['Consistent Read Gets Per Sec', Decimal('0.683333333333333')],
- ['Consistent Read Gets Per Txn', 41],
- ['DB Block Changes Per Sec', 0],
- ['DB Block Changes Per Txn', 0],
- ['Consistent Read Changes Per Sec', 0],
- ['Consistent Read Changes Per Txn', 0],
- ['CPU Usage Per Sec', 0],
- ['CPU Usage Per Txn', 0],
- ['CR Blocks Created Per Sec', 0],
- ['CR Blocks Created Per Txn', 0],
- ['CR Undo Records Applied Per Sec', 0],
- ['CR Undo Records Applied Per Txn', 0],
- ['User Rollback UndoRec Applied Per Sec', 0],
- ['User Rollback Undo Records Applied Per Txn', 0],
- ['Leaf Node Splits Per Sec', 0],
- ['Leaf Node Splits Per Txn', 0],
- ['Branch Node Splits Per Sec', 0],
- ['Branch Node Splits Per Txn', 0],
- ['PX downgraded 1 to 25% Per Sec', 0],
- ['PX downgraded 25 to 50% Per Sec', 0],
- ['PX downgraded 50 to 75% Per Sec', 0],
- ['PX downgraded 75 to 99% Per Sec', 0],
- ['PX downgraded to serial Per Sec', 0],
- ['Physical Read Total IO Requests Per Sec', Decimal('2.16666666666667')],
- ['Physical Read Total Bytes Per Sec', Decimal('35498.6666666667')],
- ['GC CR Block Received Per Second', 0],
- ['GC CR Block Received Per Txn', 0],
- ['GC Current Block Received Per Second', 0],
- ['GC Current Block Received Per Txn', 0],
- ['Global Cache Average CR Get Time', 0],
- ['Global Cache Average Current Get Time', 0],
- ['Physical Write Total IO Requests Per Sec', Decimal('0.966666666666667')],
- ['Global Cache Blocks Corrupted', 0],
- ['Global Cache Blocks Lost', 0],
- ['Current Logons Count', 49],
- ['Current Open Cursors Count', 64],
- ['User Limit %', Decimal('0.00000114087015416959')],
- ['SQL Service Response Time', 0],
- ['Database Wait Time Ratio', 0],
- ['Database CPU Time Ratio', 0],
- ['Response Time Per Txn', 0],
- ['Row Cache Hit Ratio', 100],
- ['Row Cache Miss Ratio', 0],
- ['Library Cache Hit Ratio', 100],
- ['Library Cache Miss Ratio', 0],
- ['Shared Pool Free %', Decimal('7.82380268491548')],
- ['PGA Cache Hit %', Decimal('98.0399767109115')],
- ['Process Limit %', Decimal('17.6666666666667')],
- ['Session Limit %', Decimal('15.2542372881356')],
- ['Executions Per Txn', 29],
- ['Executions Per Sec', Decimal('0.483333333333333')],
- ['Txns Per Logon', 0],
- ['Database Time Per Sec', 0],
- ['Physical Write Total Bytes Per Sec', 15308.8],
- ['Physical Read IO Requests Per Sec', 0],
- ['Physical Read Bytes Per Sec', 0],
- ['Physical Write IO Requests Per Sec', 0],
- ['Physical Write Bytes Per Sec', 0],
- ['DB Block Changes Per User Call', 0],
- ['DB Block Gets Per User Call', 0],
- ['Executions Per User Call', 14.5],
- ['Logical Reads Per User Call', 20.5],
- ['Total Sorts Per User Call', 2.5],
- ['Total Table Scans Per User Call', 0.5],
- ['Current OS Load', 0.0390625],
- ['Streams Pool Usage Percentage', 0],
- ['PQ QC Session Count', 0],
- ['PQ Slave Session Count', 0],
- ['Queries parallelized Per Sec', 0],
- ['DML statements parallelized Per Sec', 0],
- ['DDL statements parallelized Per Sec', 0],
- ['PX operations not downgraded Per Sec', 0],
- ['Session Count', 72],
- ['Average Synchronous Single-Block Read Latency', 0],
- ['I/O Megabytes per Second', 0.05],
- ['I/O Requests per Second', Decimal('3.13333333333333')],
- ['Average Active Sessions', 0],
- ['Active Serial Sessions', 1],
- ['Active Parallel Sessions', 0],
- ['Captured user calls', 0],
- ['Replayed user calls', 0],
- ['Workload Capture and Replay status', 0],
- ['Background CPU Usage Per Sec', Decimal('1.22578833333333')],
- ['Background Time Per Sec', 0.0147551],
- ['Host CPU Usage Per Sec', Decimal('0.116666666666667')],
- ['Cell Physical IO Interconnect Bytes', 3048448],
- ['Temp Space Used', 0],
- ['Total PGA Allocated', 200657920],
- ['Total PGA Used by SQL Workareas', 0],
- ['Run Queue Per Sec', 0],
- ['VM in bytes Per Sec', 0],
- ['VM out bytes Per Sec', 0]]
- """
-
- metrics = list()
- with self.conn.cursor() as cursor:
- cursor.execute(QUERY_SYSTEM)
- for metric_name, value in cursor.fetchall():
- metrics.append([metric_name, value])
- return metrics
-
- def gather_tablespace_metrics(self):
- """
- :return:
-
- [['SYSTEM', 874250240.0, 3233169408.0, 27.040038107400033, 0],
- ['SYSAUX', 498860032.0, 3233169408.0, 15.429443033997678, 0],
- ['TEMP', 0.0, 3233177600.0, 0.0, 0],
- ['USERS', 1048576.0, 3233169408.0, 0.03243182981397305, 0]]
- """
- metrics = list()
- with self.conn.cursor() as cursor:
- cursor.execute(QUERY_TABLESPACE)
- for tablespace_name, used_bytes, max_bytes, used_percent in cursor.fetchall():
- if used_bytes is None:
- offline = True
- used = 0
- else:
- offline = False
- used = float(used_bytes)
- if max_bytes is None:
- size = 0
- else:
- size = float(max_bytes)
- if used_percent is None:
- used_percent = 0
- else:
- used_percent = float(used_percent)
- metrics.append(
- [
- tablespace_name,
- offline,
- size,
- used,
- used_percent,
- ]
- )
- return metrics
-
- def gather_allocated_metrics(self):
- """
- :return:
-
- [['SYSTEM', 874250240.0, 3233169408.0, 27.040038107400033, 0],
- ['SYSAUX', 498860032.0, 3233169408.0, 15.429443033997678, 0],
- ['TEMP', 0.0, 3233177600.0, 0.0, 0],
- ['USERS', 1048576.0, 3233169408.0, 0.03243182981397305, 0]]
- """
- metrics = list()
- with self.conn.cursor() as cursor:
- cursor.execute(QUERY_ALLOCATED)
- for tablespace_name, used_bytes, max_bytes, used_percent in cursor.fetchall():
- if used_bytes is None:
- offline = True
- used = 0
- else:
- offline = False
- used = float(used_bytes)
- if max_bytes is None:
- size = 0
- else:
- size = float(max_bytes)
- if used_percent is None:
- used_percent = 0
- else:
- used_percent = float(used_percent)
- metrics.append(
- [
- tablespace_name,
- offline,
- size,
- used,
- used_percent,
- ]
- )
- return metrics
-
- def gather_wait_time_metrics(self):
- """
- :return:
-
- [['Other', 0],
- ['Application', 0],
- ['Configuration', 0],
- ['Administrative', 0],
- ['Concurrency', 0],
- ['Commit', 0],
- ['Network', 0],
- ['User I/O', 0],
- ['System I/O', 0.002],
- ['Scheduler', 0]]
- """
- metrics = list()
- with self.conn.cursor() as cursor:
- cursor.execute(QUERY_WAIT_TIME)
- for wait_class_name, value in cursor.fetchall():
- metrics.append([wait_class_name, value])
- return metrics
-
- def gather_activities_count(self):
- """
- :return:
-
- [('user commits', 9104),
- ('user rollbacks', 17),
- ('parse count (total)', 483695),
- ('execute count', 2020356)]
- """
- with self.conn.cursor() as cursor:
- cursor.execute(QUERY_ACTIVITIES_COUNT)
- return cursor.fetchall()
-
- # def gather_process_metrics(self):
- # """
- # :return:
- #
- # [['PSEUDO', 'pga_used_memory', 0],
- # ['PSEUDO', 'pga_allocated_memory', 0],
- # ['PSEUDO', 'pga_freeable_memory', 0],
- # ['PSEUDO', 'pga_maximum_memory', 0],
- # ['oracle@localhost.localdomain (PMON)', 'pga_used_memory', 1793827],
- # ['oracle@localhost.localdomain (PMON)', 'pga_allocated_memory', 1888651],
- # ['oracle@localhost.localdomain (PMON)', 'pga_freeable_memory', 0],
- # ['oracle@localhost.localdomain (PMON)', 'pga_maximum_memory', 1888651],
- # ...
- # ...
- # """
- #
- # metrics = list()
- # with self.conn.cursor() as cursor:
- # cursor.execute(QUERY_PROCESS)
- # for row in cursor.fetchall():
- # for i, name in enumerate(PROCESS_METRICS, 1):
- # metrics.append([row[0], name, row[i]])
- # return metrics
-
- # def gather_processes_count(self):
- # with self.conn.cursor() as cursor:
- # cursor.execute(QUERY_PROCESSES_COUNT)
- # return cursor.fetchone()[0] # 53
-
- # def gather_sessions_count(self):
- # with self.conn.cursor() as cursor:
- # cursor.execute(QUERY_SESSION_COUNT)
- # total, active, inactive = 0, 0, 0
- # for status, _ in cursor.fetchall():
- # total += 1
- # active += status == 'ACTIVE'
- # inactive += status == 'INACTIVE'
- # return [total, active, inactive]
-
- def add_tablespace_to_charts(self, name):
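-        # sizes and usages are stored as value * 1000 (fixed-point); a divisor
-        # of 1024 * 1000 renders KiB, while 1000 renders raw bytes or percentages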
- self.charts['tablespace_size'].add_dimension(
- [
- '{0}_tablespace_size'.format(name),
- name,
- 'absolute',
- 1,
- 1024 * 1000,
- ])
- self.charts['tablespace_usage'].add_dimension(
- [
- '{0}_tablespace_used'.format(name),
- name,
- 'absolute',
- 1,
- 1024 * 1000,
- ])
- self.charts['tablespace_usage_in_percent'].add_dimension(
- [
- '{0}_tablespace_used_in_percent'.format(name),
- name,
- 'absolute',
- 1,
- 1000,
- ])
- self.charts['allocated_size'].add_dimension(
- [
- '{0}_allocated_size'.format(name),
- name,
- 'absolute',
- 1,
- 1000,
- ])
- self.charts['allocated_usage'].add_dimension(
- [
- '{0}_allocated_used'.format(name),
- name,
- 'absolute',
- 1,
- 1000,
- ])
- self.charts['allocated_usage_in_percent'].add_dimension(
- [
- '{0}_allocated_used_in_percent'.format(name),
- name,
- 'absolute',
- 1,
- 1000,
- ])
diff --git a/collectors/python.d.plugin/oracledb/oracledb.conf b/collectors/python.d.plugin/oracledb/oracledb.conf
deleted file mode 100644
index 027215dad..000000000
--- a/collectors/python.d.plugin/oracledb/oracledb.conf
+++ /dev/null
@@ -1,88 +0,0 @@
-# netdata python.d.plugin configuration for oracledb
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, oracledb also supports the following:
-#
-# user: username # the username for the user account. Required.
-# password: password # the password for the user account. Required.
-# server: localhost:1521 # the IP address or hostname (and port) of the Oracle Database Server. Required.
-# service: XE # the Oracle Database service name. Required. To view the services available on your server,
-# run this query: `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`.
-# protocol: tcp/tcps # one of the strings "tcp" or "tcps" indicating whether to use unencrypted network traffic
-# or encrypted network traffic
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-#local:
-# user: 'netdata'
-# password: 'secret'
-# server: 'localhost:1521'
-# service: 'XE'
-# protocol: 'tcps'
-
-#remote:
-# user: 'netdata'
-# password: 'secret'
-# server: '10.0.0.1:1521'
-# service: 'XE'
-# protocol: 'tcps'
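
A minimal sketch of what the job parameters above translate to in driver terms, assuming the `cx_Oracle` driver; `make_connection()` is a hypothetical helper, and a `tcps` protocol would additionally need a TCPS connect descriptor or wallet setup not shown here:

```python
# Sketch only: turn the user/password/server/service job options into a connection.
import cx_Oracle

def make_connection(user, password, server, service):
    host, port = server.split(':')
    dsn = cx_Oracle.makedsn(host, int(port), service_name=service)
    return cx_Oracle.connect(user=user, password=password, dsn=dsn)

conn = make_connection('netdata', 'secret', 'localhost:1521', 'XE')
with conn.cursor() as cursor:
    cursor.execute('select 1 from dual')
    print(cursor.fetchone())  # (1,)
```
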
diff --git a/collectors/python.d.plugin/pandas/Makefile.inc b/collectors/python.d.plugin/pandas/Makefile.inc
deleted file mode 100644
index 9f4f9b34b..000000000
--- a/collectors/python.d.plugin/pandas/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += pandas/pandas.chart.py
-dist_pythonconfig_DATA += pandas/pandas.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += pandas/README.md pandas/Makefile.inc
-
diff --git a/collectors/python.d.plugin/pandas/README.md b/collectors/python.d.plugin/pandas/README.md
deleted file mode 120000
index 2fabe63c1..000000000
--- a/collectors/python.d.plugin/pandas/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/pandas.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/pandas/integrations/pandas.md b/collectors/python.d.plugin/pandas/integrations/pandas.md
deleted file mode 100644
index 83c5c66b1..000000000
--- a/collectors/python.d.plugin/pandas/integrations/pandas.md
+++ /dev/null
@@ -1,365 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/pandas/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/pandas/metadata.yaml"
-sidebar_label: "Pandas"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Generic Data Collection"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Pandas
-
-
-<img src="https://netdata.cloud/img/pandas.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: pandas
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.
-If you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),
-either locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.
-
-This collector can be used to collect pretty much anything that can be read and then processed by Pandas.
-
-
-The collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata.
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-This collector expects one row in the final pandas DataFrame. That first row is taken
-as the most recent values for each dimension on each chart, using `df.to_dict(orient='records')[0]`.
-See [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).
-
-
-### Per Pandas instance
-
-These metrics refer to the entire monitored application.
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Python Requirements
-
-This collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.
-
-```bash
-sudo pip install pandas requests
-```
-
-Note: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well.
-
-```bash
-sudo pip install 'sqlalchemy<2.0' psycopg2-binary
-```
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/pandas.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/pandas.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| chart_configs | an array of chart configuration dictionaries | [] | yes |
-| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |
-| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |
-| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |
-| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |
-| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes |
-| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |
-| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-
-##### Temperature API Example
-
-example pulling some hourly temperature data, with a chart for today's forecast (mean, min, max) and another chart for the current temperature.
-
-<details><summary>Config</summary>
-
-```yaml
-temperature:
- name: "temperature"
- update_every: 5
- chart_configs:
- - name: "temperature_forecast_by_city"
- title: "Temperature By City - Today Forecast"
- family: "temperature.today"
- context: "pandas.temperature"
- type: "line"
- units: "Celsius"
- df_steps: >
- pd.DataFrame.from_dict(
- {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']
- for (city,lat,lng)
- in [
- ('dublin', 53.3441, -6.2675),
- ('athens', 37.9792, 23.7166),
- ('london', 51.5002, -0.1262),
- ('berlin', 52.5235, 13.4115),
- ('paris', 48.8567, 2.3510),
- ('madrid', 40.4167, -3.7033),
- ('new_york', 40.71, -74.01),
- ('los_angeles', 34.05, -118.24),
- ]
- }
- );
- df.describe(); # get aggregate stats for each city;
- df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;
- df.rename(columns={'index':'city'}); # some column renaming;
- df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;
- df.rename(columns={0:'degrees'}); # some column renaming;
- pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;
- df.rename(columns={0:'measurement'}); # some column renaming;
- df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;
- df.sort_index(); # sort by city name;
-          df.transpose();                                             # transpose so it's just one wide row;
- - name: "temperature_current_by_city"
- title: "Temperature By City - Current"
- family: "temperature.current"
- context: "pandas.temperature"
- type: "line"
- units: "Celsius"
- df_steps: >
- pd.DataFrame.from_dict(
- {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']
- for (city,lat,lng)
- in [
- ('dublin', 53.3441, -6.2675),
- ('athens', 37.9792, 23.7166),
- ('london', 51.5002, -0.1262),
- ('berlin', 52.5235, 13.4115),
- ('paris', 48.8567, 2.3510),
- ('madrid', 40.4167, -3.7033),
- ('new_york', 40.71, -74.01),
- ('los_angeles', 34.05, -118.24),
- ]
- }
- );
- df.transpose();
- df[['temperature']];
- df.transpose();
-
-```
-</details>
-
-##### API CSV Example
-
-example showing a read_csv from a URL and some light pandas data wrangling.
-
-<details><summary>Config</summary>
-
-```yaml
-example_csv:
- name: "example_csv"
- update_every: 2
- chart_configs:
- - name: "london_system_cpu"
- title: "London System CPU - Ratios"
- family: "london_system_cpu"
- context: "pandas"
- type: "line"
- units: "n"
- df_steps: >
- pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});
- df.drop('time', axis=1);
- df.mean().to_frame().transpose();
- df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();
- df.rename(columns={0:'average_user_system_ratio'});
- df*100;
-
-```
-</details>
-
-##### API JSON Example
-
-example showing a read_json from a URL and some light pandas data wrangling.
-
-<details><summary>Config</summary>
-
-```yaml
-example_json:
- name: "example_json"
- update_every: 2
- chart_configs:
- - name: "london_system_net"
- title: "London System Net - Total Bandwidth"
- family: "london_system_net"
- context: "pandas"
- type: "area"
- units: "kilobits/s"
- df_steps: >
- pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);
- df.drop('time', axis=1);
- abs(df);
- df.sum(axis=1).to_frame();
- df.rename(columns={0:'total_bandwidth'});
-
-```
-</details>
-
-##### XML Example
-
-example showing a read_xml from a URL and some light pandas data wrangling.
-
-<details><summary>Config</summary>
-
-```yaml
-example_xml:
- name: "example_xml"
- update_every: 2
- line_sep: "|"
- chart_configs:
- - name: "temperature_forcast"
- title: "Temperature Forecast"
- family: "temp"
- context: "pandas.temp"
- type: "line"
- units: "celsius"
- df_steps: >
- pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|
- df.rename(columns={'value': 'dublin'})|
- df[['dublin']]|
-
-```
-</details>
-
-##### SQL Example
-
-example showing a read_sql from a postgres database using sqlalchemy.
-
-<details><summary>Config</summary>
-
-```yaml
-sql:
- name: "sql"
- update_every: 5
- chart_configs:
- - name: "sql"
- title: "SQL Example"
- family: "sql.example"
- context: "example"
- type: "line"
- units: "percent"
- df_steps: >
- pd.read_sql_query(
- sql='\
- select \
- random()*100 as metric_1, \
- random()*100 as metric_2 \
- ',
- con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')
- );
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin pandas debug trace
- ```
-
-
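
To make the `df_steps` chains above concrete, here is a standalone run of the same kind of chain used in the API CSV example, on invented numbers; each assignment stands in for one `;`-separated step, with `df` carrying the previous step's result:

```python
import pandas as pd

df = pd.DataFrame({'user': [8.0, 10.0, 12.0], 'system': [4.0, 5.0, 6.0]})  # fake CPU samples
df = df.mean().to_frame().transpose()                          # one row of averages
df = df.apply(lambda row: (row.user / row.system), axis=1).to_frame()
df = df.rename(columns={0: 'average_user_system_ratio'})
df = df * 100
print(df.to_dict(orient='records')[0])  # {'average_user_system_ratio': 200.0}
```
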
diff --git a/collectors/python.d.plugin/pandas/metadata.yaml b/collectors/python.d.plugin/pandas/metadata.yaml
deleted file mode 100644
index 92ee1e986..000000000
--- a/collectors/python.d.plugin/pandas/metadata.yaml
+++ /dev/null
@@ -1,308 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: pandas
- monitored_instance:
- name: Pandas
- link: https://pandas.pydata.org/
- categories:
- - data-collection.generic-data-collection
- icon_filename: pandas.png
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - pandas
- - python
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- [Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.
- If you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),
- either locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.
-
-        This collector can be used to collect pretty much anything that can be read and then processed by Pandas.
- method_description: |
- The collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Python Requirements
- description: |
- This collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.
-
- ```bash
- sudo pip install pandas requests
- ```
-
- Note: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well.
-
- ```bash
- sudo pip install 'sqlalchemy<2.0' psycopg2-binary
- ```
- configuration:
- file:
- name: python.d/pandas.conf
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: Config options
- enabled: true
- list:
- - name: chart_configs
- description: an array of chart configuration dictionaries
- default_value: "[]"
- required: true
- - name: chart_configs.name
- description: name of the chart to be displayed in the dashboard.
- default_value: None
- required: true
- - name: chart_configs.title
- description: title of the chart to be displayed in the dashboard.
- default_value: None
- required: true
- - name: chart_configs.family
- description: "[family](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#families) of the chart to be displayed in the dashboard."
- default_value: None
- required: true
- - name: chart_configs.context
- description: "[context](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts) of the chart to be displayed in the dashboard."
- default_value: None
- required: true
- - name: chart_configs.type
- description: the type of the chart to be displayed in the dashboard.
- default_value: None
- required: true
- - name: chart_configs.units
- description: the units of the chart to be displayed in the dashboard.
- default_value: None
- required: true
- - name: chart_configs.df_steps
- description: a series of pandas operations (one per line) that each returns a dataframe.
- default_value: None
- required: true
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: Config
- list:
- - name: Temperature API Example
- folding:
- enabled: true
-              description: example pulling some hourly temperature data, with a chart for today's forecast (mean, min, max) and another chart for the current temperature.
- config: |
- temperature:
- name: "temperature"
- update_every: 5
- chart_configs:
- - name: "temperature_forecast_by_city"
- title: "Temperature By City - Today Forecast"
- family: "temperature.today"
- context: "pandas.temperature"
- type: "line"
- units: "Celsius"
- df_steps: >
- pd.DataFrame.from_dict(
- {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']
- for (city,lat,lng)
- in [
- ('dublin', 53.3441, -6.2675),
- ('athens', 37.9792, 23.7166),
- ('london', 51.5002, -0.1262),
- ('berlin', 52.5235, 13.4115),
- ('paris', 48.8567, 2.3510),
- ('madrid', 40.4167, -3.7033),
- ('new_york', 40.71, -74.01),
- ('los_angeles', 34.05, -118.24),
- ]
- }
- );
- df.describe(); # get aggregate stats for each city;
- df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;
- df.rename(columns={'index':'city'}); # some column renaming;
- df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;
- df.rename(columns={0:'degrees'}); # some column renaming;
- pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;
- df.rename(columns={0:'measurement'}); # some column renaming;
- df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;
- df.sort_index(); # sort by city name;
-                 df.transpose();                                             # transpose so it's just one wide row;
- - name: "temperature_current_by_city"
- title: "Temperature By City - Current"
- family: "temperature.current"
- context: "pandas.temperature"
- type: "line"
- units: "Celsius"
- df_steps: >
- pd.DataFrame.from_dict(
- {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']
- for (city,lat,lng)
- in [
- ('dublin', 53.3441, -6.2675),
- ('athens', 37.9792, 23.7166),
- ('london', 51.5002, -0.1262),
- ('berlin', 52.5235, 13.4115),
- ('paris', 48.8567, 2.3510),
- ('madrid', 40.4167, -3.7033),
- ('new_york', 40.71, -74.01),
- ('los_angeles', 34.05, -118.24),
- ]
- }
- );
- df.transpose();
- df[['temperature']];
- df.transpose();
- - name: API CSV Example
- folding:
- enabled: true
-              description: example showing a read_csv from a URL and some light pandas data wrangling.
- config: |
- example_csv:
- name: "example_csv"
- update_every: 2
- chart_configs:
- - name: "london_system_cpu"
- title: "London System CPU - Ratios"
- family: "london_system_cpu"
- context: "pandas"
- type: "line"
- units: "n"
- df_steps: >
- pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});
- df.drop('time', axis=1);
- df.mean().to_frame().transpose();
- df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();
- df.rename(columns={0:'average_user_system_ratio'});
- df*100;
- - name: API JSON Example
- folding:
- enabled: true
-              description: example showing a read_json from a URL and some light pandas data wrangling.
- config: |
- example_json:
- name: "example_json"
- update_every: 2
- chart_configs:
- - name: "london_system_net"
- title: "London System Net - Total Bandwidth"
- family: "london_system_net"
- context: "pandas"
- type: "area"
- units: "kilobits/s"
- df_steps: >
- pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);
- df.drop('time', axis=1);
- abs(df);
- df.sum(axis=1).to_frame();
- df.rename(columns={0:'total_bandwidth'});
- - name: XML Example
- folding:
- enabled: true
-              description: example showing a read_xml from a URL and some light pandas data wrangling.
- config: |
- example_xml:
- name: "example_xml"
- update_every: 2
- line_sep: "|"
- chart_configs:
- - name: "temperature_forcast"
- title: "Temperature Forecast"
- family: "temp"
- context: "pandas.temp"
- type: "line"
- units: "celsius"
- df_steps: >
- pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|
- df.rename(columns={'value': 'dublin'})|
- df[['dublin']]|
- - name: SQL Example
- folding:
- enabled: true
- description: example showing a read_sql from a postgres database using sqlalchemy.
- config: |
- sql:
- name: "sql"
- update_every: 5
- chart_configs:
- - name: "sql"
- title: "SQL Example"
- family: "sql.example"
- context: "example"
- type: "line"
- units: "percent"
- df_steps: >
- pd.read_sql_query(
- sql='\
- select \
- random()*100 as metric_1, \
- random()*100 as metric_2 \
- ',
- con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')
- );
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: |
-      This collector expects one row in the final pandas DataFrame. That first row is taken
-      as the most recent values for each dimension on each chart, using `df.to_dict(orient='records')[0]`.
-      See [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).
- availability: []
- scopes:
- - name: global
- description: |
- These metrics refer to the entire monitored application.
- labels: []
- metrics: []
diff --git a/collectors/python.d.plugin/pandas/pandas.chart.py b/collectors/python.d.plugin/pandas/pandas.chart.py
deleted file mode 100644
index 7977bcb36..000000000
--- a/collectors/python.d.plugin/pandas/pandas.chart.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: pandas netdata python.d module
-# Author: Andrew Maguire (andrewm4894)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-import pandas as pd
-
-try:
- import requests
- HAS_REQUESTS = True
-except ImportError:
- HAS_REQUESTS = False
-
-try:
- from sqlalchemy import create_engine
- HAS_SQLALCHEMY = True
-except ImportError:
- HAS_SQLALCHEMY = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-ORDER = []
-
-CHARTS = {}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.chart_configs = self.configuration.get('chart_configs', None)
- self.line_sep = self.configuration.get('line_sep', ';')
-
- def run_code(self, df_steps):
- """eval() each line of code and ensure the result is a pandas dataframe"""
-
- # process each line of code
- lines = df_steps.split(self.line_sep)
- for line in lines:
- line_clean = line.strip('\n').strip(' ')
- if line_clean != '' and line_clean[0] != '#':
- df = eval(line_clean)
- assert isinstance(df, pd.DataFrame), 'The result of each evaluated line of `df_steps` must be of type `pd.DataFrame`'
-
- # take top row of final df as data to be collected by netdata
- data = df.to_dict(orient='records')[0]
-
- return data
-
- def check(self):
- """ensure charts and dims all configured and that we can get data"""
-
- if not HAS_REQUESTS:
- self.warning('requests library could not be imported')
-
- if not HAS_SQLALCHEMY:
- self.warning('sqlalchemy library could not be imported')
-
-        if not self.chart_configs:
-            self.error('chart_configs must be defined')
-            return False
-
- data = dict()
-
- # add each chart as defined by the config
- for chart_config in self.chart_configs:
- if chart_config['name'] not in self.charts:
- chart_template = {
- 'options': [
- chart_config['name'],
- chart_config['title'],
- chart_config['units'],
- chart_config['family'],
- chart_config['context'],
- chart_config['type']
- ],
- 'lines': []
- }
- self.charts.add_chart([chart_config['name']] + chart_template['options'])
-
- data_tmp = self.run_code(chart_config['df_steps'])
- data.update(data_tmp)
-
- for dim in data_tmp:
- self.charts[chart_config['name']].add_dimension([dim, dim, 'absolute', 1, 1])
-
- return True
-
- def get_data(self):
- """get data for each chart config"""
-
- data = dict()
-
- for chart_config in self.chart_configs:
- data_tmp = self.run_code(chart_config['df_steps'])
- data.update(data_tmp)
-
- return data
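
One subtlety in `run_code()` above: steps are split on `line_sep` (default `;`), which is why the XML example in this changeset sets `line_sep: "|"`: its URL contains semicolons. A quick illustration, using a hypothetical host:

```python
# Why the XML example overrides line_sep: the default ';' would split inside
# the URL below, but splitting on '|' keeps each step intact. Also note that
# each step is passed to eval(), so df_steps must only ever come from trusted
# configuration.
df_steps = (
    "pd.read_xml('http://host.example/forecast?lat=54.72;long=-8.72')|"
    "df.rename(columns={'value': 'dublin'})|"
    "df[['dublin']]|"
)
steps = [s.strip() for s in df_steps.split('|') if s.strip()]
print(len(steps))  # 3 complete steps; the ';' inside the URL survives
```
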
diff --git a/collectors/python.d.plugin/pandas/pandas.conf b/collectors/python.d.plugin/pandas/pandas.conf
deleted file mode 100644
index 74a7da3e9..000000000
--- a/collectors/python.d.plugin/pandas/pandas.conf
+++ /dev/null
@@ -1,211 +0,0 @@
-# netdata python.d.plugin configuration for pandas
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-update_every: 5
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-#     name: myname            # the JOB's name as it will appear on the
-#                             # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, pandas also supports the following:
-#
-#     chart_configs: [<dictionary>] # an array of chart config dictionaries.
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-
-# Some example configurations; to enable this collector, uncomment an example below and restart netdata.
-
-# example pulling some hourly temperature data, with a chart for today's forecast (mean, min, max) and another chart for the current temperature.
-# temperature:
-# name: "temperature"
-# update_every: 5
-# chart_configs:
-# - name: "temperature_forecast_by_city"
-# title: "Temperature By City - Today Forecast"
-# family: "temperature.today"
-# context: "pandas.temperature"
-# type: "line"
-# units: "Celsius"
-# df_steps: >
-# pd.DataFrame.from_dict(
-# {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']
-# for (city,lat,lng)
-# in [
-# ('dublin', 53.3441, -6.2675),
-# ('athens', 37.9792, 23.7166),
-# ('london', 51.5002, -0.1262),
-# ('berlin', 52.5235, 13.4115),
-# ('paris', 48.8567, 2.3510),
-# ('madrid', 40.4167, -3.7033),
-# ('new_york', 40.71, -74.01),
-# ('los_angeles', 34.05, -118.24),
-# ]
-# }
-# );
-# df.describe(); # get aggregate stats for each city;
-# df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;
-# df.rename(columns={'index':'city'}); # some column renaming;
-# df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;
-# df.rename(columns={0:'degrees'}); # some column renaming;
-# pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;
-# df.rename(columns={0:'measurement'}); # some column renaming;
-# df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;
-# df.sort_index(); # sort by city name;
-#          df.transpose();                                             # transpose so it's just one wide row;
-# - name: "temperature_current_by_city"
-# title: "Temperature By City - Current"
-# family: "temperature.current"
-# context: "pandas.temperature"
-# type: "line"
-# units: "Celsius"
-# df_steps: >
-# pd.DataFrame.from_dict(
-# {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']
-# for (city,lat,lng)
-# in [
-# ('dublin', 53.3441, -6.2675),
-# ('athens', 37.9792, 23.7166),
-# ('london', 51.5002, -0.1262),
-# ('berlin', 52.5235, 13.4115),
-# ('paris', 48.8567, 2.3510),
-# ('madrid', 40.4167, -3.7033),
-# ('new_york', 40.71, -74.01),
-# ('los_angeles', 34.05, -118.24),
-# ]
-# }
-# );
-# df.transpose();
-# df[['temperature']];
-# df.transpose();
-
-# example showing a read_csv from a URL and some light pandas data wrangling.
-# pull data in csv format from the london demo server and then compute the ratio of user CPU over system CPU, averaged over the last 60 seconds.
-# example_csv:
-# name: "example_csv"
-# update_every: 2
-# chart_configs:
-# - name: "london_system_cpu"
-# title: "London System CPU - Ratios"
-# family: "london_system_cpu"
-# context: "pandas"
-# type: "line"
-# units: "n"
-# df_steps: >
-# pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});
-# df.drop('time', axis=1);
-# df.mean().to_frame().transpose();
-# df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();
-# df.rename(columns={0:'average_user_system_ratio'});
-# df*100;
-
-# example showing a read_json from a URL and some light pandas data wrangling.
-# pull data in json format (using requests.get() when the json data is too complex for pd.read_json()) from the london demo server and work out 'total_bandwidth'.
-# example_json:
-# name: "example_json"
-# update_every: 2
-# chart_configs:
-# - name: "london_system_net"
-# title: "London System Net - Total Bandwidth"
-# family: "london_system_net"
-# context: "pandas"
-# type: "area"
-# units: "kilobits/s"
-# df_steps: >
-# pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);
-# df.drop('time', axis=1);
-# abs(df);
-# df.sum(axis=1).to_frame();
-# df.rename(columns={0:'total_bandwidth'});
-
-# example showing a read_xml from a URL and some light pandas data wrangling.
-# pull weather forecast data in xml format, using xpath to pull out the temperature forecast.
-# example_xml:
-# name: "example_xml"
-# update_every: 2
-# line_sep: "|"
-# chart_configs:
-# - name: "temperature_forcast"
-# title: "Temperature Forecast"
-# family: "temp"
-# context: "pandas.temp"
-# type: "line"
-# units: "celsius"
-# df_steps: >
-# pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|
-# df.rename(columns={'value': 'dublin'})|
-# df[['dublin']]|
-
-# example showing a read_sql from a postgres database using sqlalchemy.
-# note: example assumes a running postgres db on localhost with a netdata user and password netdata.
-# sql:
-# name: "sql"
-# update_every: 5
-# chart_configs:
-# - name: "sql"
-# title: "SQL Example"
-# family: "sql.example"
-# context: "example"
-# type: "line"
-# units: "percent"
-# df_steps: >
-# pd.read_sql_query(
-# sql='\
-# select \
-# random()*100 as metric_1, \
-# random()*100 as metric_2 \
-# ',
-# con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')
-# );
diff --git a/collectors/python.d.plugin/postfix/Makefile.inc b/collectors/python.d.plugin/postfix/Makefile.inc
deleted file mode 100644
index f4091b217..000000000
--- a/collectors/python.d.plugin/postfix/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += postfix/postfix.chart.py
-dist_pythonconfig_DATA += postfix/postfix.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += postfix/README.md postfix/Makefile.inc
-
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
deleted file mode 120000
index c62eb5c24..000000000
--- a/collectors/python.d.plugin/postfix/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/postfix.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/postfix/integrations/postfix.md b/collectors/python.d.plugin/postfix/integrations/postfix.md
deleted file mode 100644
index 2bb99922c..000000000
--- a/collectors/python.d.plugin/postfix/integrations/postfix.md
+++ /dev/null
@@ -1,151 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/postfix/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/postfix/metadata.yaml"
-sidebar_label: "Postfix"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Mail Servers"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Postfix
-
-
-<img src="https://netdata.cloud/img/postfix.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: postfix
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Keep an eye on Postfix metrics for efficient mail server operations.
-Improve your mail server performance with Netdata's real-time metrics and built-in alerts.
-
-
-Monitors MTA email queue statistics using the [postqueue](http://www.postfix.org/postqueue.1.html) tool.
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-Postfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to view the mail queue. To do so, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file.
-See the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-The collector executes `postqueue -p` to get Postfix queue statistics.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Postfix instance
-
-These metrics refer to the entire monitored application.
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| postfix.qemails | emails | emails |
-| postfix.qsize | size | KiB |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `postfix` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin postfix debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/postfix/metadata.yaml b/collectors/python.d.plugin/postfix/metadata.yaml
deleted file mode 100644
index 1bbb61164..000000000
--- a/collectors/python.d.plugin/postfix/metadata.yaml
+++ /dev/null
@@ -1,124 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: postfix
- monitored_instance:
- name: Postfix
- link: https://www.postfix.org/
- categories:
- - data-collection.mail-servers
- icon_filename: "postfix.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - postfix
- - mail
- - mail server
- most_popular: false
- overview:
- data_collection:
- metrics_description: >
- Keep an eye on Postfix metrics for efficient mail server operations.
-
- Improve your mail server performance with Netdata's real-time metrics and built-in alerts.
- method_description: >
-        Monitors MTA email queue statistics using the [postqueue](http://www.postfix.org/postqueue.1.html) tool.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: >
- Postfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view
- the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to
-        view the mail queue. To do so, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file.
-
- See the `authorized_mailq_users` setting in
- the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details.
- default_behavior:
- auto_detection:
- description: "The collector executes `postqueue -p` to get Postfix queue statistics."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: Config options
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: |
- These metrics refer to the entire monitored application.
- labels: []
- metrics:
- - name: postfix.qemails
- description: Postfix Queue Emails
- unit: "emails"
- chart_type: line
- dimensions:
- - name: emails
- - name: postfix.qsize
- description: Postfix Queue Emails Size
- unit: "KiB"
- chart_type: area
- dimensions:
- - name: size
diff --git a/collectors/python.d.plugin/postfix/postfix.chart.py b/collectors/python.d.plugin/postfix/postfix.chart.py
deleted file mode 100644
index b650514ee..000000000
--- a/collectors/python.d.plugin/postfix/postfix.chart.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: postfix netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-POSTQUEUE_COMMAND = 'postqueue -p'
-
-ORDER = [
- 'qemails',
- 'qsize',
-]
-
-CHARTS = {
- 'qemails': {
- 'options': [None, 'Postfix Queue Emails', 'emails', 'queue', 'postfix.qemails', 'line'],
- 'lines': [
- ['emails', None, 'absolute']
- ]
- },
- 'qsize': {
- 'options': [None, 'Postfix Queue Emails Size', 'KiB', 'queue', 'postfix.qsize', 'area'],
- 'lines': [
- ['size', None, 'absolute']
- ]
- }
-}
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.command = POSTQUEUE_COMMAND
-
- def _get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- try:
- raw = self._get_raw_data()[-1].split(' ')
- if raw[0] == 'Mail' and raw[1] == 'queue':
- return {'emails': 0,
- 'size': 0}
-
- return {'emails': raw[4],
- 'size': raw[1]}
-        except (ValueError, AttributeError, IndexError, TypeError):
- return None
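
The parsing in `_get_data()` above keys off the last line of `postqueue -p` output; `parse_last_line()` below is a hypothetical standalone rearrangement of that logic, run against the two summary-line shapes postqueue prints:

```python
# Sketch of the last-line parsing above. An empty queue ends with
# 'Mail queue is empty'; otherwise the summary looks like
# '-- 12 Kbytes in 3 Requests.'
def parse_last_line(last_line):
    raw = last_line.split(' ')
    if raw[0] == 'Mail' and raw[1] == 'queue':
        return {'emails': 0, 'size': 0}
    return {'emails': raw[4], 'size': raw[1]}

print(parse_last_line('Mail queue is empty'))          # {'emails': 0, 'size': 0}
print(parse_last_line('-- 12 Kbytes in 3 Requests.'))  # {'emails': '3', 'size': '12'}
```
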
diff --git a/collectors/python.d.plugin/postfix/postfix.conf b/collectors/python.d.plugin/postfix/postfix.conf
deleted file mode 100644
index a4d2472ee..000000000
--- a/collectors/python.d.plugin/postfix/postfix.conf
+++ /dev/null
@@ -1,72 +0,0 @@
-# netdata python.d.plugin configuration for postfix
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# postfix is slow, so once every 10 seconds
-update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, postfix also supports the following:
-#
-# command: 'postqueue -p' # the command to run
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-
-local:
- command: 'postqueue -p'
diff --git a/collectors/python.d.plugin/puppet/Makefile.inc b/collectors/python.d.plugin/puppet/Makefile.inc
deleted file mode 100644
index fe94b9254..000000000
--- a/collectors/python.d.plugin/puppet/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += puppet/puppet.chart.py
-dist_pythonconfig_DATA += puppet/puppet.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += puppet/README.md puppet/Makefile.inc
-
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
deleted file mode 120000
index b6c4c83f9..000000000
--- a/collectors/python.d.plugin/puppet/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/puppet.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/puppet/integrations/puppet.md b/collectors/python.d.plugin/puppet/integrations/puppet.md
deleted file mode 100644
index ca190b576..000000000
--- a/collectors/python.d.plugin/puppet/integrations/puppet.md
+++ /dev/null
@@ -1,215 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/puppet/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/puppet/metadata.yaml"
-sidebar_label: "Puppet"
-learn_status: "Published"
-learn_rel_path: "Data Collection/CICD Platforms"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Puppet
-
-
-<img src="https://netdata.cloud/img/puppet.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: puppet
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Puppet metrics about JVM Heap, Non-Heap, CPU usage and file descriptors.
-
-
-It uses Puppet's metrics API endpoint to gather the metrics.
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, this collector will use `https://fqdn.example.com:8140` as the URL to look for metrics.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Puppet instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| puppet.jvm | committed, used | MiB |
-| puppet.jvm | committed, used | MiB |
-| puppet.cpu | execution, GC | percentage |
-| puppet.fdopen | used | descriptors |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/puppet.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/puppet.conf
-```
-#### Options
-
-This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-> Notes:
-> - The exact Fully Qualified Domain Name of the node should be used.
-> - Puppet Server/DB startup time is usually very long, so allow a reasonably generous retry count.
-> - A secured PuppetDB config may require a client certificate. This does not apply to the default PuppetDB configuration, though.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| url | HTTP or HTTPS URL; use the exact fully qualified domain name (FQDN) of the node. | https://fqdn.example.com:8081 | yes |
-| tls_verify | Control HTTPS server certificate verification. | False | no |
-| tls_ca_file | Optional CA (bundle) file to use | | no |
-| tls_cert_file | Optional client certificate file | | no |
-| tls_key_file | Optional client key file | | no |
-| update_every | Sets the default data collection frequency. | 30 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any given time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration
-
-```yaml
-puppetserver:
- url: 'https://fqdn.example.com:8140'
- autodetection_retry: 1
-
-```
-##### TLS Certificate
-
-An example using a TLS certificate
-
-<details><summary>Config</summary>
-
-```yaml
-puppetdb:
- url: 'https://fqdn.example.com:8081'
- tls_cert_file: /path/to/client.crt
- tls_key_file: /path/to/client.key
- autodetection_retry: 1
-
-```
-</details>
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details><summary>Config</summary>
-
-```yaml
-puppetserver1:
- url: 'https://fqdn.example.com:8140'
- autodetection_retry: 1
-
-puppetserver2:
- url: 'https://fqdn.example2.com:8140'
- autodetection_retry: 1
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `puppet` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin puppet debug trace
- ```
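-
-- If the agent's running `python.d.plugin` instance already holds this module's file lock, the debug run may skip the job as "already registered by another process"; in that case, the plugin also accepts a `nolock` argument:
-
-  ```bash
-  ./python.d.plugin puppet debug trace nolock
-  ```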
-
-
diff --git a/collectors/python.d.plugin/puppet/metadata.yaml b/collectors/python.d.plugin/puppet/metadata.yaml
deleted file mode 100644
index 781519b6a..000000000
--- a/collectors/python.d.plugin/puppet/metadata.yaml
+++ /dev/null
@@ -1,185 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: puppet
- monitored_instance:
- name: Puppet
- link: "https://www.puppet.com/"
- categories:
- - data-collection.ci-cd-systems
- icon_filename: "puppet.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - puppet
- - jvm heap
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
-          This collector monitors Puppet metrics about JVM Heap, Non-Heap, CPU usage, and file descriptors.
- method_description: |
- It uses Puppet's metrics API endpoint to gather the metrics.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
-          description: By default, this collector uses the local node's FQDN on port 8140 (for example, `https://fqdn.example.com:8140`) as the URL to look for metrics.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "python.d/puppet.conf"
- options:
- description: |
-            This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
-
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-            Every configuration JOB starts with a `job_name` value, which will appear on the dashboard unless a `name` parameter is specified.
-
- > Notes:
-            > - Use the exact fully qualified domain name (FQDN) of the node.
-            > - Puppet Server/DB startup is usually very slow, so set a reasonably high retry count.
-            > - A secured PuppetDB config may require a client certificate. This does not apply to the default PuppetDB configuration, though.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: url
-              description: HTTP or HTTPS URL; use the exact fully qualified domain name (FQDN) of the node.
- default_value: https://fqdn.example.com:8081
- required: true
- - name: tls_verify
- description: Control HTTPS server certificate verification.
- default_value: "False"
- required: false
- - name: tls_ca_file
- description: Optional CA (bundle) file to use
- default_value: ""
- required: false
- - name: tls_cert_file
- description: Optional client certificate file
- default_value: ""
- required: false
- - name: tls_key_file
- description: Optional client key file
- default_value: ""
- required: false
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 30
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: >
-                Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any given time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic example configuration
- folding:
- enabled: false
- config: |
- puppetserver:
- url: 'https://fqdn.example.com:8140'
- autodetection_retry: 1
- - name: TLS Certificate
- description: An example using a TLS certificate
- config: |
- puppetdb:
- url: 'https://fqdn.example.com:8081'
- tls_cert_file: /path/to/client.crt
- tls_key_file: /path/to/client.key
- autodetection_retry: 1
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- puppetserver1:
- url: 'https://fqdn.example.com:8140'
- autodetection_retry: 1
-
- puppetserver2:
- url: 'https://fqdn.example2.com:8140'
- autodetection_retry: 1
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: puppet.jvm
- description: JVM Heap
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: committed
- - name: used
- - name: puppet.jvm
- description: JVM Non-Heap
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: committed
- - name: used
- - name: puppet.cpu
- description: CPU usage
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: execution
- - name: GC
- - name: puppet.fdopen
- description: File Descriptors
- unit: "descriptors"
- chart_type: line
- dimensions:
- - name: used
diff --git a/collectors/python.d.plugin/puppet/puppet.chart.py b/collectors/python.d.plugin/puppet/puppet.chart.py
deleted file mode 100644
index f8adf6006..000000000
--- a/collectors/python.d.plugin/puppet/puppet.chart.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: puppet netdata python.d module
-# Author: Andrey Galkin <andrey@futoin.org> (andvgal)
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# This module should work both with OpenSource and PE versions
-# of PuppetServer and PuppetDB.
-#
-# NOTE: PuppetDB may be configured to require proper TLS
-# client certificate for security reasons. Use tls_key_file
-# and tls_cert_file options then.
-#
-
-import socket
-from json import loads
-
-from bases.FrameworkServices.UrlService import UrlService
-
-update_every = 5
-
-MiB = 1 << 20
-CPU_SCALE = 1000
-
-ORDER = [
- 'jvm_heap',
- 'jvm_nonheap',
- 'cpu',
- 'fd_open',
-]
-
-CHARTS = {
- 'jvm_heap': {
- 'options': [None, 'JVM Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
- 'lines': [
- ['jvm_heap_committed', 'committed', 'absolute', 1, MiB],
- ['jvm_heap_used', 'used', 'absolute', 1, MiB],
- ],
- 'variables': [
- ['jvm_heap_max'],
- ['jvm_heap_init'],
- ],
- },
- 'jvm_nonheap': {
- 'options': [None, 'JVM Non-Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
- 'lines': [
- ['jvm_nonheap_committed', 'committed', 'absolute', 1, MiB],
- ['jvm_nonheap_used', 'used', 'absolute', 1, MiB],
- ],
- 'variables': [
- ['jvm_nonheap_max'],
- ['jvm_nonheap_init'],
- ],
- },
- 'cpu': {
- 'options': [None, 'CPU usage', 'percentage', 'resources', 'puppet.cpu', 'stacked'],
- 'lines': [
- ['cpu_time', 'execution', 'absolute', 1, CPU_SCALE],
- ['gc_time', 'GC', 'absolute', 1, CPU_SCALE],
- ]
- },
- 'fd_open': {
- 'options': [None, 'File Descriptors', 'descriptors', 'resources', 'puppet.fdopen', 'line'],
- 'lines': [
- ['fd_used', 'used', 'absolute'],
- ],
- 'variables': [
- ['fd_max'],
- ],
- },
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.url = 'https://{0}:8140'.format(socket.getfqdn())
-
- def _get_data(self):
- # NOTE: there are several ways to retrieve data
- # 1. Only PE versions:
- # https://puppet.com/docs/pe/2018.1/api_status/status_api_metrics_endpoints.html
- # 2. Individual Metrics API (JMX):
- # https://puppet.com/docs/pe/2018.1/api_status/metrics_api.html
- # 3. Extended status at debug level:
- # https://puppet.com/docs/pe/2018.1/api_status/status_api_json_endpoints.html
- #
-        # For the sake of simplicity and efficiency, the status endpoint is used.
-
- raw_data = self._get_raw_data(self.url + '/status/v1/services?level=debug')
-
- if raw_data is None:
- return None
-
- raw_data = loads(raw_data)
- data = {}
-
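-        # the JSON nests the metrics under status-service -> status ->
-        # (experimental ->) jvm-metrics; the 'experimental' level is present
-        # only on some versions, hence the fallback lookup below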
- try:
- try:
- jvm_metrics = raw_data['status-service']['status']['experimental']['jvm-metrics']
- except KeyError:
- jvm_metrics = raw_data['status-service']['status']['jvm-metrics']
-
- heap_mem = jvm_metrics['heap-memory']
- non_heap_mem = jvm_metrics['non-heap-memory']
-
- for k in ['max', 'committed', 'used', 'init']:
- data['jvm_heap_' + k] = heap_mem[k]
- data['jvm_nonheap_' + k] = non_heap_mem[k]
-
- fd_open = jvm_metrics['file-descriptors']
- data['fd_max'] = fd_open['max']
- data['fd_used'] = fd_open['used']
-
- data['cpu_time'] = int(jvm_metrics['cpu-usage'] * CPU_SCALE)
- data['gc_time'] = int(jvm_metrics['gc-cpu-usage'] * CPU_SCALE)
- except KeyError:
- pass
-
- return data or None
diff --git a/collectors/python.d.plugin/puppet/puppet.conf b/collectors/python.d.plugin/puppet/puppet.conf
deleted file mode 100644
index ff5c3d020..000000000
--- a/collectors/python.d.plugin/puppet/puppet.conf
+++ /dev/null
@@ -1,94 +0,0 @@
-# netdata python.d.plugin configuration for Puppet Server and Puppet DB
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any given time. This allows autodetection to try several alternatives
-# and pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# These configuration options come from the UrlService base:
-# url: # HTTP or HTTPS URL
-# tls_verify: False # Control HTTPS server certificate verification
-# tls_ca_file: # Optional CA (bundle) file to use
-# tls_cert_file: # Optional client certificate file
-# tls_key_file: # Optional client key file
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-# puppet:
-# url: 'https://<FQDN>:8140'
-#
-
-#
-# Production configuration should look like below.
-#
-# NOTE: Puppet Server/DB startup is usually VERY slow, so set a
-# reasonably high retry count.
-#
-# NOTE: a secured PuppetDB config may require a client certificate.
-# This does not apply to the default PuppetDB configuration.
-#
-# puppetdb:
-# url: 'https://fqdn.example.com:8081'
-# tls_cert_file: /path/to/client.crt
-# tls_key_file: /path/to/client.key
-# autodetection_retry: 1
-#
-# puppetserver:
-# url: 'https://fqdn.example.com:8140'
-# autodetection_retry: 1
-#
diff --git a/collectors/python.d.plugin/python.d.conf b/collectors/python.d.plugin/python.d.conf
deleted file mode 100644
index 3953ce2b4..000000000
--- a/collectors/python.d.plugin/python.d.conf
+++ /dev/null
@@ -1,78 +0,0 @@
-# netdata python.d.plugin configuration
-#
-# This file is in YaML format.
-# Generally the format is:
-#
-# name: value
-#
-
-# Enable / disable the whole python.d.plugin (all its modules)
-enabled: yes
-
-# ----------------------------------------------------------------------
-# Enable / Disable python.d.plugin modules
-#default_run: yes
-#
-# If "default_run" = "yes" the default for all modules is enabled (yes).
-# Setting any of these to "no" will disable it.
-#
-# If "default_run" = "no" the default for all modules is disabled (no).
-# Setting any of these to "yes" will enable it.
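-#
-# For example: with "default_run: yes", adding "ceph: no" disables only ceph;
-# with "default_run: no", adding "ceph: yes" runs only the modules explicitly
-# set to "yes".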
-
-# Enable / Disable explicit garbage collection (full collection run). Default is enabled.
-gc_run: yes
-
-# Garbage collection interval in seconds. Default is 300.
-gc_interval: 300
-
-# adaptec_raid: yes
-# alarms: yes
-# am2320: yes
-# anomalies: no
-# beanstalk: yes
-# bind_rndc: yes
-# boinc: yes
-# ceph: yes
-# changefinder: no
-# dovecot: yes
-
-# this is just an example
-example: no
-
-# exim: yes
-# fail2ban: yes
-# gearman: yes
-go_expvar: no
-
-# haproxy: yes
-# hddtemp: yes
-hpssa: no
-# icecast: yes
-# ipfs: yes
-# litespeed: yes
-# megacli: yes
-# memcached: yes
-# mongodb: yes
-# monit: yes
-# nvidia_smi: yes
-# nsd: yes
-# openldap: yes
-# oracledb: yes
-# pandas: yes
-# postfix: yes
-# puppet: yes
-# rethinkdbs: yes
-# retroshare: yes
-# riakkv: yes
-# samba: yes
-# sensors: yes
-# smartd_log: yes
-# spigotmc: yes
-# squid: yes
-# traefik: yes
-# tomcat: yes
-# tor: yes
-# uwsgi: yes
-# varnish: yes
-# w1sensor: yes
-# zscores: no
diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in
deleted file mode 100644
index 86fea209c..000000000
--- a/collectors/python.d.plugin/python.d.plugin.in
+++ /dev/null
@@ -1,946 +0,0 @@
-#!/usr/bin/env bash
-'''':;
-pybinary=$(which python3 || which python || which python2)
-filtered=()
-for arg in "$@"
-do
- case $arg in
- -p*) pybinary=${arg:2}
- shift 1 ;;
- *) filtered+=("$arg") ;;
- esac
-done
-if [ "$pybinary" = "" ]
-then
- echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM"
- exit 1
-fi
-exec "$pybinary" "$0" "${filtered[@]}" # '''
-
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (l2isbad)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import collections
-import copy
-import gc
-import json
-import os
-import pprint
-import re
-import sys
-import threading
-import time
-import types
-
-try:
- from queue import Queue
-except ImportError:
- from Queue import Queue
-
-PY_VERSION = sys.version_info[:2]  # e.g. (3, 7): the (major, minor) pair of sys.version_info
-
-if PY_VERSION > (3, 1):
- from importlib.machinery import SourceFileLoader
-else:
- from imp import load_source as SourceFileLoader
-
-ENV_NETDATA_USER_CONFIG_DIR = 'NETDATA_USER_CONFIG_DIR'
-ENV_NETDATA_STOCK_CONFIG_DIR = 'NETDATA_STOCK_CONFIG_DIR'
-ENV_NETDATA_PLUGINS_DIR = 'NETDATA_PLUGINS_DIR'
-ENV_NETDATA_USER_PLUGINS_DIRS = 'NETDATA_USER_PLUGINS_DIRS'
-ENV_NETDATA_LIB_DIR = 'NETDATA_LIB_DIR'
-ENV_NETDATA_UPDATE_EVERY = 'NETDATA_UPDATE_EVERY'
-ENV_NETDATA_LOCK_DIR = 'NETDATA_LOCK_DIR'
-
-
-def add_pythond_packages():
- pluginsd = os.getenv(ENV_NETDATA_PLUGINS_DIR, os.path.dirname(__file__))
- pythond = os.path.abspath(pluginsd + '/../python.d')
- packages = os.path.join(pythond, 'python_modules')
- sys.path.append(packages)
-
-
-add_pythond_packages()
-
-from bases.collection import safe_print
-from bases.loggers import PythonDLogger
-from bases.loaders import load_config
-from third_party import filelock
-
-try:
- from collections import OrderedDict
-except ImportError:
- from third_party.ordereddict import OrderedDict
-
-
-def dirs():
- var_lib = os.getenv(
- ENV_NETDATA_LIB_DIR,
- '@varlibdir_POST@',
- )
- plugin_user_config = os.getenv(
- ENV_NETDATA_USER_CONFIG_DIR,
- '@configdir_POST@',
- )
- plugin_stock_config = os.getenv(
- ENV_NETDATA_STOCK_CONFIG_DIR,
- '@libconfigdir_POST@',
- )
- pluginsd = os.getenv(
- ENV_NETDATA_PLUGINS_DIR,
- os.path.dirname(__file__),
- )
- locks = os.getenv(
- ENV_NETDATA_LOCK_DIR,
- os.path.join('@varlibdir_POST@', 'lock')
- )
- modules_user_config = os.path.join(plugin_user_config, 'python.d')
- modules_stock_config = os.path.join(plugin_stock_config, 'python.d')
- modules = os.path.abspath(pluginsd + '/../python.d')
- user_modules = [os.path.join(p, 'python.d') for p in
- os.getenv(ENV_NETDATA_USER_PLUGINS_DIRS, "").split(" ") if
- p]
-
- Dirs = collections.namedtuple(
- 'Dirs',
- [
- 'plugin_user_config',
- 'plugin_stock_config',
- 'modules_user_config',
- 'modules_stock_config',
- 'modules',
- 'user_modules',
- 'var_lib',
- 'locks',
- ]
- )
- return Dirs(
- plugin_user_config,
- plugin_stock_config,
- modules_user_config,
- modules_stock_config,
- modules,
- user_modules,
- var_lib,
- locks,
- )
-
-
-DIRS = dirs()
-
-IS_ATTY = sys.stdout.isatty() or sys.stderr.isatty()
-
-MODULE_SUFFIX = '.chart.py'
-
-
-def find_available_modules(*directories):
- AvailableModule = collections.namedtuple(
- 'AvailableModule',
- [
- 'filepath',
- 'name',
- ]
- )
- available = list()
- for d in directories:
- try:
- if not os.path.isdir(d):
- continue
- files = sorted(os.listdir(d))
- except OSError:
- continue
- modules = [m for m in files if m.endswith(MODULE_SUFFIX)]
- available.extend([AvailableModule(os.path.join(d, m), m[:-len(MODULE_SUFFIX)]) for m in modules])
-
- return available
-
-
-def available_modules():
- obsolete = (
- 'apache_cache', # replaced by web_log
- 'cpuidle', # rewritten in C
- 'cpufreq', # rewritten in C
- 'gunicorn_log', # replaced by web_log
- 'linux_power_supply', # rewritten in C
- 'nginx_log', # replaced by web_log
- 'mdstat', # rewritten in C
- 'sslcheck', # rewritten in Go, memory leak bug https://github.com/netdata/netdata/issues/5624
- 'unbound', # rewritten in Go
- )
-
- stock = [m for m in find_available_modules(DIRS.modules) if m.name not in obsolete]
- user = find_available_modules(*DIRS.user_modules)
-
- available, seen = list(), set()
- for m in user + stock:
- if m.name in seen:
- continue
- seen.add(m.name)
- available.append(m)
-
- return available
-
-
-AVAILABLE_MODULES = available_modules()
-
-JOB_BASE_CONF = {
- 'update_every': int(os.getenv(ENV_NETDATA_UPDATE_EVERY, 1)),
- 'priority': 60000,
- 'autodetection_retry': 0,
- 'chart_cleanup': 10,
- 'penalty': True,
- 'name': str(),
-}
-
-PLUGIN_BASE_CONF = {
- 'enabled': True,
- 'default_run': True,
- 'gc_run': True,
- 'gc_interval': 300,
-}
-
-
-def multi_path_find(name, *paths):
- for path in paths:
- abs_name = os.path.join(path, name)
- if os.path.isfile(abs_name):
- return abs_name
- return str()
-
-
-def load_module(name, filepath):
- module = SourceFileLoader('pythond_' + name, filepath)
- if isinstance(module, types.ModuleType):
- return module
- return module.load_module()
-
-
-class ModuleConfig:
- def __init__(self, name, config=None):
- self.name = name
- self.config = config or OrderedDict()
- self.is_stock = False
-
- def load(self, abs_path):
- if not IS_ATTY:
- self.is_stock = abs_path.startswith(DIRS.modules_stock_config)
- self.config.update(load_config(abs_path) or dict())
-
- def defaults(self):
- keys = (
- 'update_every',
- 'priority',
- 'autodetection_retry',
- 'chart_cleanup',
- 'penalty',
- )
- return dict((k, self.config[k]) for k in keys if k in self.config)
-
- def create_job(self, job_name, job_config=None):
- job_config = job_config or dict()
-
- config = OrderedDict()
- config.update(job_config)
- config['job_name'] = job_name
- config['__is_stock'] = self.is_stock
- for k, v in self.defaults().items():
- config.setdefault(k, v)
-
- return config
-
- def job_names(self):
- return [v for v in self.config if isinstance(self.config.get(v), dict)]
-
- def single_job(self):
- return [self.create_job(self.name, self.config)]
-
- def multi_job(self):
- return [self.create_job(n, self.config[n]) for n in self.job_names()]
-
- def create_jobs(self):
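-        # a config with nested dict sections defines one job per section;
-        # otherwise the whole config is treated as a single job named after
-        # the module itself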
- return self.multi_job() or self.single_job()
-
-
-class JobsConfigsBuilder:
- def __init__(self, config_dirs):
- self.config_dirs = config_dirs
- self.log = PythonDLogger()
- self.job_defaults = None
- self.module_defaults = None
- self.min_update_every = None
-
- def load_module_config(self, module_name):
- name = '{0}.conf'.format(module_name)
- self.log.debug("[{0}] looking for '{1}' in {2}".format(module_name, name, self.config_dirs))
- config = ModuleConfig(module_name)
-
- abs_path = multi_path_find(name, *self.config_dirs)
- if not abs_path:
- self.log.warning("[{0}] '{1}' was not found".format(module_name, name))
- return config
-
- self.log.debug("[{0}] loading '{1}'".format(module_name, abs_path))
- try:
- config.load(abs_path)
- except Exception as error:
- self.log.error("[{0}] error on loading '{1}' : {2}".format(module_name, abs_path, repr(error)))
- return None
-
- self.log.debug("[{0}] '{1}' is loaded".format(module_name, abs_path))
- return config
-
- @staticmethod
- def apply_defaults(jobs, defaults):
- if defaults is None:
- return
- for k, v in defaults.items():
- for job in jobs:
- job.setdefault(k, v)
-
- def set_min_update_every(self, jobs, min_update_every):
- if min_update_every is None:
- return
- for job in jobs:
-            if 'update_every' in job and job['update_every'] < min_update_every:
-                job['update_every'] = min_update_every
-
- def build(self, module_name):
- config = self.load_module_config(module_name)
- if config is None:
- return None
-
- configs = config.create_jobs()
- if not config.is_stock:
- self.log.info("[{0}] built {1} job(s) configs".format(module_name, len(configs)))
-
- self.apply_defaults(configs, self.module_defaults)
- self.apply_defaults(configs, self.job_defaults)
- self.set_min_update_every(configs, self.min_update_every)
-
- return configs
-
-
-JOB_STATUS_ACTIVE = 'active'
-JOB_STATUS_RECOVERING = 'recovering'
-JOB_STATUS_DROPPED = 'dropped'
-JOB_STATUS_INIT = 'initial'
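-
-# Job lifecycle: initial -> active, recovering, or dropped. Recovering jobs
-# are re-checked roughly every `autodetection_retry` seconds.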
-
-
-class Job(threading.Thread):
- inf = -1
-
- def __init__(self, service, module_name, config):
- threading.Thread.__init__(self)
- self.daemon = True
- self.service = service
- self.module_name = module_name
- self.config = config
- self.real_name = config['job_name']
- self.actual_name = config['override_name'] or self.real_name
- self.autodetection_retry = config['autodetection_retry']
- self.checks = self.inf
- self.job = None
- self.is_stock = config.get('__is_stock', False)
- self.status = JOB_STATUS_INIT
-
- def is_inited(self):
- return self.job is not None
-
- def init(self):
- self.job = self.service(configuration=copy.deepcopy(self.config))
-
- def full_name(self):
- return self.job.name
-
- def check(self):
- if self.is_stock:
- self.job.logger.mute()
-
- ok = self.job.check()
-
- self.job.logger.unmute()
- self.checks -= self.checks != self.inf and not ok
-
- return ok
-
- def create(self):
- self.job.create()
-
- def need_to_recheck(self):
- return self.autodetection_retry != 0 and self.checks != 0
-
- def run(self):
- self.job.run()
-
-
-class ModuleSrc:
- def __init__(self, m):
- self.name = m.name
- self.filepath = m.filepath
- self.src = None
-
- def load(self):
- self.src = load_module(self.name, self.filepath)
-
- def get(self, key):
- return getattr(self.src, key, None)
-
- def service(self):
- return self.get('Service')
-
- def defaults(self):
- keys = (
- 'update_every',
- 'priority',
- 'autodetection_retry',
- 'chart_cleanup',
- 'penalty',
- )
- return dict((k, self.get(k)) for k in keys if self.get(k) is not None)
-
- def is_disabled_by_default(self):
- return bool(self.get('disabled_by_default'))
-
-
-class JobsStatuses:
- def __init__(self):
- self.items = OrderedDict()
-
- def dump(self):
- return json.dumps(self.items, indent=2)
-
- def get(self, module_name, job_name):
- if module_name not in self.items:
- return None
- return self.items[module_name].get(job_name)
-
- def has(self, module_name, job_name):
- return self.get(module_name, job_name) is not None
-
- def from_file(self, path):
- with open(path) as f:
- data = json.load(f)
- return self.from_json(data)
-
- @staticmethod
- def from_json(items):
- if not isinstance(items, dict):
- raise Exception('items obj has wrong type : {0}'.format(type(items)))
- if not items:
- return JobsStatuses()
-
- v = OrderedDict()
- for mod_name in sorted(items):
- if not items[mod_name]:
- continue
- v[mod_name] = OrderedDict()
- for job_name in sorted(items[mod_name]):
- v[mod_name][job_name] = items[mod_name][job_name]
-
- rv = JobsStatuses()
- rv.items = v
- return rv
-
- @staticmethod
- def from_jobs(jobs):
- v = OrderedDict()
- for job in jobs:
- status = job.status
- if status not in (JOB_STATUS_ACTIVE, JOB_STATUS_RECOVERING):
- continue
- if job.module_name not in v:
- v[job.module_name] = OrderedDict()
- v[job.module_name][job.real_name] = status
-
- rv = JobsStatuses()
- rv.items = v
- return rv
-
-
-class StdoutSaver:
- @staticmethod
- def save(dump):
- print(dump)
-
-
-class CachedFileSaver:
- def __init__(self, path):
- self.last_save_success = False
- self.last_saved_dump = str()
- self.path = path
-
- def save(self, dump):
- if self.last_save_success and self.last_saved_dump == dump:
- return
- try:
- with open(self.path, 'w') as out:
- out.write(dump)
- except Exception:
- self.last_save_success = False
- raise
- self.last_saved_dump = dump
- self.last_save_success = True
-
-
-class PluginConfig(dict):
- def __init__(self, *args):
- dict.__init__(self, *args)
-
- def is_module_explicitly_enabled(self, module_name):
- return self._is_module_enabled(module_name, True)
-
- def is_module_enabled(self, module_name):
- return self._is_module_enabled(module_name, False)
-
- def _is_module_enabled(self, module_name, explicit):
- if module_name in self:
- return self[module_name]
- if explicit:
- return False
- return self['default_run']
-
-
-class FileLockRegistry:
- def __init__(self, path):
- self.path = path
- self.locks = dict()
-
- @staticmethod
- def rename(name):
- # go version name is 'docker'
- if name.startswith("dockerd"):
- name = "docker" + name[7:]
- return name
-
- def register(self, name):
- name = self.rename(name)
- if name in self.locks:
- return
- file = os.path.join(self.path, '{0}.collector.lock'.format(name))
- lock = filelock.FileLock(file)
- lock.acquire(timeout=0)
- self.locks[name] = lock
-
- def unregister(self, name):
- name = self.rename(name)
- if name not in self.locks:
- return
- lock = self.locks[name]
- lock.release()
- del self.locks[name]
-
-
-class DummyRegistry:
- def register(self, name):
- pass
-
- def unregister(self, name):
- pass
-
-
-class Plugin:
- config_name = 'python.d.conf'
- jobs_status_dump_name = 'pythond-jobs-statuses.json'
-
- def __init__(self, modules_to_run, min_update_every, registry):
- self.modules_to_run = modules_to_run
- self.min_update_every = min_update_every
- self.config = PluginConfig(PLUGIN_BASE_CONF)
- self.log = PythonDLogger()
- self.registry = registry
- self.started_jobs = collections.defaultdict(dict)
- self.jobs = list()
- self.saver = None
- self.runs = 0
-
- def load_config_file(self, filepath, expected):
- self.log.debug("looking for '{0}'".format(filepath))
- if not os.path.isfile(filepath):
- log = self.log.info if not expected else self.log.error
- log("'{0}' was not found".format(filepath))
- return dict()
- try:
- config = load_config(filepath)
- except Exception as error:
- self.log.error("error on loading '{0}' : {1}".format(filepath, repr(error)))
- return dict()
- self.log.debug("'{0}' is loaded".format(filepath))
- return config
-
- def load_config(self):
- user_config = self.load_config_file(
- filepath=os.path.join(DIRS.plugin_user_config, self.config_name),
- expected=False,
- )
- stock_config = self.load_config_file(
- filepath=os.path.join(DIRS.plugin_stock_config, self.config_name),
- expected=True,
- )
- self.config.update(stock_config)
- self.config.update(user_config)
-
- def load_job_statuses(self):
- self.log.debug("looking for '{0}' in {1}".format(self.jobs_status_dump_name, DIRS.var_lib))
- abs_path = multi_path_find(self.jobs_status_dump_name, DIRS.var_lib)
- if not abs_path:
- self.log.warning("'{0}' was not found".format(self.jobs_status_dump_name))
- return
-
- self.log.debug("loading '{0}'".format(abs_path))
- try:
- statuses = JobsStatuses().from_file(abs_path)
- except Exception as error:
- self.log.error("'{0}' invalid JSON format: {1}".format(
- abs_path, ' '.join([v.strip() for v in str(error).split('\n')])))
- return None
- self.log.debug("'{0}' is loaded".format(abs_path))
- return statuses
-
- def create_jobs(self, job_statuses=None):
- paths = [
- DIRS.modules_user_config,
- DIRS.modules_stock_config,
- ]
-
- builder = JobsConfigsBuilder(paths)
- builder.job_defaults = JOB_BASE_CONF
- builder.min_update_every = self.min_update_every
-
- jobs = list()
- for m in self.modules_to_run:
- if not self.config.is_module_enabled(m.name):
- self.log.info("[{0}] is disabled in the configuration file, skipping it".format(m.name))
- continue
-
- src = ModuleSrc(m)
- try:
- src.load()
- except Exception as error:
- self.log.warning("[{0}] error on loading source : {1}, skipping it".format(m.name, repr(error)))
- continue
- self.log.debug("[{0}] loaded module source : '{1}'".format(m.name, m.filepath))
-
- if not (src.service() and callable(src.service())):
- self.log.warning("[{0}] has no callable Service object, skipping it".format(m.name))
- continue
-
- if src.is_disabled_by_default() and not self.config.is_module_explicitly_enabled(m.name):
- self.log.info("[{0}] is disabled by default, skipping it".format(m.name))
- continue
-
- builder.module_defaults = src.defaults()
- configs = builder.build(m.name)
- if not configs:
- self.log.info("[{0}] has no job configs, skipping it".format(m.name))
- continue
-
- for config in configs:
- config['job_name'] = re.sub(r'\s+', '_', config['job_name'])
- config['override_name'] = re.sub(r'\s+', '_', config.pop('name'))
-
- job = Job(src.service(), m.name, config)
-
- was_previously_active = job_statuses and job_statuses.has(job.module_name, job.real_name)
- if was_previously_active and job.autodetection_retry == 0:
- self.log.debug('{0}[{1}] was previously active, applying recovering settings'.format(
- job.module_name, job.real_name))
- job.checks = 11
- job.autodetection_retry = 30
-
- jobs.append(job)
-
- return jobs
-
- def setup(self):
- self.load_config()
-
- if not self.config['enabled']:
- self.log.info('disabled in the configuration file')
- return False
-
- statuses = self.load_job_statuses()
-
- self.jobs = self.create_jobs(statuses)
- if not self.jobs:
- self.log.info('no jobs to run')
- return False
-
- if not IS_ATTY:
- abs_path = os.path.join(DIRS.var_lib, self.jobs_status_dump_name)
- self.saver = CachedFileSaver(abs_path)
- return True
-
- def start_jobs(self, *jobs):
- for job in jobs:
- if job.status not in (JOB_STATUS_INIT, JOB_STATUS_RECOVERING):
- continue
-
- if job.actual_name in self.started_jobs[job.module_name]:
- self.log.info('{0}[{1}] : already served by another job, skipping it'.format(
- job.module_name, job.real_name))
- job.status = JOB_STATUS_DROPPED
- continue
-
- if not job.is_inited():
- try:
- job.init()
- except Exception as error:
- self.log.warning("{0}[{1}] : unhandled exception on init : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
- job.status = JOB_STATUS_DROPPED
- continue
-
- try:
- ok = job.check()
- except Exception as error:
- if not job.is_stock:
- self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
- job.status = JOB_STATUS_DROPPED
- continue
- if not ok:
- if not job.is_stock:
- self.log.info('{0}[{1}] : check failed'.format(job.module_name, job.real_name))
- job.status = JOB_STATUS_RECOVERING if job.need_to_recheck() else JOB_STATUS_DROPPED
- continue
- self.log.info('{0}[{1}] : check success'.format(job.module_name, job.real_name))
-
- try:
- self.registry.register(job.full_name())
- except filelock.Timeout as error:
- self.log.info('{0}[{1}] : already registered by another process, skipping the job ({2})'.format(
- job.module_name, job.real_name, error))
- job.status = JOB_STATUS_DROPPED
- continue
- except Exception as error:
- self.log.warning('{0}[{1}] : registration failed: {2}, skipping the job'.format(
- job.module_name, job.real_name, error))
- job.status = JOB_STATUS_DROPPED
- continue
-
- try:
- job.create()
- except Exception as error:
- self.log.warning("{0}[{1}] : unhandled exception on create : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
- job.status = JOB_STATUS_DROPPED
- try:
- self.registry.unregister(job.full_name())
- except Exception as error:
- self.log.warning('{0}[{1}] : deregistration failed: {2}'.format(
- job.module_name, job.real_name, error))
- continue
-
- self.started_jobs[job.module_name] = job.actual_name
- job.status = JOB_STATUS_ACTIVE
- job.start()
-
- @staticmethod
- def keep_alive():
- if not IS_ATTY:
- safe_print('\n')
-
- def garbage_collection(self):
- if self.config['gc_run'] and self.runs % self.config['gc_interval'] == 0:
- v = gc.collect()
- self.log.debug('GC collection run result: {0}'.format(v))
-
- def restart_recovering_jobs(self):
- for job in self.jobs:
- if job.status != JOB_STATUS_RECOVERING:
- continue
- if self.runs % job.autodetection_retry != 0:
- continue
- self.start_jobs(job)
-
- def cleanup_jobs(self):
- self.jobs = [j for j in self.jobs if j.status != JOB_STATUS_DROPPED]
-
- def have_alive_jobs(self):
- return next(
- (True for job in self.jobs if job.status in (JOB_STATUS_RECOVERING, JOB_STATUS_ACTIVE)),
- False,
- )
-
- def save_job_statuses(self):
- if self.saver is None:
- return
- if self.runs % 10 != 0:
- return
- dump = JobsStatuses().from_jobs(self.jobs).dump()
- try:
- self.saver.save(dump)
- except Exception as error:
- self.log.error("error on saving jobs statuses dump : {0}".format(repr(error)))
-
- def serve_once(self):
- if not self.have_alive_jobs():
- self.log.info('no jobs to serve')
- return False
-
- time.sleep(1)
- self.runs += 1
-
- self.keep_alive()
- self.garbage_collection()
- self.cleanup_jobs()
- self.restart_recovering_jobs()
- self.save_job_statuses()
- return True
-
- def serve(self):
- while self.serve_once():
- pass
-
- def run(self):
- self.start_jobs(*self.jobs)
- self.serve()
-
-
-def parse_command_line():
- opts = sys.argv[:][1:]
-
- debug = False
- trace = False
- nolock = False
- update_every = 1
- modules_to_run = list()
-
- def find_first_positive_int(values):
- return next((v for v in values if v.isdigit() and int(v) >= 1), None)
-
- u = find_first_positive_int(opts)
- if u is not None:
- update_every = int(u)
- opts.remove(u)
- if 'debug' in opts:
- debug = True
- opts.remove('debug')
- if 'trace' in opts:
- trace = True
- opts.remove('trace')
- if 'nolock' in opts:
- nolock = True
- opts.remove('nolock')
- if opts:
- modules_to_run = list(opts)
-
- cmd = collections.namedtuple(
- 'CMD',
- [
- 'update_every',
- 'debug',
- 'trace',
- 'nolock',
- 'modules_to_run',
- ])
- return cmd(
- update_every,
- debug,
- trace,
- nolock,
- modules_to_run,
- )
-
-
-def guess_module(modules, *names):
- def guess(n):
- found = None
- for i, _ in enumerate(n):
-            cur = [x for x in modules if x.startswith(n[:i + 1])]
- if not cur:
- return found
- found = cur
- return found
-
- guessed = list()
- for name in names:
- name = name.lower()
- m = guess(name)
- if m:
- guessed.extend(m)
- return sorted(set(guessed))
-
-
-def disable():
- if not IS_ATTY:
- safe_print('DISABLE')
- exit(0)
-
-
-def get_modules_to_run(cmd):
- if not cmd.modules_to_run:
- return AVAILABLE_MODULES
-
- modules_to_run, seen = list(), set()
- for m in AVAILABLE_MODULES:
- if m.name not in cmd.modules_to_run or m.name in seen:
- continue
- seen.add(m.name)
- modules_to_run.append(m)
-
- return modules_to_run
-
-
-def main():
- cmd = parse_command_line()
- log = PythonDLogger()
-
- level = os.getenv('NETDATA_LOG_LEVEL') or str()
- level = level.lower()
- if level == 'debug':
- log.logger.severity = 'DEBUG'
- elif level == 'info':
- log.logger.severity = 'INFO'
- elif level == 'warn' or level == 'warning':
- log.logger.severity = 'WARNING'
- elif level == 'err' or level == 'error':
- log.logger.severity = 'ERROR'
-
- if cmd.debug:
- log.logger.severity = 'DEBUG'
- if cmd.trace:
- log.log_traceback = True
-
- log.info('using python v{0}'.format(PY_VERSION[0]))
-
- if DIRS.locks and not cmd.nolock:
- registry = FileLockRegistry(DIRS.locks)
- else:
- registry = DummyRegistry()
-
- unique_avail_module_names = set([m.name for m in AVAILABLE_MODULES])
- unknown = set(cmd.modules_to_run) - unique_avail_module_names
- if unknown:
- log.error('unknown modules : {0}'.format(sorted(list(unknown))))
- guessed = guess_module(unique_avail_module_names, *cmd.modules_to_run)
- if guessed:
- log.info('probably you meant : \n{0}'.format(pprint.pformat(guessed, width=1)))
- return
-
- p = Plugin(
- get_modules_to_run(cmd),
- cmd.update_every,
- registry,
- )
-
- # cheap attempt to reduce chance of python.d job running before go.d
- # TODO: better implementation needed
- if not IS_ATTY:
- time.sleep(1.5)
-
- try:
- if not p.setup():
- return
- p.run()
- except KeyboardInterrupt:
- pass
- log.info('exiting from main...')
-
-
-if __name__ == "__main__":
- main()
- disable()
diff --git a/collectors/python.d.plugin/python_modules/__init__.py b/collectors/python.d.plugin/python_modules/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/collectors/python.d.plugin/python_modules/__init__.py
+++ /dev/null
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
deleted file mode 100644
index a74b4239e..000000000
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-
-from subprocess import Popen, PIPE
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from bases.collection import find_binary
-
-
-class ExecutableService(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.command = None
-
- def _get_raw_data(self, stderr=False, command=None):
- """
- Get raw data from executed command
- :return: <list>
- """
- command = command or self.command
- self.debug("Executing command '{0}'".format(' '.join(command)))
- try:
- p = Popen(command, stdout=PIPE, stderr=PIPE)
- except Exception as error:
- self.error('Executing command {0} resulted in error: {1}'.format(command, error))
- return None
-
- data = list()
- std = p.stderr if stderr else p.stdout
- for line in std:
- try:
- data.append(line.decode('utf-8'))
- except (TypeError, UnicodeDecodeError):
- continue
-
- return data
-
- def check(self):
- """
- Parse basic configuration, check if command is whitelisted and is returning values
- :return: <boolean>
- """
- # Preference: 1. "command" from configuration file 2. "command" from plugin (if specified)
- if 'command' in self.configuration:
- self.command = self.configuration['command']
-
- # "command" must be: 1.not None 2. type <str>
- if not (self.command and isinstance(self.command, str)):
- self.error('Command is not defined or command type is not <str>')
- return False
-
- # Split "command" into: 1. command <str> 2. options <list>
- command, opts = self.command.split()[0], self.command.split()[1:]
-
- # Check for "bad" symbols in options. No pipes, redirects etc.
- opts_list = ['&', '|', ';', '>', '<']
- bad_opts = set(''.join(opts)) & set(opts_list)
- if bad_opts:
- self.error("Bad command argument(s): {opts}".format(opts=bad_opts))
- return False
-
- # Find absolute path ('echo' => '/bin/echo')
- if '/' not in command:
- command = find_binary(command)
- if not command:
- self.error('Can\'t locate "{command}" binary'.format(command=self.command))
- return False
- # Check if binary exist and executable
- else:
- if not os.access(command, os.X_OK):
- self.error('"{binary}" is not executable'.format(binary=command))
- return False
-
- self.command = [command] + opts if opts else [command]
-
- try:
- data = self._get_data()
- except Exception as error:
- self.error('_get_data() failed. Command: {command}. Error: {error}'.format(command=self.command,
- error=error))
- return False
-
- if isinstance(data, dict) and data:
- return True
- self.error('Command "{command}" returned no data'.format(command=self.command))
- return False
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
deleted file mode 100644
index a55e33f52..000000000
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/LogService.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from glob import glob
-import sys
-import os
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-
-class LogService(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.log_path = self.configuration.get('path')
- self.__glob_path = self.log_path
- self._last_position = 0
- self.__re_find = dict(current=0, run=0, maximum=60)
- self.__open_args = {'errors': 'replace'} if sys.version_info[0] > 2 else {}
-
- def _get_raw_data(self):
- """
- Get log lines since last poll
- :return: list
- """
- lines = list()
- try:
- if self.__re_find['current'] == self.__re_find['run']:
- self._find_recent_log_file()
- size = os.path.getsize(self.log_path)
- if size == self._last_position:
- self.__re_find['current'] += 1
- return list() # return empty list if nothing has changed
- elif size < self._last_position:
- self._last_position = 0 # read from beginning if file has shrunk
-
- with open(self.log_path, **self.__open_args) as fp:
- fp.seek(self._last_position)
- for line in fp:
- lines.append(line)
- self._last_position = fp.tell()
- self.__re_find['current'] = 0
- except (OSError, IOError) as error:
- self.__re_find['current'] += 1
- self.error(str(error))
-
- return lines or None
-
- def _find_recent_log_file(self):
- """
- :return:
- """
- self.__re_find['run'] = self.__re_find['maximum']
- self.__re_find['current'] = 0
- self.__glob_path = self.__glob_path or self.log_path # workaround for modules w/o config files
- path_list = glob(self.__glob_path)
- if path_list:
- self.log_path = max(path_list)
- return True
- return False
-
- def check(self):
- """
- Parse basic configuration and check if log file exists
- :return: boolean
- """
- if not self.log_path:
- self.error('No path to log specified')
-            return False
-
- if self._find_recent_log_file() and os.access(self.log_path, os.R_OK) and os.path.isfile(self.log_path):
- return True
- self.error('Cannot access {0}'.format(self.log_path))
- return False
-
- def create(self):
- # set cursor at last byte of log file
- self._last_position = os.path.getsize(self.log_path)
- status = SimpleService.create(self)
- return status
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
deleted file mode 100644
index 7f5c7d221..000000000
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/MySQLService.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from sys import exc_info
-
-try:
- import MySQLdb
-
- PY_MYSQL = True
-except ImportError:
- try:
- import pymysql as MySQLdb
-
- PY_MYSQL = True
- except ImportError:
- PY_MYSQL = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-
-class MySQLService(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.__connection = None
- self.__conn_properties = dict()
- self.extra_conn_properties = dict()
- self.__queries = self.configuration.get('queries', dict())
- self.queries = dict()
-
- def __connect(self):
- try:
- connection = MySQLdb.connect(connect_timeout=self.update_every, **self.__conn_properties)
- except (MySQLdb.MySQLError, TypeError, AttributeError) as error:
- return None, str(error)
- else:
- return connection, None
-
- def check(self):
- def get_connection_properties(conf, extra_conf):
- properties = dict()
- if conf.get('user'):
- properties['user'] = conf['user']
- if conf.get('pass'):
- properties['passwd'] = conf['pass']
-
- if conf.get('socket'):
- properties['unix_socket'] = conf['socket']
- elif conf.get('host'):
- properties['host'] = conf['host']
- properties['port'] = int(conf.get('port', 3306))
- elif conf.get('my.cnf'):
- properties['read_default_file'] = conf['my.cnf']
-
- if conf.get('ssl'):
- properties['ssl'] = conf['ssl']
-
- if isinstance(extra_conf, dict) and extra_conf:
- properties.update(extra_conf)
-
- return properties or None
-
- def is_valid_queries_dict(raw_queries, log_error):
- """
- :param raw_queries: dict:
- :param log_error: function:
- :return: dict or None
-
- raw_queries is valid when: type <dict> and not empty after is_valid_query(for all queries)
- """
-
- def is_valid_query(query):
- return all([isinstance(query, str),
- query.startswith(('SELECT', 'select', 'SHOW', 'show'))])
-
- if hasattr(raw_queries, 'keys') and raw_queries:
- valid_queries = dict([(n, q) for n, q in raw_queries.items() if is_valid_query(q)])
- bad_queries = set(raw_queries) - set(valid_queries)
-
- if bad_queries:
- log_error('Removed query(s): {queries}'.format(queries=bad_queries))
- return valid_queries
- else:
-            log_error('Unsupported "queries" format. Must be a non-empty <dict>')
- return None
-
- if not PY_MYSQL:
- self.error('MySQLdb or PyMySQL module is needed to use mysql.chart.py plugin')
- return False
-
- # Preference: 1. "queries" from the configuration file 2. "queries" from the module
- self.queries = self.__queries or self.queries
- # Check if "self.queries" exist, not empty and all queries are in valid format
- self.queries = is_valid_queries_dict(self.queries, self.error)
- if not self.queries:
-            return False
-
- # Get connection properties
- self.__conn_properties = get_connection_properties(self.configuration, self.extra_conn_properties)
- if not self.__conn_properties:
- self.error('Connection properties are missing')
- return False
-
- # Create connection to the database
- self.__connection, error = self.__connect()
- if error:
- self.error('Can\'t establish connection to MySQL: {error}'.format(error=error))
- return False
-
- try:
- data = self._get_data()
- except Exception as error:
- self.error('_get_data() failed. Error: {error}'.format(error=error))
- return False
-
- if isinstance(data, dict) and data:
- return True
- self.error("_get_data() returned no data or type is not <dict>")
- return False
-
- def _get_raw_data(self, description=None):
- """
- Get raw data from MySQL server
- :return: dict: fetchall() or (fetchall(), description)
- """
-
- if not self.__connection:
- self.__connection, error = self.__connect()
- if error:
- return None
-
- raw_data = dict()
- queries = dict(self.queries)
- try:
- cursor = self.__connection.cursor()
- for name, query in queries.items():
- try:
- cursor.execute(query)
- except (MySQLdb.ProgrammingError, MySQLdb.OperationalError) as error:
- if self.__is_error_critical(err_class=exc_info()[0], err_text=str(error)):
- cursor.close()
- raise RuntimeError
-                    self.error('Removed query: {name}[{query}]. Error: {error}'.format(name=name,
-                                                                                       query=query,
-                                                                                       error=error))
- self.queries.pop(name)
- continue
- else:
- raw_data[name] = (cursor.fetchall(), cursor.description) if description else cursor.fetchall()
- cursor.close()
- self.__connection.commit()
- except (MySQLdb.MySQLError, RuntimeError, TypeError, AttributeError):
- self.__connection.close()
- self.__connection = None
- return None
- else:
- return raw_data or None
-
- @staticmethod
- def __is_error_critical(err_class, err_text):
- return err_class == MySQLdb.OperationalError and all(['denied' not in err_text,
- 'Unknown column' not in err_text])
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
deleted file mode 100644
index 3f122e1d9..000000000
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-
-from bases.charts import Charts, ChartError, create_runtime_chart
-from bases.collection import safe_print
-from bases.loggers import PythonDLogger
-from third_party.monotonic import monotonic
-from time import sleep, time
-
-RUNTIME_CHART_UPDATE = 'BEGIN netdata.runtime_{job_name} {since_last}\n' \
- 'SET run_time = {elapsed}\n' \
- 'END\n'
-
-PENALTY_EVERY = 5
-MAX_PENALTY = 10 * 60 # 10 minutes
-
-ND_INTERNAL_MONITORING_DISABLED = os.getenv("NETDATA_INTERNALS_MONITORING") == "NO"
-
-
-class RuntimeCounters:
- def __init__(self, configuration):
- """
- :param configuration: <dict>
- """
- self.update_every = int(configuration.pop('update_every'))
- self.do_penalty = configuration.pop('penalty')
-
- self.start_mono = 0
- self.start_real = 0
- self.retries = 0
- self.penalty = 0
- self.elapsed = 0
- self.prev_update = 0
-
- self.runs = 1
-
- def calc_next(self):
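-        # align the next run to a multiple of update_every on the monotonic
-        # clock, then push it back by any accumulated failure penalty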
- self.start_mono = monotonic()
- return self.start_mono - (self.start_mono % self.update_every) + self.update_every + self.penalty
-
- def sleep_until_next(self):
- next_time = self.calc_next()
- while self.start_mono < next_time:
- sleep(next_time - self.start_mono)
- self.start_mono = monotonic()
- self.start_real = time()
-
- def handle_retries(self):
- self.retries += 1
- if self.do_penalty and self.retries % PENALTY_EVERY == 0:
- self.penalty = round(min(self.retries * self.update_every / 2, MAX_PENALTY))
-
-
-def clean_module_name(name):
- if name.startswith('pythond_'):
- return name[8:]
- return name
-
-
-class SimpleService(PythonDLogger, object):
- """
- Prototype of Service class.
- Implemented basic functionality to run jobs by `python.d.plugin`
- """
-
- def __init__(self, configuration, name=''):
- """
- :param configuration: <dict>
- :param name: <str>
- """
- PythonDLogger.__init__(self)
- self.configuration = configuration
- self.order = list()
- self.definitions = dict()
-
- self.module_name = clean_module_name(self.__module__)
- self.job_name = configuration.pop('job_name')
- self.actual_job_name = self.job_name or self.module_name
- self.override_name = configuration.pop('override_name')
- self.fake_name = None
-
- self._runtime_counters = RuntimeCounters(configuration=configuration)
- self.charts = Charts(job_name=self.actual_name,
- actual_job_name=self.actual_job_name,
- priority=configuration.pop('priority'),
- cleanup=configuration.pop('chart_cleanup'),
- get_update_every=self.get_update_every,
- module_name=self.module_name)
-
- def __repr__(self):
- return '<{cls_bases}: {name}>'.format(cls_bases=', '.join(c.__name__ for c in self.__class__.__bases__),
- name=self.name)
-
- @property
- def name(self):
- name = self.override_name or self.job_name
- if name and name != self.module_name:
- return '_'.join([self.module_name, name])
- return self.module_name
-
- def actual_name(self):
- return self.fake_name or self.name
-
- @property
- def runs_counter(self):
- return self._runtime_counters.runs
-
- @property
- def update_every(self):
- return self._runtime_counters.update_every
-
- @update_every.setter
- def update_every(self, value):
- """
- :param value: <int>
- :return:
- """
- self._runtime_counters.update_every = value
-
- def get_update_every(self):
- return self.update_every
-
- def check(self):
- """
- check() prototype
- :return: boolean
- """
- self.debug("job doesn't implement check() method. Using default which simply invokes get_data().")
- data = self.get_data()
- if data and isinstance(data, dict):
- return True
- self.debug('returned value is wrong: {0}'.format(data))
- return False
-
- @create_runtime_chart
- def create(self):
- for chart_name in self.order:
- chart_config = self.definitions.get(chart_name)
-
- if not chart_config:
- self.debug("create() => [NOT ADDED] chart '{chart_name}' not in definitions. "
- "Skipping it.".format(chart_name=chart_name))
- continue
-
- # create chart
- chart_params = [chart_name] + chart_config['options']
- try:
- self.charts.add_chart(params=chart_params)
- except ChartError as error:
- self.error("create() => [NOT ADDED] (chart '{chart}': {error})".format(chart=chart_name,
- error=error))
- continue
-
- # add dimensions to chart
- for dimension in chart_config['lines']:
- try:
- self.charts[chart_name].add_dimension(dimension)
- except ChartError as error:
- self.error("create() => [NOT ADDED] (dimension '{dimension}': {error})".format(dimension=dimension,
- error=error))
- continue
-
- # add variables to chart
- if 'variables' in chart_config:
- for variable in chart_config['variables']:
- try:
- self.charts[chart_name].add_variable(variable)
- except ChartError as error:
- self.error("create() => [NOT ADDED] (variable '{var}': {error})".format(var=variable,
- error=error))
- continue
-
- del self.order
- del self.definitions
-
- # True if job has at least 1 chart else False
- return bool(self.charts)
-
- def run(self):
- """
- Runs the job in a thread. Handles retries.
- Exits when the job fails or times out.
- :return: None
- """
- job = self._runtime_counters
- self.debug('started, update frequency: {freq}'.format(freq=job.update_every))
-
- while True:
- job.sleep_until_next()
-
- since = 0
- if job.prev_update:
- since = int((job.start_real - job.prev_update) * 1e6)
-
- try:
- updated = self.update(interval=since)
- except Exception as error:
- self.error('update() unhandled exception: {error}'.format(error=error))
- updated = False
-
- job.runs += 1
-
- if not updated:
- job.handle_retries()
- else:
- job.elapsed = int((monotonic() - job.start_mono) * 1e3)
- job.prev_update = job.start_real
- job.retries, job.penalty = 0, 0
- if not ND_INTERNAL_MONITORING_DISABLED:
- safe_print(RUNTIME_CHART_UPDATE.format(job_name=self.name,
- since_last=since,
- elapsed=job.elapsed))
- self.debug('update => [{status}] (elapsed time: {elapsed}, failed retries in a row: {retries})'.format(
- status='OK' if updated else 'FAILED',
- elapsed=job.elapsed if updated else '-',
- retries=job.retries))
-
- def update(self, interval):
- """
- :return:
- """
- data = self.get_data()
- if not data:
- self.debug('get_data() returned no data')
- return False
- elif not isinstance(data, dict):
- self.debug('get_data() returned data of an incorrect type')
- return False
-
- updated = False
-
- for chart in self.charts:
- if chart.flags.obsoleted:
- if chart.can_be_updated(data):
- chart.refresh()
- else:
- continue
- elif self.charts.cleanup and chart.penalty >= self.charts.cleanup:
- chart.obsolete()
- self.info("chart '{0}' was suppressed due to non updating".format(chart.name))
- continue
-
- ok = chart.update(data, interval)
- if ok:
- updated = True
-
- if not updated:
- self.debug('none of the charts has been updated')
-
- return updated
-
- def get_data(self):
- return self._get_data()
-
- def _get_data(self):
- raise NotImplementedError
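The contract SimpleService leaves to subclasses is small: populate `order` and `definitions`, then implement `_get_data()`. A minimal sketch of a module built on it (hypothetical chart and dimension names; assumes the `bases` package above is importable and that the plugin supplies the configuration dict):

from random import randint

from bases.FrameworkServices.SimpleService import SimpleService

ORDER = ['random']
CHARTS = {
    'random': {
        'options': [None, 'A random number', 'number', 'random', 'example.random', 'line'],
        'lines': [
            ['value', 'value', 'absolute'],
        ],
    },
}


class Service(SimpleService):
    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS

    def _get_data(self):
        # return {dimension_id: numeric value}, or None to signal failure
        return {'value': randint(0, 100)}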
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
deleted file mode 100644
index d6c755058..000000000
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SocketService.py
+++ /dev/null
@@ -1,336 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import errno
-import socket
-
-try:
- import ssl
-except ImportError:
- _TLS_SUPPORT = False
-else:
- _TLS_SUPPORT = True
-
-if _TLS_SUPPORT:
- try:
- PROTOCOL_TLS = ssl.PROTOCOL_TLS
- except AttributeError:
- PROTOCOL_TLS = ssl.PROTOCOL_SSLv23
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-
-DEFAULT_CONNECT_TIMEOUT = 2.0
-DEFAULT_READ_TIMEOUT = 2.0
-DEFAULT_WRITE_TIMEOUT = 2.0
-
-
-class SocketService(SimpleService):
- def __init__(self, configuration=None, name=None):
- self._sock = None
- self._keep_alive = False
- self.host = 'localhost'
- self.port = None
- self.unix_socket = None
- self.dgram_socket = False
- self.request = ''
- self.tls = False
- self.cert = None
- self.key = None
- self.__socket_config = None
- self.__empty_request = "".encode()
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.connect_timeout = configuration.get('connect_timeout', DEFAULT_CONNECT_TIMEOUT)
- self.read_timeout = configuration.get('read_timeout', DEFAULT_READ_TIMEOUT)
- self.write_timeout = configuration.get('write_timeout', DEFAULT_WRITE_TIMEOUT)
-
- def _socket_error(self, message=None):
- if self.unix_socket is not None:
- self.error('unix socket "{socket}": {message}'.format(socket=self.unix_socket,
- message=message))
- else:
- if self.__socket_config is not None:
- _, _, _, _, sa = self.__socket_config
- self.error('socket to "{address}" port {port}: {message}'.format(address=sa[0],
- port=sa[1],
- message=message))
- else:
- self.error('unknown socket: {0}'.format(message))
-
- def _connect2socket(self, res=None):
- """
- Connect to a socket, passing the result of getaddrinfo()
- :return: boolean
- """
- if res is None:
- res = self.__socket_config
- if res is None:
- self.error("Cannot create socket to 'None':")
- return False
-
- af, sock_type, proto, _, sa = res
- try:
- self.debug('Creating socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
- self._sock = socket.socket(af, sock_type, proto)
- except socket.error as error:
- self.error('Failed to create socket "{address}", port {port}, error: {error}'.format(address=sa[0],
- port=sa[1],
- error=error))
- self._sock = None
- self.__socket_config = None
- return False
-
- if self.tls:
- try:
- self.debug('Encapsulating socket with TLS')
- self.debug('Using keyfile: {0}, certfile: {1}, cert_reqs: {2}, ssl_version: {3}'.format(
- self.key, self.cert, ssl.CERT_NONE, PROTOCOL_TLS
- ))
- self._sock = ssl.wrap_socket(self._sock,
- keyfile=self.key,
- certfile=self.cert,
- server_side=False,
- cert_reqs=ssl.CERT_NONE,
- ssl_version=PROTOCOL_TLS,
- )
- except (socket.error, ssl.SSLError, IOError, OSError) as error:
- self.error('failed to wrap socket : {0}'.format(repr(error)))
- self._disconnect()
- self.__socket_config = None
- return False
-
- try:
- self.debug('connecting socket to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
- self._sock.settimeout(self.connect_timeout)
- self.debug('set socket connect timeout to: {0}'.format(self._sock.gettimeout()))
- self._sock.connect(sa)
- except (socket.error, ssl.SSLError) as error:
- self.error('Failed to connect to "{address}", port {port}, error: {error}'.format(address=sa[0],
- port=sa[1],
- error=error))
- self._disconnect()
- self.__socket_config = None
- return False
-
- self.debug('connected to "{address}", port {port}'.format(address=sa[0], port=sa[1]))
- self.__socket_config = res
- return True
-
- def _connect2unixsocket(self):
- """
- Connect to a unix socket, given its filename
- :return: boolean
- """
- if self.unix_socket is None:
- self.error("cannot connect to unix socket 'None'")
- return False
-
- try:
- self.debug('attempting DGRAM unix socket "{0}"'.format(self.unix_socket))
- self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
- self._sock.settimeout(self.connect_timeout)
- self.debug('set socket connect timeout to: {0}'.format(self._sock.gettimeout()))
- self._sock.connect(self.unix_socket)
- self.debug('connected DGRAM unix socket "{0}"'.format(self.unix_socket))
- return True
- except socket.error as error:
- self.debug('Failed to connect DGRAM unix socket "{socket}": {error}'.format(socket=self.unix_socket,
- error=error))
-
- try:
- self.debug('attempting STREAM unix socket "{0}"'.format(self.unix_socket))
- self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- self._sock.settimeout(self.connect_timeout)
- self.debug('set socket connect timeout to: {0}'.format(self._sock.gettimeout()))
- self._sock.connect(self.unix_socket)
- self.debug('connected STREAM unix socket "{0}"'.format(self.unix_socket))
- return True
- except socket.error as error:
- self.debug('Failed to connect STREAM unix socket "{socket}": {error}'.format(socket=self.unix_socket,
- error=error))
- self._sock = None
- return False
-
- def _connect(self):
- """
- Recreate the socket and connect to it, since sockets cannot be reused after closing.
- Available configurations are IPv6, IPv4, or a UNIX socket.
- :return:
- """
- try:
- if self.unix_socket is not None:
- self._connect2unixsocket()
-
- else:
- if self.__socket_config is not None:
- self._connect2socket()
- else:
- if self.dgram_socket:
- sock_type = socket.SOCK_DGRAM
- else:
- sock_type = socket.SOCK_STREAM
- for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, sock_type):
- if self._connect2socket(res):
- break
-
- except Exception as error:
- self.error('unhandled exception during connect : {0}'.format(repr(error)))
- self._sock = None
- self.__socket_config = None
-
- def _disconnect(self):
- """
- Close socket connection
- :return:
- """
- if self._sock is not None:
- try:
- self.debug('closing socket')
- self._sock.shutdown(2) # 0 - read, 1 - write, 2 - all
- self._sock.close()
- except Exception as error:
- if not (hasattr(error, 'errno') and error.errno == errno.ENOTCONN):
- self.error(error)
- self._sock = None
-
- def _send(self, request=None):
- """
- Send request.
- :return: boolean
- """
- # Send request if it is needed
- if self.request != self.__empty_request:
- try:
- self._sock.settimeout(self.write_timeout)
- self.debug('set socket write timeout to: {0}'.format(self._sock.gettimeout()))
- self.debug('sending request: {0}'.format(request or self.request))
- self._sock.send(request or self.request)
- except Exception as error:
- self._socket_error('error sending request: {0}'.format(error))
- self._disconnect()
- return False
- return True
-
- def _receive(self, raw=False):
- """
- Receive data from socket
- :param raw: set `True` to return bytes
- :type raw: bool
- :return: decoded str or raw bytes
- :rtype: str/bytes
- """
- data = "" if not raw else b""
- while True:
- self.debug('receiving response')
- try:
- self._sock.settimeout(self.read_timeout)
- self.debug('set socket read timeout to: {0}'.format(self._sock.gettimeout()))
- buf = self._sock.recv(4096)
- except Exception as error:
- self._socket_error('failed to receive response: {0}'.format(error))
- self._disconnect()
- break
-
- if buf is None or len(buf) == 0: # handle server disconnect
- if data == "" or data == b"":
- self._socket_error('unexpectedly disconnected')
- else:
- self.debug('server closed the connection')
- self._disconnect()
- break
-
- self.debug('received data')
- data += buf.decode('utf-8', 'ignore') if not raw else buf
- if self._check_raw_data(data):
- break
-
- self.debug(u'final response: {0}'.format(data if not raw else u'binary data'))
- return data
-
- def _get_raw_data(self, raw=False, request=None):
- """
- Get raw data with low-level "socket" module.
- :param raw: set `True` to return bytes
- :type raw: bool
- :return: decoded data (str) or raw data (bytes)
- :rtype: str/bytes
- """
- if self._sock is None:
- self._connect()
- if self._sock is None:
- return None
-
- # Send request if it is needed
- if not self._send(request):
- return None
-
- data = self._receive(raw)
-
- if not self._keep_alive:
- self._disconnect()
-
- return data
-
- @staticmethod
- def _check_raw_data(data):
- """
- Check if all data has been gathered from socket
- :param data: str
- :return: boolean
- """
- return bool(data)
-
- def _parse_config(self):
- """
- Parse configuration data
- :return: boolean
- """
- try:
- self.unix_socket = str(self.configuration['socket'])
- except (KeyError, TypeError):
- self.debug('No unix socket specified. Trying TCP/IP socket.')
- self.unix_socket = None
- try:
- self.host = str(self.configuration['host'])
- except (KeyError, TypeError):
- self.debug('No host specified. Using: "{0}"'.format(self.host))
- try:
- self.port = int(self.configuration['port'])
- except (KeyError, TypeError):
- self.debug('No port specified. Using: "{0}"'.format(self.port))
-
- self.tls = bool(self.configuration.get('tls', self.tls))
- if self.tls and not _TLS_SUPPORT:
- self.warning('TLS requested but no TLS module found, disabling TLS support.')
- self.tls = False
- if _TLS_SUPPORT and not self.tls:
- self.debug('No TLS preference specified, not using TLS.')
-
- if self.tls and _TLS_SUPPORT:
- self.key = self.configuration.get('tls_key_file')
- self.cert = self.configuration.get('tls_cert_file')
- if not self.cert:
- # If there's not a valid certificate, clear the key too.
- self.debug('No valid TLS client certificate configuration found.')
- self.key = None
- self.cert = None
- elif not self.key:
- # If a key isn't listed, the config may still be
- # valid, because there may be a key attached to the
- # certificate.
- self.info('No TLS client key specified, assuming it\'s attached to the certificate.')
- self.key = None
-
- try:
- self.request = str(self.configuration['request'])
- except (KeyError, TypeError):
- self.debug('No request specified. Using: "{0}"'.format(self.request))
-
- self.request = self.request.encode()
-
- def check(self):
- self._parse_config()
- return SimpleService.check(self)
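In the same spirit, a collector built on SocketService only needs to define a request and parse the reply; `_get_raw_data()` handles connect/send/receive. A hedged sketch with hypothetical names:

from bases.FrameworkServices.SocketService import SocketService

ORDER = ['uptime']
CHARTS = {
    'uptime': {
        'options': [None, 'Server uptime', 'seconds', 'uptime', 'example.uptime', 'line'],
        'lines': [
            ['uptime', None, 'absolute'],
        ],
    },
}


class Service(SocketService):
    def __init__(self, configuration=None, name=None):
        SocketService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS
        self.request = 'uptime\n'  # _parse_config() encodes this to bytes

    @staticmethod
    def _check_raw_data(data):
        # keep reading from the socket until a full line has arrived
        return data.endswith('\n')

    def _get_data(self):
        raw = self._get_raw_data()
        if not raw:
            return None
        try:
            return {'uptime': int(raw.strip())}
        except ValueError:
            return None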
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
deleted file mode 100644
index 76129d376..000000000
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
+++ /dev/null
@@ -1,188 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Pawel Krupa (paulfantom)
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import urllib3
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-try:
- urllib3.disable_warnings()
-except AttributeError:
- pass
-
-URLLIB3_VERSION = urllib3.__version__
-URLLIB3 = 'urllib3'
-
-class UrlService(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.debug("{0} version: {1}".format(URLLIB3, URLLIB3_VERSION))
- self.url = self.configuration.get('url')
- self.user = self.configuration.get('user')
- self.password = self.configuration.get('pass')
- self.proxy_user = self.configuration.get('proxy_user')
- self.proxy_password = self.configuration.get('proxy_pass')
- self.proxy_url = self.configuration.get('proxy_url')
- self.method = self.configuration.get('method', 'GET')
- self.header = self.configuration.get('header')
- self.body = self.configuration.get('body')
- self.request_timeout = self.configuration.get('timeout', 1)
- self.respect_retry_after_header = self.configuration.get('respect_retry_after_header')
- self.tls_verify = self.configuration.get('tls_verify')
- self.tls_ca_file = self.configuration.get('tls_ca_file')
- self.tls_key_file = self.configuration.get('tls_key_file')
- self.tls_cert_file = self.configuration.get('tls_cert_file')
- self._manager = None
-
- def __make_headers(self, **header_kw):
- user = header_kw.get('user') or self.user
- password = header_kw.get('pass') or self.password
- proxy_user = header_kw.get('proxy_user') or self.proxy_user
- proxy_password = header_kw.get('proxy_pass') or self.proxy_password
- custom_header = header_kw.get('header') or self.header
- header_params = dict(keep_alive=True)
- proxy_header_params = dict()
- if user and password:
- header_params['basic_auth'] = '{user}:{password}'.format(user=user,
- password=password)
- if proxy_user and proxy_password:
- proxy_header_params['proxy_basic_auth'] = '{user}:{password}'.format(user=proxy_user,
- password=proxy_password)
- try:
- header, proxy_header = urllib3.make_headers(**header_params), urllib3.make_headers(**proxy_header_params)
- except TypeError as error:
- self.error('build_header() error: {error}'.format(error=error))
- return None, None
- else:
- header.update(custom_header or dict())
- return header, proxy_header
-
- def _build_manager(self, **header_kw):
- header, proxy_header = self.__make_headers(**header_kw)
- if header is None or proxy_header is None:
- return None
- proxy_url = header_kw.get('proxy_url') or self.proxy_url
- if proxy_url:
- manager = urllib3.ProxyManager
- params = dict(proxy_url=proxy_url, headers=header, proxy_headers=proxy_header)
- else:
- manager = urllib3.PoolManager
- params = dict(headers=header)
- tls_cert_file = self.tls_cert_file
- if tls_cert_file:
- params['cert_file'] = tls_cert_file
- # NOTE: key_file is useless without cert_file, but
- # cert_file may include the key as well.
- tls_key_file = self.tls_key_file
- if tls_key_file:
- params['key_file'] = tls_key_file
- tls_ca_file = self.tls_ca_file
- if tls_ca_file:
- params['ca_certs'] = tls_ca_file
- try:
- url = header_kw.get('url') or self.url
- is_https = url.startswith('https')
- if skip_tls_verify(is_https, self.tls_verify, tls_ca_file):
- params['ca_certs'] = None
- params['cert_reqs'] = 'CERT_NONE'
- if is_https:
- params['assert_hostname'] = False
- return manager(**params)
- except (urllib3.exceptions.ProxySchemeUnknown, TypeError) as error:
- self.error('build_manager() error:', str(error))
- return None
-
- def _get_raw_data(self, url=None, manager=None, **kwargs):
- """
- Get raw data from http request
- :return: str
- """
- try:
- response = self._do_request(url, manager, **kwargs)
- except Exception as error:
- self.error('Url: {url}. Error: {error}'.format(url=url or self.url, error=error))
- return None
-
- if response.status == 200:
- if isinstance(response.data, str):
- return response.data
- return response.data.decode(errors='ignore')
- else:
- self.debug('Url: {url}. Http response status code: {code}'.format(url=url or self.url, code=response.status))
- return None
-
- def _get_raw_data_with_status(self, url=None, manager=None, retries=1, redirect=True, **kwargs):
- """
- Get status and response body content from http request. Does not catch exceptions
- :return: int, str
- """
- response = self._do_request(url, manager, retries, redirect, **kwargs)
-
- if isinstance(response.data, str):
- return response.status, response.data
- return response.status, response.data.decode(errors='ignore')
-
- def _do_request(self, url=None, manager=None, retries=1, redirect=True, **kwargs):
- """
- Get response from http request. Does not catch exceptions
- :return: HTTPResponse
- """
- url = url or self.url
- manager = manager or self._manager
- retry = urllib3.Retry(retries)
- if hasattr(retry, 'respect_retry_after_header'):
- retry.respect_retry_after_header = bool(self.respect_retry_after_header)
-
- if self.body:
- kwargs['body'] = self.body
-
- response = manager.request(
- method=self.method,
- url=url,
- timeout=self.request_timeout,
- retries=retry,
- headers=manager.headers,
- redirect=redirect,
- **kwargs
- )
- return response
-
- def check(self):
- """
- Format configuration data and try to connect to server
- :return: boolean
- """
- if not (self.url and isinstance(self.url, str)):
- self.error('URL is not defined or type is not <str>')
- return False
-
- self._manager = self._build_manager()
- if not self._manager:
- return False
-
- try:
- data = self._get_data()
- except Exception as error:
- self.error('_get_data() failed. Url: {url}. Error: {error}'.format(url=self.url, error=error))
- return False
-
- if isinstance(data, dict) and data:
- return True
- self.error('_get_data() returned no data or type is not <dict>')
- return False
-
-
-def skip_tls_verify(is_https, tls_verify, tls_ca_file):
- # the default 'tls_verify' value is None
- # the logic is:
- # - never skip verification if a 'tls_ca_file' is configured
- # - skip by default for https
- # - do not skip by default for http
- if tls_ca_file:
- return False
- if is_https and not tls_verify:
- return True
- return tls_verify is False
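Concretely, the behaviour this encodes (a sketch; the CA path is illustrative):

skip_tls_verify(is_https=True,  tls_verify=None,  tls_ca_file='/path/ca.pem')  # False: a CA file always wins
skip_tls_verify(is_https=True,  tls_verify=None,  tls_ca_file=None)            # True: https skips by default
skip_tls_verify(is_https=True,  tls_verify=True,  tls_ca_file=None)            # False: explicit opt-in to verify
skip_tls_verify(is_https=False, tls_verify=None,  tls_ca_file=None)            # False: http verifies by default
skip_tls_verify(is_https=False, tls_verify=False, tls_ca_file=None)            # True: explicitly disabled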
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/__init__.py
+++ /dev/null
diff --git a/collectors/python.d.plugin/python_modules/bases/__init__.py b/collectors/python.d.plugin/python_modules/bases/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/collectors/python.d.plugin/python_modules/bases/__init__.py
+++ /dev/null
diff --git a/collectors/python.d.plugin/python_modules/bases/charts.py b/collectors/python.d.plugin/python_modules/bases/charts.py
deleted file mode 100644
index 203ad1672..000000000
--- a/collectors/python.d.plugin/python_modules/bases/charts.py
+++ /dev/null
@@ -1,431 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-
-from bases.collection import safe_print
-
-CHART_PARAMS = ['type', 'id', 'name', 'title', 'units', 'family', 'context', 'chart_type', 'hidden']
-DIMENSION_PARAMS = ['id', 'name', 'algorithm', 'multiplier', 'divisor', 'hidden']
-VARIABLE_PARAMS = ['id', 'value']
-
-CHART_TYPES = ['line', 'area', 'stacked']
-DIMENSION_ALGORITHMS = ['absolute', 'incremental', 'percentage-of-absolute-row', 'percentage-of-incremental-row']
-
-CHART_BEGIN = 'BEGIN {type}.{id} {since_last}\n'
-CHART_CREATE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
- "{chart_type} {priority} {update_every} '{hidden}' 'python.d.plugin' '{module_name}'\n"
-CHART_OBSOLETE = "CHART {type}.{id} '{name}' '{title}' '{units}' '{family}' '{context}' " \
- "{chart_type} {priority} {update_every} '{hidden} obsolete'\n"
-
-CLABEL_COLLECT_JOB = "CLABEL '_collect_job' '{actual_job_name}' '0'\n"
-CLABEL_COMMIT = "CLABEL_COMMIT\n"
-
-DIMENSION_CREATE = "DIMENSION '{id}' '{name}' {algorithm} {multiplier} {divisor} '{hidden} {obsolete}'\n"
-DIMENSION_SET = "SET '{id}' = {value}\n"
-
-CHART_VARIABLE_SET = "VARIABLE CHART '{id}' = {value}\n"
-
-# 1 is label source auto
-# https://github.com/netdata/netdata/blob/cc2586de697702f86a3c34e60e23652dd4ddcb42/database/rrd.h#L205
-RUNTIME_CHART_CREATE = "CHART netdata.runtime_{job_name} '' 'Execution time' 'ms' 'python.d' " \
- "netdata.pythond_runtime line 145000 {update_every} '' 'python.d.plugin' '{module_name}'\n" \
- "CLABEL '_collect_job' '{actual_job_name}' '1'\n" \
- "CLABEL_COMMIT\n" \
- "DIMENSION run_time 'run time' absolute 1 1\n"
-
-ND_INTERNAL_MONITORING_DISABLED = os.getenv("NETDATA_INTERNALS_MONITORING") == "NO"
-
-
-def create_runtime_chart(func):
- """
- Calls the wrapped function, then prints the runtime chart to stdout.
-
- Used as a decorator for the SimpleService.create() method.
- The whole point of implementing the 'create runtime chart' functionality as a
- decorator is to keep it working for users who re-implement create() in their classes.
-
- :param func: class method
- :return:
- """
-
- def wrapper(*args, **kwargs):
- self = args[0]
- if not ND_INTERNAL_MONITORING_DISABLED:
- chart = RUNTIME_CHART_CREATE.format(
- job_name=self.name,
- actual_job_name=self.actual_job_name,
- update_every=self._runtime_counters.update_every,
- module_name=self.module_name,
- )
- safe_print(chart)
- ok = func(*args, **kwargs)
- return ok
-
- return wrapper
-
-
-class ChartError(Exception):
- """Base-class for all exceptions raised by this module"""
-
-
-class DuplicateItemError(ChartError):
- """Occurs when user re-adds a chart or a dimension that has already been added"""
-
-
-class ItemTypeError(ChartError):
- """Occurs when user passes value of wrong type to Chart, Dimension or ChartVariable class"""
-
-
-class ItemValueError(ChartError):
- """Occurs when user passes inappropriate value to Chart, Dimension or ChartVariable class"""
-
-
-class Charts:
- """Represent a collection of charts
-
- All charts are stored in a dict.
- Each chart is an instance of the Chart class.
- Charts must be added using the Charts.add_chart() method only"""
-
- def __init__(self, job_name, actual_job_name, priority, cleanup, get_update_every, module_name):
- """
- :param job_name: <bound method>
- :param actual_job_name: <str>
- :param priority: <int>
- :param cleanup: <int>
- :param get_update_every: <bound method>
- :param module_name: <str>
- """
- self.job_name = job_name
- self.actual_job_name = actual_job_name
- self.priority = priority
- self.cleanup = cleanup
- self.get_update_every = get_update_every
- self.module_name = module_name
- self.charts = dict()
-
- def __len__(self):
- return len(self.charts)
-
- def __iter__(self):
- return iter(self.charts.values())
-
- def __repr__(self):
- return 'Charts({0})'.format(self)
-
- def __str__(self):
- return str([chart for chart in self.charts])
-
- def __contains__(self, item):
- return item in self.charts
-
- def __getitem__(self, item):
- return self.charts[item]
-
- def __delitem__(self, key):
- del self.charts[key]
-
- def __bool__(self):
- return bool(self.charts)
-
- def __nonzero__(self):
- return self.__bool__()
-
- def add_chart(self, params):
- """
- Create a Chart instance and add it to the dict.
-
- Manually adds the job name, priority and update_every to params.
- :param params: <list>
- :return:
- """
- params = [self.job_name()] + params
- new_chart = Chart(params)
-
- new_chart.params['update_every'] = self.get_update_every()
- new_chart.params['priority'] = self.priority
- new_chart.params['module_name'] = self.module_name
- new_chart.params['actual_job_name'] = self.actual_job_name
-
- self.priority += 1
- self.charts[new_chart.id] = new_chart
-
- return new_chart
-
- def active_charts(self):
- return [chart.id for chart in self if not chart.flags.obsoleted]
-
-
-class Chart:
- """Represent a chart"""
-
- def __init__(self, params):
- """
- :param params: <list>
- """
- if not isinstance(params, list):
- raise ItemTypeError("'chart' must be a list type")
- if not len(params) >= 8:
- raise ItemValueError("invalid value for 'chart', must be {0}".format(CHART_PARAMS))
-
- self.params = dict(zip(CHART_PARAMS, (p or str() for p in params)))
- self.name = '{type}.{id}'.format(type=self.params['type'],
- id=self.params['id'])
- if self.params.get('chart_type') not in CHART_TYPES:
- self.params['chart_type'] = 'absolute'
- hidden = str(self.params.get('hidden', ''))
- self.params['hidden'] = 'hidden' if hidden == 'hidden' else ''
-
- self.dimensions = list()
- self.variables = set()
- self.flags = ChartFlags()
- self.penalty = 0
-
- def __getattr__(self, item):
- try:
- return self.params[item]
- except KeyError:
- raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
- attr=item))
-
- def __repr__(self):
- return 'Chart({0})'.format(self.id)
-
- def __str__(self):
- return self.id
-
- def __iter__(self):
- return iter(self.dimensions)
-
- def __contains__(self, item):
- return item in [dimension.id for dimension in self.dimensions]
-
- def add_variable(self, variable):
- """
- :param variable: <list>
- :return:
- """
- self.variables.add(ChartVariable(variable))
-
- def add_dimension(self, dimension):
- """
- :param dimension: <list>
- :return:
- """
- dim = Dimension(dimension)
-
- if dim.id in self:
- raise DuplicateItemError("'{dimension}' already in '{chart}' dimensions".format(dimension=dim.id,
- chart=self.name))
- self.refresh()
- self.dimensions.append(dim)
- return dim
-
- def del_dimension(self, dimension_id, hide=True):
- if dimension_id not in self:
- return
- idx = self.dimensions.index(dimension_id)
- dimension = self.dimensions[idx]
- if hide:
- dimension.params['hidden'] = 'hidden'
- dimension.params['obsolete'] = 'obsolete'
- self.create()
- self.dimensions.remove(dimension)
-
- def hide_dimension(self, dimension_id, reverse=False):
- if dimension_id not in self:
- return
- idx = self.dimensions.index(dimension_id)
- dimension = self.dimensions[idx]
- dimension.params['hidden'] = 'hidden' if not reverse else str()
- self.refresh()
-
- def create(self):
- """
- :return:
- """
- chart = CHART_CREATE.format(**self.params)
- labels = CLABEL_COLLECT_JOB.format(**self.params) + CLABEL_COMMIT
- dimensions = ''.join([dimension.create() for dimension in self.dimensions])
- variables = ''.join([var.set(var.value) for var in self.variables if var])
-
- self.flags.push = False
- self.flags.created = True
-
- safe_print(chart + labels + dimensions + variables)
-
- def can_be_updated(self, data):
- for dim in self.dimensions:
- if dim.get_value(data) is not None:
- return True
- return False
-
- def update(self, data, interval):
- updated_dimensions, updated_variables = str(), str()
-
- for dim in self.dimensions:
- value = dim.get_value(data)
- if value is not None:
- updated_dimensions += dim.set(value)
-
- for var in self.variables:
- value = var.get_value(data)
- if value is not None:
- updated_variables += var.set(value)
-
- if updated_dimensions:
- since_last = interval if self.flags.updated else 0
-
- if self.flags.push:
- self.create()
-
- chart_begin = CHART_BEGIN.format(type=self.type, id=self.id, since_last=since_last)
- safe_print(chart_begin, updated_dimensions, updated_variables, 'END\n')
-
- self.flags.updated = True
- self.penalty = 0
- else:
- self.penalty += 1
- self.flags.updated = False
-
- return bool(updated_dimensions)
-
- def obsolete(self):
- self.flags.obsoleted = True
- if self.flags.created:
- safe_print(CHART_OBSOLETE.format(**self.params))
-
- def refresh(self):
- self.penalty = 0
- self.flags.push = True
- self.flags.obsoleted = False
-
-
-class Dimension:
- """Represent a dimension"""
-
- def __init__(self, params):
- """
- :param params: <list>
- """
- if not isinstance(params, list):
- raise ItemTypeError("'dimension' must be a list type")
- if not params:
- raise ItemValueError("invalid value for 'dimension', must be {0}".format(DIMENSION_PARAMS))
-
- self.params = dict(zip(DIMENSION_PARAMS, (p or str() for p in params)))
- self.params['name'] = self.params.get('name') or self.params['id']
-
- if self.params.get('algorithm') not in DIMENSION_ALGORITHMS:
- self.params['algorithm'] = 'absolute'
- if not isinstance(self.params.get('multiplier'), int):
- self.params['multiplier'] = 1
- if not isinstance(self.params.get('divisor'), int):
- self.params['divisor'] = 1
- self.params.setdefault('hidden', '')
- self.params.setdefault('obsolete', '')
-
- def __getattr__(self, item):
- try:
- return self.params[item]
- except KeyError:
- raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
- attr=item))
-
- def __repr__(self):
- return 'Dimension({0})'.format(self.id)
-
- def __str__(self):
- return self.id
-
- def __eq__(self, other):
- if not isinstance(other, Dimension):
- return self.id == other
- return self.id == other.id
-
- def __ne__(self, other):
- return not self == other
-
- def __hash__(self):
- return hash(repr(self))
-
- def create(self):
- return DIMENSION_CREATE.format(**self.params)
-
- def set(self, value):
- """
- :param value: the numeric value for the SET line
- :return:
- """
- return DIMENSION_SET.format(id=self.id,
- value=value)
-
- def get_value(self, data):
- try:
- return int(data[self.id])
- except (KeyError, TypeError):
- return None
-
-
-class ChartVariable:
- """Represent a chart variable"""
-
- def __init__(self, params):
- """
- :param params: <list>
- """
- if not isinstance(params, list):
- raise ItemTypeError("'variable' must be a list type")
- if not params:
- raise ItemValueError("invalid value for 'variable' must be: {0}".format(VARIABLE_PARAMS))
-
- self.params = dict(zip(VARIABLE_PARAMS, params))
- self.params.setdefault('value', None)
-
- def __getattr__(self, item):
- try:
- return self.params[item]
- except KeyError:
- raise AttributeError("'{instance}' has no attribute '{attr}'".format(instance=repr(self),
- attr=item))
-
- def __bool__(self):
- return self.value is not None
-
- def __nonzero__(self):
- return self.__bool__()
-
- def __repr__(self):
- return 'ChartVariable({0})'.format(self.id)
-
- def __str__(self):
- return self.id
-
- def __eq__(self, other):
- if isinstance(other, ChartVariable):
- return self.id == other.id
- return False
-
- def __ne__(self, other):
- return not self == other
-
- def __hash__(self):
- return hash(repr(self))
-
- def set(self, value):
- return CHART_VARIABLE_SET.format(id=self.id,
- value=value)
-
- def get_value(self, data):
- try:
- return int(data[self.id])
- except (KeyError, TypeError):
- return None
-
-
-class ChartFlags:
- def __init__(self):
- self.push = True
- self.created = False
- self.updated = False
- self.obsoleted = False
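Putting the pieces above together, a short sketch of how the plugin drives these classes and what lands on stdout (illustrative names and values; assumes the `bases` package is importable):

from bases.charts import Charts

charts = Charts(job_name=lambda: 'example_local', actual_job_name='local',
                priority=145000, cleanup=10, get_update_every=lambda: 1,
                module_name='example')

chart = charts.add_chart(['random', None, 'A random number', 'number',
                          'random', 'example.random', 'line'])
chart.add_dimension(['value', None, 'absolute'])

chart.create()                  # emits the CHART, CLABEL/CLABEL_COMMIT and DIMENSION lines
chart.update({'value': 42}, 0)  # emits BEGIN example_local.random / SET 'value' = 42 / END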
diff --git a/collectors/python.d.plugin/python_modules/bases/collection.py b/collectors/python.d.plugin/python_modules/bases/collection.py
deleted file mode 100644
index 93bf8cf05..000000000
--- a/collectors/python.d.plugin/python_modules/bases/collection.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-
-from threading import Lock
-
-PATH = os.getenv('PATH', '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin').split(':')
-
-CHART_BEGIN = 'BEGIN {0} {1}\n'
-CHART_CREATE = "CHART {0} '{1}' '{2}' '{3}' '{4}' '{5}' {6} {7} {8}\n"
-DIMENSION_CREATE = "DIMENSION '{0}' '{1}' {2} {3} {4} '{5}'\n"
-DIMENSION_SET = "SET '{0}' = {1}\n"
-
-print_lock = Lock()
-
-
-def setdefault_values(config, base_dict):
- for key, value in base_dict.items():
- config.setdefault(key, value)
- return config
-
-
-def run_and_exit(func):
- def wrapper(*args, **kwargs):
- func(*args, **kwargs)
- exit(1)
-
- return wrapper
-
-
-def on_try_except_finally(on_except=(None,), on_finally=(None,)):
- except_func = on_except[0]
- finally_func = on_finally[0]
-
- def decorator(func):
- def wrapper(*args, **kwargs):
- try:
- func(*args, **kwargs)
- except Exception:
- if except_func:
- except_func(*on_except[1:])
- finally:
- if finally_func:
- finally_func(*on_finally[1:])
-
- return wrapper
-
- return decorator
-
-
-def static_vars(**kwargs):
- def decorate(func):
- for k in kwargs:
- setattr(func, k, kwargs[k])
- return func
-
- return decorate
-
-
-@on_try_except_finally(on_except=(exit, 1))
-def safe_print(*msg):
- """
- :param msg:
- :return:
- """
- print_lock.acquire()
- print(''.join(msg))
- print_lock.release()
-
-
-def find_binary(binary):
- """
- :param binary: <str>
- :return:
- """
- for directory in PATH:
- binary_name = os.path.join(directory, binary)
- if os.path.isfile(binary_name) and os.access(binary_name, os.X_OK):
- return binary_name
- return None
-
-
-def read_last_line(f):
- with open(f, 'rb') as opened:
- opened.seek(-2, 2)
- while opened.read(1) != b'\n':
- opened.seek(-2, 1)
- if opened.tell() == 0:
- break
- result = opened.readline()
- return result.decode()
-
-
-def unicode_str(arg):
- """Return the argument as a unicode string.
-
- The `unicode` function has been removed from Python3 and `str` takes its
- place. This function is a helper which will try using Python 2's `unicode`
- and if it doesn't exist, assume we're using Python 3 and use `str`.
-
- :param arg:
- :return: <str>
- """
- # TODO: fix
- try:
- # https://github.com/netdata/netdata/issues/7613
- if isinstance(arg, unicode):
- return arg
- return unicode(arg, errors='ignore')
- # https://github.com/netdata/netdata/issues/7642
- except TypeError:
- return unicode(arg)
- except NameError:
- return str(arg)
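A quick sketch of the two helpers above that modules use most (hypothetical inputs):

from bases.collection import find_binary, on_try_except_finally

print(find_binary('sh'))  # first executable 'sh' found on PATH, e.g. '/bin/sh', else None

@on_try_except_finally(on_except=(print, 'parse failed'))
def parse(line):
    print(int(line))

parse('42')    # prints 42
parse('oops')  # the ValueError is swallowed; prints 'parse failed'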
diff --git a/collectors/python.d.plugin/python_modules/bases/loaders.py b/collectors/python.d.plugin/python_modules/bases/loaders.py
deleted file mode 100644
index 095f3a3b1..000000000
--- a/collectors/python.d.plugin/python_modules/bases/loaders.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-from sys import version_info
-
-PY_VERSION = version_info[:2]
-
-try:
- if PY_VERSION > (3, 1):
- from pyyaml3 import SafeLoader as YamlSafeLoader
- else:
- from pyyaml2 import SafeLoader as YamlSafeLoader
-except ImportError:
- from yaml import SafeLoader as YamlSafeLoader
-
-
-try:
- from collections import OrderedDict
-except ImportError:
- from third_party.ordereddict import OrderedDict
-
-
-DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' if PY_VERSION > (3, 1) else u'tag:yaml.org,2002:map'
-
-
-def dict_constructor(loader, node):
- return OrderedDict(loader.construct_pairs(node))
-
-
-YamlSafeLoader.add_constructor(DEFAULT_MAPPING_TAG, dict_constructor)
-
-
-def load_yaml(stream):
- loader = YamlSafeLoader(stream)
- try:
- return loader.get_single_data()
- finally:
- loader.dispose()
-
-
-def load_config(file_name):
- with open(file_name, 'r') as stream:
- return load_yaml(stream)
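The net effect of the constructor registration above is that configuration files load with their key order preserved. A sketch (hypothetical file name):

from bases.loaders import load_config

cfg = load_config('example.conf')
# mappings come back as OrderedDict, so jobs run in the order
# they are written in the file
print(list(cfg.keys()))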
diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
deleted file mode 100644
index 7ae8ab0c1..000000000
--- a/collectors/python.d.plugin/python_modules/bases/loggers.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description:
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import logging
-import os
-import stat
-import traceback
-
-from sys import exc_info
-
-try:
- from time import monotonic as time
-except ImportError:
- from time import time
-
-from bases.collection import on_try_except_finally, unicode_str
-
-LOGGING_LEVELS = {
- 'CRITICAL': 50,
- 'ERROR': 40,
- 'WARNING': 30,
- 'INFO': 20,
- 'DEBUG': 10,
- 'NOTSET': 0,
-}
-
-
-def is_stderr_connected_to_journal():
- journal_stream = os.environ.get("JOURNAL_STREAM")
- if not journal_stream:
- return False
-
- colon_index = journal_stream.find(":")
- if colon_index <= 0:
- return False
-
- device, inode = journal_stream[:colon_index], journal_stream[colon_index + 1:]
-
- try:
- device_number, inode_number = os.fstat(2)[stat.ST_DEV], os.fstat(2)[stat.ST_INO]
- except OSError:
- return False
-
- return str(device_number) == device and str(inode_number) == inode
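# systemd sets JOURNAL_STREAM to "<device>:<inode>" of the journal socket it
# attached to the service; comparing those with fstat() of our stderr (fd 2)
# tells us whether stderr still points at the journal. Illustrative values:
#   JOURNAL_STREAM=8:2161672, os.fstat(2) -> st_dev=8, st_ino=2161672 -> True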
-
-
-is_journal = is_stderr_connected_to_journal()
-
-DEFAULT_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s : %(message)s'
-PYTHON_D_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s: %(module_name)s[%(job_name)s] : %(message)s'
-
-if is_journal:
- DEFAULT_LOG_LINE_FORMAT = '%(name)s %(levelname)s : %(message)s'
- PYTHON_D_LOG_LINE_FORMAT = '%(name)s %(levelname)s: %(module_name)s[%(job_name)s] : %(message)s '
-
-DEFAULT_LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
-PYTHON_D_LOG_NAME = 'python.d'
-
-
-def add_traceback(func):
- def on_call(*args):
- self = args[0]
-
- if not self.log_traceback:
- func(*args)
- else:
- if exc_info()[0]:
- func(*args)
- func(self, traceback.format_exc())
- else:
- func(*args)
-
- return on_call
-
-
-class BaseLogger(object):
- def __init__(
- self,
- logger_name,
- log_fmt=DEFAULT_LOG_LINE_FORMAT,
- date_fmt=DEFAULT_LOG_TIME_FORMAT,
- handler=logging.StreamHandler,
- ):
- self.logger = logging.getLogger(logger_name)
- self._muted = False
- if not self.has_handlers():
- self.severity = 'INFO'
- self.logger.addHandler(handler())
- self.set_formatter(fmt=log_fmt, date_fmt=date_fmt)
-
- def __repr__(self):
- return '<Logger: {name}>'.format(name=self.logger.name)
-
- def set_formatter(self, fmt, date_fmt=DEFAULT_LOG_TIME_FORMAT):
- if self.has_handlers():
- self.logger.handlers[0].setFormatter(logging.Formatter(fmt=fmt, datefmt=date_fmt))
-
- def has_handlers(self):
- return self.logger.handlers
-
- @property
- def severity(self):
- return self.logger.getEffectiveLevel()
-
- @severity.setter
- def severity(self, level):
- if level in LOGGING_LEVELS:
- self.logger.setLevel(LOGGING_LEVELS[level])
-
- def _log(self, level, *msg, **kwargs):
- if not self._muted:
- self.logger.log(level, ' '.join(map(unicode_str, msg)), **kwargs)
-
- def debug(self, *msg, **kwargs):
- self._log(logging.DEBUG, *msg, **kwargs)
-
- def info(self, *msg, **kwargs):
- self._log(logging.INFO, *msg, **kwargs)
-
- def warning(self, *msg, **kwargs):
- self._log(logging.WARN, *msg, **kwargs)
-
- def error(self, *msg, **kwargs):
- self._log(logging.ERROR, *msg, **kwargs)
-
- def alert(self, *msg, **kwargs):
- self._log(logging.CRITICAL, *msg, **kwargs)
-
- @on_try_except_finally(on_finally=(exit, 1))
- def fatal(self, *msg, **kwargs):
- self._log(logging.CRITICAL, *msg, **kwargs)
-
- def mute(self):
- self._muted = True
-
- def unmute(self):
- self._muted = False
-
-
-class PythonDLogger(object):
- def __init__(
- self,
- logger_name=PYTHON_D_LOG_NAME,
- log_fmt=PYTHON_D_LOG_LINE_FORMAT,
- ):
- self.logger = BaseLogger(logger_name, log_fmt=log_fmt)
- self.module_name = 'plugin'
- self.job_name = 'main'
-
- _LOG_TRACEBACK = False
-
- @property
- def log_traceback(self):
- return PythonDLogger._LOG_TRACEBACK
-
- @log_traceback.setter
- def log_traceback(self, value):
- PythonDLogger._LOG_TRACEBACK = value
-
- def debug(self, *msg):
- self.logger.debug(*msg, extra={
- 'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name,
- })
-
- def info(self, *msg):
- self.logger.info(*msg, extra={
- 'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name,
- })
-
- def warning(self, *msg):
- self.logger.warning(*msg, extra={
- 'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name,
- })
-
- @add_traceback
- def error(self, *msg):
- self.logger.error(*msg, extra={
- 'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name,
- })
-
- @add_traceback
- def alert(self, *msg):
- self.logger.alert(*msg, extra={
- 'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name,
- })
-
- def fatal(self, *msg):
- self.logger.fatal(*msg, extra={
- 'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name,
- })
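A short usage sketch of the logger above (illustrative module and job names):

from bases.loggers import PythonDLogger

log = PythonDLogger()
log.module_name = 'example'    # rendered as example[job1] in every line
log.job_name = 'job1'
log.logger.severity = 'DEBUG'  # accepts any name from LOGGING_LEVELS
log.log_traceback = True       # error()/alert() append a traceback inside an except block
log.info('collector started')
log.error('collection failed')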
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
deleted file mode 100644
index 4d560e438..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
+++ /dev/null
@@ -1,316 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-from error import *
-
-from tokens import *
-from events import *
-from nodes import *
-
-from loader import *
-from dumper import *
-
-__version__ = '3.11'
-
-try:
- from cyaml import *
- __with_libyaml__ = True
-except ImportError:
- __with_libyaml__ = False
-
-def scan(stream, Loader=Loader):
- """
- Scan a YAML stream and produce scanning tokens.
- """
- loader = Loader(stream)
- try:
- while loader.check_token():
- yield loader.get_token()
- finally:
- loader.dispose()
-
-def parse(stream, Loader=Loader):
- """
- Parse a YAML stream and produce parsing events.
- """
- loader = Loader(stream)
- try:
- while loader.check_event():
- yield loader.get_event()
- finally:
- loader.dispose()
-
-def compose(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding representation tree.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_node()
- finally:
- loader.dispose()
-
-def compose_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding representation trees.
- """
- loader = Loader(stream)
- try:
- while loader.check_node():
- yield loader.get_node()
- finally:
- loader.dispose()
-
-def load(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_data()
- finally:
- loader.dispose()
-
-def load_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- """
- loader = Loader(stream)
- try:
- while loader.check_data():
- yield loader.get_data()
- finally:
- loader.dispose()
-
-def safe_load(stream):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- Resolve only basic YAML tags.
- """
- return load(stream, SafeLoader)
-
-def safe_load_all(stream):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- Resolve only basic YAML tags.
- """
- return load_all(stream, SafeLoader)
-
-def emit(events, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
- """
- Emit YAML parsing events into a stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- from StringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- try:
- for event in events:
- dumper.emit(event)
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize_all(nodes, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding='utf-8', explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of representation trees into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- from StringIO import StringIO
- else:
- from cStringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for node in nodes:
- dumper.serialize(node)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize(node, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a representation tree into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return serialize_all([node], stream, Dumper=Dumper, **kwds)
-
-def dump_all(documents, stream=None, Dumper=Dumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding='utf-8', explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of Python objects into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- from StringIO import StringIO
- else:
- from cStringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for data in documents:
- dumper.represent(data)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def dump(data, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a Python object into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=Dumper, **kwds)
-
-def safe_dump_all(documents, stream=None, **kwds):
- """
- Serialize a sequence of Python objects into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
-
-def safe_dump(data, stream=None, **kwds):
- """
- Serialize a Python object into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-
-def add_implicit_resolver(tag, regexp, first=None,
- Loader=Loader, Dumper=Dumper):
- """
- Add an implicit scalar detector.
- If an implicit scalar value matches the given regexp,
- the corresponding tag is assigned to the scalar.
- first is a sequence of possible initial characters or None.
- """
- Loader.add_implicit_resolver(tag, regexp, first)
- Dumper.add_implicit_resolver(tag, regexp, first)
-
-def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
- """
- Add a path based resolver for the given tag.
- A path is a list of keys that forms a path
- to a node in the representation tree.
- Keys can be string values, integers, or None.
- """
- Loader.add_path_resolver(tag, path, kind)
- Dumper.add_path_resolver(tag, path, kind)
-
-def add_constructor(tag, constructor, Loader=Loader):
- """
- Add a constructor for the given tag.
- Constructor is a function that accepts a Loader instance
- and a node object and produces the corresponding Python object.
- """
- Loader.add_constructor(tag, constructor)
-
-def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
- """
- Add a multi-constructor for the given tag prefix.
- Multi-constructor is called for a node if its tag starts with tag_prefix.
- Multi-constructor accepts a Loader instance, a tag suffix,
- and a node object and produces the corresponding Python object.
- """
- Loader.add_multi_constructor(tag_prefix, multi_constructor)
-
-def add_representer(data_type, representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Representer is a function accepting a Dumper instance
- and an instance of the given data type
- and producing the corresponding representation node.
- """
- Dumper.add_representer(data_type, representer)
-
-def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Multi-representer is a function accepting a Dumper instance
- and an instance of the given data type or subtype
- and producing the corresponding representation node.
- """
- Dumper.add_multi_representer(data_type, multi_representer)
-
-class YAMLObjectMetaclass(type):
- """
- The metaclass for YAMLObject.
- """
- def __init__(cls, name, bases, kwds):
- super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
- if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
- cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
- cls.yaml_dumper.add_representer(cls, cls.to_yaml)
-
-class YAMLObject(object):
- """
- An object that can dump itself to a YAML stream
- and load itself from a YAML stream.
- """
-
- __metaclass__ = YAMLObjectMetaclass
- __slots__ = () # no direct instantiation, so allow immutable subclasses
-
- yaml_loader = Loader
- yaml_dumper = Dumper
-
- yaml_tag = None
- yaml_flow_style = None
-
- def from_yaml(cls, loader, node):
- """
- Convert a representation node to a Python object.
- """
- return loader.construct_yaml_object(node, cls)
- from_yaml = classmethod(from_yaml)
-
- def to_yaml(cls, dumper, data):
- """
- Convert a Python object to a representation node.
- """
- return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
- flow_style=cls.yaml_flow_style)
- to_yaml = classmethod(to_yaml)
-
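Note that this bundled copy targets Python 2 only (see the implicit relative imports such as `from error import *`), so a usage sketch would run under Python 2:

# Python 2 only
import pyyaml2 as yaml

data = yaml.safe_load("jobs:\n  local:\n    update_every: 5\n")
print data['jobs']['local']['update_every']   # 5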
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/composer.py b/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
deleted file mode 100644
index 6b41b8067..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['Composer', 'ComposerError']
-
-from error import MarkedYAMLError
-from events import *
-from nodes import *
-
-class ComposerError(MarkedYAMLError):
- pass
-
-class Composer(object):
-
- def __init__(self):
- self.anchors = {}
-
- def check_node(self):
- # Drop the STREAM-START event.
- if self.check_event(StreamStartEvent):
- self.get_event()
-
- # Are there more documents available?
- return not self.check_event(StreamEndEvent)
-
- def get_node(self):
- # Get the root node of the next document.
- if not self.check_event(StreamEndEvent):
- return self.compose_document()
-
- def get_single_node(self):
- # Drop the STREAM-START event.
- self.get_event()
-
- # Compose a document if the stream is not empty.
- document = None
- if not self.check_event(StreamEndEvent):
- document = self.compose_document()
-
- # Ensure that the stream contains no more documents.
- if not self.check_event(StreamEndEvent):
- event = self.get_event()
- raise ComposerError("expected a single document in the stream",
- document.start_mark, "but found another document",
- event.start_mark)
-
- # Drop the STREAM-END event.
- self.get_event()
-
- return document
-
- def compose_document(self):
- # Drop the DOCUMENT-START event.
- self.get_event()
-
- # Compose the root node.
- node = self.compose_node(None, None)
-
- # Drop the DOCUMENT-END event.
- self.get_event()
-
- self.anchors = {}
- return node
-
- def compose_node(self, parent, index):
- if self.check_event(AliasEvent):
- event = self.get_event()
- anchor = event.anchor
- if anchor not in self.anchors:
- raise ComposerError(None, None, "found undefined alias %r"
- % anchor.encode('utf-8'), event.start_mark)
- return self.anchors[anchor]
- event = self.peek_event()
- anchor = event.anchor
- if anchor is not None:
- if anchor in self.anchors:
- raise ComposerError("found duplicate anchor %r; first occurence"
- % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
- "second occurence", event.start_mark)
- self.descend_resolver(parent, index)
- if self.check_event(ScalarEvent):
- node = self.compose_scalar_node(anchor)
- elif self.check_event(SequenceStartEvent):
- node = self.compose_sequence_node(anchor)
- elif self.check_event(MappingStartEvent):
- node = self.compose_mapping_node(anchor)
- self.ascend_resolver()
- return node
-
- def compose_scalar_node(self, anchor):
- event = self.get_event()
- tag = event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(ScalarNode, event.value, event.implicit)
- node = ScalarNode(tag, event.value,
- event.start_mark, event.end_mark, style=event.style)
- if anchor is not None:
- self.anchors[anchor] = node
- return node
-
- def compose_sequence_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(SequenceNode, None, start_event.implicit)
- node = SequenceNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- index = 0
- while not self.check_event(SequenceEndEvent):
- node.value.append(self.compose_node(node, index))
- index += 1
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
- def compose_mapping_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(MappingNode, None, start_event.implicit)
- node = MappingNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- while not self.check_event(MappingEndEvent):
- #key_event = self.peek_event()
- item_key = self.compose_node(node, None)
- #if item_key in node.value:
- # raise ComposerError("while composing a mapping", start_event.start_mark,
- # "found duplicate key", key_event.start_mark)
- item_value = self.compose_node(node, item_key)
- #node.value[item_key] = item_value
- node.value.append((item_key, item_value))
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
deleted file mode 100644
index 8ad1b90a7..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
+++ /dev/null
@@ -1,676 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
- 'ConstructorError']
-
-from error import *
-from nodes import *
-
-import datetime
-
-import binascii, re, sys, types
-
-class ConstructorError(MarkedYAMLError):
- pass
-
-class BaseConstructor(object):
-
- yaml_constructors = {}
- yaml_multi_constructors = {}
-
- def __init__(self):
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.state_generators = []
- self.deep_construct = False
-
- def check_data(self):
- # Are there more documents available?
- return self.check_node()
-
- def get_data(self):
- # Construct and return the next document.
- if self.check_node():
- return self.construct_document(self.get_node())
-
- def get_single_data(self):
- # Ensure that the stream contains a single document and construct it.
- node = self.get_single_node()
- if node is not None:
- return self.construct_document(node)
- return None
-
- def construct_document(self, node):
- data = self.construct_object(node)
- while self.state_generators:
- state_generators = self.state_generators
- self.state_generators = []
- for generator in state_generators:
- for dummy in generator:
- pass
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.deep_construct = False
- return data
-
- def construct_object(self, node, deep=False):
- if node in self.constructed_objects:
- return self.constructed_objects[node]
- if deep:
- old_deep = self.deep_construct
- self.deep_construct = True
- if node in self.recursive_objects:
- raise ConstructorError(None, None,
- "found unconstructable recursive node", node.start_mark)
- self.recursive_objects[node] = None
- constructor = None
- tag_suffix = None
- if node.tag in self.yaml_constructors:
- constructor = self.yaml_constructors[node.tag]
- else:
- for tag_prefix in self.yaml_multi_constructors:
- if node.tag.startswith(tag_prefix):
- tag_suffix = node.tag[len(tag_prefix):]
- constructor = self.yaml_multi_constructors[tag_prefix]
- break
- else:
- if None in self.yaml_multi_constructors:
- tag_suffix = node.tag
- constructor = self.yaml_multi_constructors[None]
- elif None in self.yaml_constructors:
- constructor = self.yaml_constructors[None]
- elif isinstance(node, ScalarNode):
- constructor = self.__class__.construct_scalar
- elif isinstance(node, SequenceNode):
- constructor = self.__class__.construct_sequence
- elif isinstance(node, MappingNode):
- constructor = self.__class__.construct_mapping
- if tag_suffix is None:
- data = constructor(self, node)
- else:
- data = constructor(self, tag_suffix, node)
- if isinstance(data, types.GeneratorType):
- generator = data
- data = generator.next()
- if self.deep_construct:
- for dummy in generator:
- pass
- else:
- self.state_generators.append(generator)
- self.constructed_objects[node] = data
- del self.recursive_objects[node]
- if deep:
- self.deep_construct = old_deep
- return data
-
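The generator dance in construct_object (yield the empty container first, register it, fill it later via state_generators) is what makes self-referencing anchors loadable. A quick check, assuming current PyYAML:

    import yaml

    # The sequence is registered in constructed_objects before its children
    # are built, so the alias inside it resolves to the list itself:
    data = yaml.safe_load('&a [1, *a]')
    print(data[1] is data)   # True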
- def construct_scalar(self, node):
- if not isinstance(node, ScalarNode):
- raise ConstructorError(None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
- return node.value
-
- def construct_sequence(self, node, deep=False):
- if not isinstance(node, SequenceNode):
- raise ConstructorError(None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
- return [self.construct_object(child, deep=deep)
- for child in node.value]
-
- def construct_mapping(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- mapping = {}
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- try:
- hash(key)
- except TypeError, exc:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" % exc, key_node.start_mark)
- value = self.construct_object(value_node, deep=deep)
- mapping[key] = value
- return mapping
-
- def construct_pairs(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- pairs = []
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- value = self.construct_object(value_node, deep=deep)
- pairs.append((key, value))
- return pairs
-
- def add_constructor(cls, tag, constructor):
- if not 'yaml_constructors' in cls.__dict__:
- cls.yaml_constructors = cls.yaml_constructors.copy()
- cls.yaml_constructors[tag] = constructor
- add_constructor = classmethod(add_constructor)
-
- def add_multi_constructor(cls, tag_prefix, multi_constructor):
- if not 'yaml_multi_constructors' in cls.__dict__:
- cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
- cls.yaml_multi_constructors[tag_prefix] = multi_constructor
- add_multi_constructor = classmethod(add_multi_constructor)
-
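add_constructor and add_multi_constructor copy the class-level registry on first write, so a subclass can register tags without touching its parents. A small sketch of registering a custom tag through the module-level API (PyYAML >= 5.1 is assumed for the Loader keyword; the !point tag is made up for illustration):

    import yaml

    def construct_point(loader, node):
        # node is a SequenceNode for input like "!point [1, 2]"
        x, y = loader.construct_sequence(node)
        return (x, y)

    yaml.add_constructor('!point', construct_point, Loader=yaml.SafeLoader)
    print(yaml.safe_load('!point [1, 2]'))   # (1, 2)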
-class SafeConstructor(BaseConstructor):
-
- def construct_scalar(self, node):
- if isinstance(node, MappingNode):
- for key_node, value_node in node.value:
- if key_node.tag == u'tag:yaml.org,2002:value':
- return self.construct_scalar(value_node)
- return BaseConstructor.construct_scalar(self, node)
-
- def flatten_mapping(self, node):
- merge = []
- index = 0
- while index < len(node.value):
- key_node, value_node = node.value[index]
- if key_node.tag == u'tag:yaml.org,2002:merge':
- del node.value[index]
- if isinstance(value_node, MappingNode):
- self.flatten_mapping(value_node)
- merge.extend(value_node.value)
- elif isinstance(value_node, SequenceNode):
- submerge = []
- for subnode in value_node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing a mapping",
- node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- self.flatten_mapping(subnode)
- submerge.append(subnode.value)
- submerge.reverse()
- for value in submerge:
- merge.extend(value)
- else:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, but found %s"
- % value_node.id, value_node.start_mark)
- elif key_node.tag == u'tag:yaml.org,2002:value':
- key_node.tag = u'tag:yaml.org,2002:str'
- index += 1
- else:
- index += 1
- if merge:
- node.value = merge + node.value
-
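flatten_mapping implements the YAML 1.1 merge key: merged pairs are prepended to node.value, so keys spelled out in the mapping itself win when construct_mapping later overwrites duplicates. For example:

    import yaml

    doc = """
    defaults: &defaults
      retries: 3
      timeout: 10
    job:
      <<: *defaults
      timeout: 30
    """
    print(yaml.safe_load(doc)['job'])   # {'retries': 3, 'timeout': 30}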
- def construct_mapping(self, node, deep=False):
- if isinstance(node, MappingNode):
- self.flatten_mapping(node)
- return BaseConstructor.construct_mapping(self, node, deep=deep)
-
- def construct_yaml_null(self, node):
- self.construct_scalar(node)
- return None
-
- bool_values = {
- u'yes': True,
- u'no': False,
- u'true': True,
- u'false': False,
- u'on': True,
- u'off': False,
- }
-
- def construct_yaml_bool(self, node):
- value = self.construct_scalar(node)
- return self.bool_values[value.lower()]
-
- def construct_yaml_int(self, node):
- value = str(self.construct_scalar(node))
- value = value.replace('_', '')
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '0':
- return 0
- elif value.startswith('0b'):
- return sign*int(value[2:], 2)
- elif value.startswith('0x'):
- return sign*int(value[2:], 16)
- elif value[0] == '0':
- return sign*int(value, 8)
- elif ':' in value:
- digits = [int(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*int(value)
-
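construct_yaml_int accepts the full set of YAML 1.1 integer forms, including base-60 values, which YAML 1.2 later dropped. Current PyYAML still resolves all of them:

    import yaml

    print(yaml.safe_load('0b1010'))   # 10  (binary)
    print(yaml.safe_load('0x1A'))     # 26  (hex)
    print(yaml.safe_load('017'))      # 15  (octal: leading zero)
    print(yaml.safe_load('1:30'))     # 90  (sexagesimal: 1*60 + 30)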
- inf_value = 1e300
- while inf_value != inf_value*inf_value:
- inf_value *= inf_value
- nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
-
- def construct_yaml_float(self, node):
- value = str(self.construct_scalar(node))
- value = value.replace('_', '').lower()
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '.inf':
- return sign*self.inf_value
- elif value == '.nan':
- return self.nan_value
- elif ':' in value:
- digits = [float(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0.0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*float(value)
-
- def construct_yaml_binary(self, node):
- value = self.construct_scalar(node)
- try:
- return str(value).decode('base64')
- except (binascii.Error, UnicodeEncodeError), exc:
- raise ConstructorError(None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- timestamp_regexp = re.compile(
- ur'''^(?P<year>[0-9][0-9][0-9][0-9])
- -(?P<month>[0-9][0-9]?)
- -(?P<day>[0-9][0-9]?)
- (?:(?:[Tt]|[ \t]+)
- (?P<hour>[0-9][0-9]?)
- :(?P<minute>[0-9][0-9])
- :(?P<second>[0-9][0-9])
- (?:\.(?P<fraction>[0-9]*))?
- (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
- (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
- def construct_yaml_timestamp(self, node):
- value = self.construct_scalar(node)
- match = self.timestamp_regexp.match(node.value)
- values = match.groupdict()
- year = int(values['year'])
- month = int(values['month'])
- day = int(values['day'])
- if not values['hour']:
- return datetime.date(year, month, day)
- hour = int(values['hour'])
- minute = int(values['minute'])
- second = int(values['second'])
- fraction = 0
- if values['fraction']:
- fraction = values['fraction'][:6]
- while len(fraction) < 6:
- fraction += '0'
- fraction = int(fraction)
- delta = None
- if values['tz_sign']:
- tz_hour = int(values['tz_hour'])
- tz_minute = int(values['tz_minute'] or 0)
- delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
- if values['tz_sign'] == '-':
- delta = -delta
- data = datetime.datetime(year, month, day, hour, minute, second, fraction)
- if delta:
- data -= delta
- return data
-
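Note how the timezone offset is folded into the datetime and then discarded: this copy returns a naive datetime shifted to UTC, whereas PyYAML >= 5.3 attaches a tzinfo instead. Under the old semantics shown above:

    import yaml

    ts = yaml.safe_load('2001-12-14 21:59:43.10 -5')
    # Old (<= 5.2) behaviour, matching the code above:
    #   2001-12-15 02:59:43.100000  -- naive, shifted to UTC
    # PyYAML >= 5.3 returns an aware datetime with utcoffset -5:00 instead.
    print(ts)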
- def construct_yaml_omap(self, node):
- # Note: we do not check for duplicate keys, because it's too
- # CPU-expensive.
- omap = []
- yield omap
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- omap.append((key, value))
-
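!!omap is the tag that preserves key order: it loads as a list of pairs rather than a dict. For instance:

    import yaml

    print(yaml.safe_load('!!omap [{a: 1}, {b: 2}]'))
    # [('a', 1), ('b', 2)]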
- def construct_yaml_pairs(self, node):
- # Note: the same code as `construct_yaml_omap`.
- pairs = []
- yield pairs
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- pairs.append((key, value))
-
- def construct_yaml_set(self, node):
- data = set()
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_str(self, node):
- value = self.construct_scalar(node)
- try:
- return value.encode('ascii')
- except UnicodeEncodeError:
- return value
-
- def construct_yaml_seq(self, node):
- data = []
- yield data
- data.extend(self.construct_sequence(node))
-
- def construct_yaml_map(self, node):
- data = {}
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_object(self, node, cls):
- data = cls.__new__(cls)
- yield data
- if hasattr(data, '__setstate__'):
- state = self.construct_mapping(node, deep=True)
- data.__setstate__(state)
- else:
- state = self.construct_mapping(node)
- data.__dict__.update(state)
-
- def construct_undefined(self, node):
- raise ConstructorError(None, None,
- "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
- node.start_mark)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:null',
- SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:bool',
- SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:int',
- SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:float',
- SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:binary',
- SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:timestamp',
- SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:omap',
- SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:pairs',
- SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:set',
- SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:str',
- SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:seq',
- SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:map',
- SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(None,
- SafeConstructor.construct_undefined)
-
-class Constructor(SafeConstructor):
-
- def construct_python_str(self, node):
- return self.construct_scalar(node).encode('utf-8')
-
- def construct_python_unicode(self, node):
- return self.construct_scalar(node)
-
- def construct_python_long(self, node):
- return long(self.construct_yaml_int(node))
-
- def construct_python_complex(self, node):
- return complex(self.construct_scalar(node))
-
- def construct_python_tuple(self, node):
- return tuple(self.construct_sequence(node))
-
- def find_python_module(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python module", mark,
- "expected non-empty name appended to the tag", mark)
- try:
- __import__(name)
- except ImportError, exc:
- raise ConstructorError("while constructing a Python module", mark,
- "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
- return sys.modules[name]
-
- def find_python_name(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python object", mark,
- "expected non-empty name appended to the tag", mark)
- if u'.' in name:
- module_name, object_name = name.rsplit('.', 1)
- else:
- module_name = '__builtin__'
- object_name = name
- try:
- __import__(module_name)
- except ImportError, exc:
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
- module = sys.modules[module_name]
- if not hasattr(module, object_name):
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find %r in the module %r" % (object_name.encode('utf-8'),
- module.__name__), mark)
- return getattr(module, object_name)
-
- def construct_python_name(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python name", node.start_mark,
- "expected the empty value, but found %r" % value.encode('utf-8'),
- node.start_mark)
- return self.find_python_name(suffix, node.start_mark)
-
- def construct_python_module(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python module", node.start_mark,
- "expected the empty value, but found %r" % value.encode('utf-8'),
- node.start_mark)
- return self.find_python_module(suffix, node.start_mark)
-
- class classobj: pass
-
- def make_python_instance(self, suffix, node,
- args=None, kwds=None, newobj=False):
- if not args:
- args = []
- if not kwds:
- kwds = {}
- cls = self.find_python_name(suffix, node.start_mark)
- if newobj and isinstance(cls, type(self.classobj)) \
- and not args and not kwds:
- instance = self.classobj()
- instance.__class__ = cls
- return instance
- elif newobj and isinstance(cls, type):
- return cls.__new__(cls, *args, **kwds)
- else:
- return cls(*args, **kwds)
-
- def set_python_instance_state(self, instance, state):
- if hasattr(instance, '__setstate__'):
- instance.__setstate__(state)
- else:
- slotstate = {}
- if isinstance(state, tuple) and len(state) == 2:
- state, slotstate = state
- if hasattr(instance, '__dict__'):
- instance.__dict__.update(state)
- elif state:
- slotstate.update(state)
- for key, value in slotstate.items():
-                    setattr(instance, key, value)  # was setattr(object, ...): a long-standing upstream bug
-
- def construct_python_object(self, suffix, node):
- # Format:
- # !!python/object:module.name { ... state ... }
- instance = self.make_python_instance(suffix, node, newobj=True)
- yield instance
- deep = hasattr(instance, '__setstate__')
- state = self.construct_mapping(node, deep=deep)
- self.set_python_instance_state(instance, state)
-
- def construct_python_object_apply(self, suffix, node, newobj=False):
- # Format:
- # !!python/object/apply # (or !!python/object/new)
- # args: [ ... arguments ... ]
- # kwds: { ... keywords ... }
- # state: ... state ...
- # listitems: [ ... listitems ... ]
- # dictitems: { ... dictitems ... }
- # or short format:
- # !!python/object/apply [ ... arguments ... ]
- # The difference between !!python/object/apply and !!python/object/new
- # is how an object is created, check make_python_instance for details.
- if isinstance(node, SequenceNode):
- args = self.construct_sequence(node, deep=True)
- kwds = {}
- state = {}
- listitems = []
- dictitems = {}
- else:
- value = self.construct_mapping(node, deep=True)
- args = value.get('args', [])
- kwds = value.get('kwds', {})
- state = value.get('state', {})
- listitems = value.get('listitems', [])
- dictitems = value.get('dictitems', {})
- instance = self.make_python_instance(suffix, node, args, kwds, newobj)
- if state:
- self.set_python_instance_state(instance, state)
- if listitems:
- instance.extend(listitems)
- if dictitems:
- for key in dictitems:
- instance[key] = dictitems[key]
- return instance
-
- def construct_python_object_new(self, suffix, node):
- return self.construct_python_object_apply(suffix, node, newobj=True)
-
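The python/object* multi-constructors are why the full Constructor must never see untrusted input: !!python/object/apply imports an arbitrary dotted name and calls it with attacker-chosen arguments. The classic demonstration (the dangerous call is deliberately left commented out):

    import yaml

    payload = "!!python/object/apply:os.system ['echo pwned']"
    # yaml.load(payload, Loader=yaml.Loader)   # would run the shell command
    try:
        yaml.safe_load(payload)
    except yaml.constructor.ConstructorError as exc:
        print(exc)   # "could not determine a constructor for the tag ..."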
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/none',
- Constructor.construct_yaml_null)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/bool',
- Constructor.construct_yaml_bool)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/str',
- Constructor.construct_python_str)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/unicode',
- Constructor.construct_python_unicode)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/int',
- Constructor.construct_yaml_int)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/long',
- Constructor.construct_python_long)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/float',
- Constructor.construct_yaml_float)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/complex',
- Constructor.construct_python_complex)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/list',
- Constructor.construct_yaml_seq)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/tuple',
- Constructor.construct_python_tuple)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/dict',
- Constructor.construct_yaml_map)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/name:',
- Constructor.construct_python_name)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/module:',
- Constructor.construct_python_module)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object:',
- Constructor.construct_python_object)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object/apply:',
- Constructor.construct_python_object_apply)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object/new:',
- Constructor.construct_python_object_new)
-
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
deleted file mode 100644
index 2858ab479..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
- 'CBaseDumper', 'CSafeDumper', 'CDumper']
-
-from _yaml import CParser, CEmitter
-
-from constructor import *
-
-from serializer import *
-from representer import *
-
-from resolver import *
-
-class CBaseLoader(CParser, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class CSafeLoader(CParser, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class CLoader(CParser, Constructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
-class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CDumper(CEmitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
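These thin classes just wire the libyaml-backed CParser and CEmitter to the pure-Python constructors and representers. The usual way to take advantage of them is to fall back when the C extension is not built:

    import yaml

    try:
        from yaml import CSafeLoader as Loader   # libyaml-backed, much faster
    except ImportError:
        from yaml import SafeLoader as Loader    # pure-Python fallback

    print(yaml.load('a: 1', Loader=Loader))   # {'a': 1}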
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
deleted file mode 100644
index 3685cbeeb..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
-
-from emitter import *
-from serializer import *
-from representer import *
-from resolver import *
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
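All three dumpers thread the same formatting knobs (indent, width, flow style, encoding) down to the Emitter and Serializer. Typical use through the convenience API:

    import yaml

    doc = {'name': 'netdata', 'ports': [19999]}
    print(yaml.safe_dump(doc, default_flow_style=False, indent=2, width=60))
    # name: netdata
    # ports:
    # - 19999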
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
deleted file mode 100644
index 9a460a0fd..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
+++ /dev/null
@@ -1,1141 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Emitter expects events obeying the following grammar:
-# stream ::= STREAM-START document* STREAM-END
-# document ::= DOCUMENT-START node DOCUMENT-END
-# node ::= SCALAR | sequence | mapping
-# sequence ::= SEQUENCE-START node* SEQUENCE-END
-# mapping ::= MAPPING-START (node node)* MAPPING-END
-
-__all__ = ['Emitter', 'EmitterError']
-
-from error import YAMLError
-from events import *
-
-class EmitterError(YAMLError):
- pass
-
-class ScalarAnalysis(object):
- def __init__(self, scalar, empty, multiline,
- allow_flow_plain, allow_block_plain,
- allow_single_quoted, allow_double_quoted,
- allow_block):
- self.scalar = scalar
- self.empty = empty
- self.multiline = multiline
- self.allow_flow_plain = allow_flow_plain
- self.allow_block_plain = allow_block_plain
- self.allow_single_quoted = allow_single_quoted
- self.allow_double_quoted = allow_double_quoted
- self.allow_block = allow_block
-
-class Emitter(object):
-
- DEFAULT_TAG_PREFIXES = {
- u'!' : u'!',
- u'tag:yaml.org,2002:' : u'!!',
- }
-
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
-
- # The stream should have the methods `write` and possibly `flush`.
- self.stream = stream
-
-        # Encoding can be overridden by STREAM-START.
- self.encoding = None
-
- # Emitter is a state machine with a stack of states to handle nested
- # structures.
- self.states = []
- self.state = self.expect_stream_start
-
- # Current event and the event queue.
- self.events = []
- self.event = None
-
- # The current indentation level and the stack of previous indents.
- self.indents = []
- self.indent = None
-
- # Flow level.
- self.flow_level = 0
-
- # Contexts.
- self.root_context = False
- self.sequence_context = False
- self.mapping_context = False
- self.simple_key_context = False
-
- # Characteristics of the last emitted character:
- # - current position.
-        #   - is it whitespace?
-        #   - is it an indentation character
- # (indentation space, '-', '?', or ':')?
- self.line = 0
- self.column = 0
- self.whitespace = True
- self.indention = True
-
- # Whether the document requires an explicit document indicator
- self.open_ended = False
-
- # Formatting details.
- self.canonical = canonical
- self.allow_unicode = allow_unicode
- self.best_indent = 2
- if indent and 1 < indent < 10:
- self.best_indent = indent
- self.best_width = 80
- if width and width > self.best_indent*2:
- self.best_width = width
- self.best_line_break = u'\n'
- if line_break in [u'\r', u'\n', u'\r\n']:
- self.best_line_break = line_break
-
- # Tag prefixes.
- self.tag_prefixes = None
-
- # Prepared anchor and tag.
- self.prepared_anchor = None
- self.prepared_tag = None
-
- # Scalar analysis and style.
- self.analysis = None
- self.style = None
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def emit(self, event):
- self.events.append(event)
- while not self.need_more_events():
- self.event = self.events.pop(0)
- self.state()
- self.event = None
-
-    # In some cases, we wait for the next few events before emitting.
-
- def need_more_events(self):
- if not self.events:
- return True
- event = self.events[0]
- if isinstance(event, DocumentStartEvent):
- return self.need_events(1)
- elif isinstance(event, SequenceStartEvent):
- return self.need_events(2)
- elif isinstance(event, MappingStartEvent):
- return self.need_events(3)
- else:
- return False
-
- def need_events(self, count):
- level = 0
- for event in self.events[1:]:
- if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
- level += 1
- elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
- level -= 1
- elif isinstance(event, StreamEndEvent):
- level = -1
- if level < 0:
- return False
- return (len(self.events) < count+1)
-
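need_more_events is why the emitter keeps one event of lookahead for documents, two for sequences and three for mappings: it must know whether a collection is empty, or a key simple, before committing to a style. The event stream can also be driven directly; a minimal sketch with current PyYAML:

    import yaml
    from yaml.events import (StreamStartEvent, StreamEndEvent,
                             DocumentStartEvent, DocumentEndEvent,
                             SequenceStartEvent, SequenceEndEvent,
                             ScalarEvent)

    events = [
        StreamStartEvent(), DocumentStartEvent(),
        SequenceStartEvent(anchor=None, tag=None, implicit=True,
                           flow_style=True),
        ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='a'),
        ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='b'),
        SequenceEndEvent(), DocumentEndEvent(), StreamEndEvent(),
    ]
    print(yaml.emit(events))   # [a, b]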
- def increase_indent(self, flow=False, indentless=False):
- self.indents.append(self.indent)
- if self.indent is None:
- if flow:
- self.indent = self.best_indent
- else:
- self.indent = 0
- elif not indentless:
- self.indent += self.best_indent
-
- # States.
-
- # Stream handlers.
-
- def expect_stream_start(self):
- if isinstance(self.event, StreamStartEvent):
- if self.event.encoding and not getattr(self.stream, 'encoding', None):
- self.encoding = self.event.encoding
- self.write_stream_start()
- self.state = self.expect_first_document_start
- else:
- raise EmitterError("expected StreamStartEvent, but got %s"
- % self.event)
-
- def expect_nothing(self):
- raise EmitterError("expected nothing, but got %s" % self.event)
-
- # Document handlers.
-
- def expect_first_document_start(self):
- return self.expect_document_start(first=True)
-
- def expect_document_start(self, first=False):
- if isinstance(self.event, DocumentStartEvent):
- if (self.event.version or self.event.tags) and self.open_ended:
- self.write_indicator(u'...', True)
- self.write_indent()
- if self.event.version:
- version_text = self.prepare_version(self.event.version)
- self.write_version_directive(version_text)
- self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
- if self.event.tags:
- handles = self.event.tags.keys()
- handles.sort()
- for handle in handles:
- prefix = self.event.tags[handle]
- self.tag_prefixes[prefix] = handle
- handle_text = self.prepare_tag_handle(handle)
- prefix_text = self.prepare_tag_prefix(prefix)
- self.write_tag_directive(handle_text, prefix_text)
- implicit = (first and not self.event.explicit and not self.canonical
- and not self.event.version and not self.event.tags
- and not self.check_empty_document())
- if not implicit:
- self.write_indent()
- self.write_indicator(u'---', True)
- if self.canonical:
- self.write_indent()
- self.state = self.expect_document_root
- elif isinstance(self.event, StreamEndEvent):
- if self.open_ended:
- self.write_indicator(u'...', True)
- self.write_indent()
- self.write_stream_end()
- self.state = self.expect_nothing
- else:
- raise EmitterError("expected DocumentStartEvent, but got %s"
- % self.event)
-
- def expect_document_end(self):
- if isinstance(self.event, DocumentEndEvent):
- self.write_indent()
- if self.event.explicit:
- self.write_indicator(u'...', True)
- self.write_indent()
- self.flush_stream()
- self.state = self.expect_document_start
- else:
- raise EmitterError("expected DocumentEndEvent, but got %s"
- % self.event)
-
- def expect_document_root(self):
- self.states.append(self.expect_document_end)
- self.expect_node(root=True)
-
- # Node handlers.
-
- def expect_node(self, root=False, sequence=False, mapping=False,
- simple_key=False):
- self.root_context = root
- self.sequence_context = sequence
- self.mapping_context = mapping
- self.simple_key_context = simple_key
- if isinstance(self.event, AliasEvent):
- self.expect_alias()
- elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
- self.process_anchor(u'&')
- self.process_tag()
- if isinstance(self.event, ScalarEvent):
- self.expect_scalar()
- elif isinstance(self.event, SequenceStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_sequence():
- self.expect_flow_sequence()
- else:
- self.expect_block_sequence()
- elif isinstance(self.event, MappingStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_mapping():
- self.expect_flow_mapping()
- else:
- self.expect_block_mapping()
- else:
- raise EmitterError("expected NodeEvent, but got %s" % self.event)
-
- def expect_alias(self):
- if self.event.anchor is None:
- raise EmitterError("anchor is not specified for alias")
- self.process_anchor(u'*')
- self.state = self.states.pop()
-
- def expect_scalar(self):
- self.increase_indent(flow=True)
- self.process_scalar()
- self.indent = self.indents.pop()
- self.state = self.states.pop()
-
- # Flow sequence handlers.
-
- def expect_flow_sequence(self):
- self.write_indicator(u'[', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_sequence_item
-
- def expect_first_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(u']', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- def expect_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(u',', False)
- self.write_indent()
- self.write_indicator(u']', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(u',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- # Flow mapping handlers.
-
- def expect_flow_mapping(self):
- self.write_indicator(u'{', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_mapping_key
-
- def expect_first_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(u'}', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(u',', False)
- self.write_indent()
- self.write_indicator(u'}', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(u',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_simple_value(self):
- self.write_indicator(u':', False)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_value(self):
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.write_indicator(u':', True)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- # Block sequence handlers.
-
- def expect_block_sequence(self):
- indentless = (self.mapping_context and not self.indention)
- self.increase_indent(flow=False, indentless=indentless)
- self.state = self.expect_first_block_sequence_item
-
- def expect_first_block_sequence_item(self):
- return self.expect_block_sequence_item(first=True)
-
- def expect_block_sequence_item(self, first=False):
- if not first and isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- self.write_indicator(u'-', True, indention=True)
- self.states.append(self.expect_block_sequence_item)
- self.expect_node(sequence=True)
-
- # Block mapping handlers.
-
- def expect_block_mapping(self):
- self.increase_indent(flow=False)
- self.state = self.expect_first_block_mapping_key
-
- def expect_first_block_mapping_key(self):
- return self.expect_block_mapping_key(first=True)
-
- def expect_block_mapping_key(self, first=False):
- if not first and isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- if self.check_simple_key():
- self.states.append(self.expect_block_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True, indention=True)
- self.states.append(self.expect_block_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_simple_value(self):
- self.write_indicator(u':', False)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_value(self):
- self.write_indent()
- self.write_indicator(u':', True, indention=True)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- # Checkers.
-
- def check_empty_sequence(self):
- return (isinstance(self.event, SequenceStartEvent) and self.events
- and isinstance(self.events[0], SequenceEndEvent))
-
- def check_empty_mapping(self):
- return (isinstance(self.event, MappingStartEvent) and self.events
- and isinstance(self.events[0], MappingEndEvent))
-
- def check_empty_document(self):
- if not isinstance(self.event, DocumentStartEvent) or not self.events:
- return False
- event = self.events[0]
- return (isinstance(event, ScalarEvent) and event.anchor is None
- and event.tag is None and event.implicit and event.value == u'')
-
- def check_simple_key(self):
- length = 0
- if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- length += len(self.prepared_anchor)
- if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
- and self.event.tag is not None:
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(self.event.tag)
- length += len(self.prepared_tag)
- if isinstance(self.event, ScalarEvent):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- length += len(self.analysis.scalar)
- return (length < 128 and (isinstance(self.event, AliasEvent)
- or (isinstance(self.event, ScalarEvent)
- and not self.analysis.empty and not self.analysis.multiline)
- or self.check_empty_sequence() or self.check_empty_mapping()))
-
- # Anchor, Tag, and Scalar processors.
-
- def process_anchor(self, indicator):
- if self.event.anchor is None:
- self.prepared_anchor = None
- return
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- if self.prepared_anchor:
- self.write_indicator(indicator+self.prepared_anchor, True)
- self.prepared_anchor = None
-
- def process_tag(self):
- tag = self.event.tag
- if isinstance(self.event, ScalarEvent):
- if self.style is None:
- self.style = self.choose_scalar_style()
- if ((not self.canonical or tag is None) and
- ((self.style == '' and self.event.implicit[0])
- or (self.style != '' and self.event.implicit[1]))):
- self.prepared_tag = None
- return
- if self.event.implicit[0] and tag is None:
- tag = u'!'
- self.prepared_tag = None
- else:
- if (not self.canonical or tag is None) and self.event.implicit:
- self.prepared_tag = None
- return
- if tag is None:
- raise EmitterError("tag is not specified")
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(tag)
- if self.prepared_tag:
- self.write_indicator(self.prepared_tag, True)
- self.prepared_tag = None
-
- def choose_scalar_style(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.event.style == '"' or self.canonical:
- return '"'
- if not self.event.style and self.event.implicit[0]:
- if (not (self.simple_key_context and
- (self.analysis.empty or self.analysis.multiline))
- and (self.flow_level and self.analysis.allow_flow_plain
- or (not self.flow_level and self.analysis.allow_block_plain))):
- return ''
- if self.event.style and self.event.style in '|>':
- if (not self.flow_level and not self.simple_key_context
- and self.analysis.allow_block):
- return self.event.style
- if not self.event.style or self.event.style == '\'':
- if (self.analysis.allow_single_quoted and
- not (self.simple_key_context and self.analysis.multiline)):
- return '\''
- return '"'
-
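choose_scalar_style falls through from plain to block to single-quoted to double-quoted, honoring both the requested style and what analyze_scalar allows. The resolver feeds in through the implicit flags, which is why a string that merely looks like another type comes out quoted (output shown for PyYAML >= 5.1 defaults):

    import yaml

    print(yaml.safe_dump({'a': 'yes'}))   # a: 'yes'   (plain would re-read as a bool)
    print(yaml.safe_dump({'a': 'yes'}, default_style='"'))   # "a": "yes"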
- def process_scalar(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.style is None:
- self.style = self.choose_scalar_style()
- split = (not self.simple_key_context)
- #if self.analysis.multiline and split \
- # and (not self.style or self.style in '\'\"'):
- # self.write_indent()
- if self.style == '"':
- self.write_double_quoted(self.analysis.scalar, split)
- elif self.style == '\'':
- self.write_single_quoted(self.analysis.scalar, split)
- elif self.style == '>':
- self.write_folded(self.analysis.scalar)
- elif self.style == '|':
- self.write_literal(self.analysis.scalar)
- else:
- self.write_plain(self.analysis.scalar, split)
- self.analysis = None
- self.style = None
-
- # Analyzers.
-
- def prepare_version(self, version):
- major, minor = version
- if major != 1:
- raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
- return u'%d.%d' % (major, minor)
-
- def prepare_tag_handle(self, handle):
- if not handle:
- raise EmitterError("tag handle must not be empty")
- if handle[0] != u'!' or handle[-1] != u'!':
- raise EmitterError("tag handle must start and end with '!': %r"
- % (handle.encode('utf-8')))
- for ch in handle[1:-1]:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_'):
- raise EmitterError("invalid character %r in the tag handle: %r"
- % (ch.encode('utf-8'), handle.encode('utf-8')))
- return handle
-
- def prepare_tag_prefix(self, prefix):
- if not prefix:
- raise EmitterError("tag prefix must not be empty")
- chunks = []
- start = end = 0
- if prefix[0] == u'!':
- end = 1
- while end < len(prefix):
- ch = prefix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?!:@&=+$,_.~*\'()[]':
- end += 1
- else:
- if start < end:
- chunks.append(prefix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append(u'%%%02X' % ord(ch))
- if start < end:
- chunks.append(prefix[start:end])
- return u''.join(chunks)
-
- def prepare_tag(self, tag):
- if not tag:
- raise EmitterError("tag must not be empty")
- if tag == u'!':
- return tag
- handle = None
- suffix = tag
- prefixes = self.tag_prefixes.keys()
- prefixes.sort()
- for prefix in prefixes:
- if tag.startswith(prefix) \
- and (prefix == u'!' or len(prefix) < len(tag)):
- handle = self.tag_prefixes[prefix]
- suffix = tag[len(prefix):]
- chunks = []
- start = end = 0
- while end < len(suffix):
- ch = suffix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.~*\'()[]' \
- or (ch == u'!' and handle != u'!'):
- end += 1
- else:
- if start < end:
- chunks.append(suffix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append(u'%%%02X' % ord(ch))
- if start < end:
- chunks.append(suffix[start:end])
- suffix_text = u''.join(chunks)
- if handle:
- return u'%s%s' % (handle, suffix_text)
- else:
- return u'!<%s>' % suffix_text
-
- def prepare_anchor(self, anchor):
- if not anchor:
- raise EmitterError("anchor must not be empty")
- for ch in anchor:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_'):
- raise EmitterError("invalid character %r in the anchor: %r"
- % (ch.encode('utf-8'), anchor.encode('utf-8')))
- return anchor
-
- def analyze_scalar(self, scalar):
-
- # Empty scalar is a special case.
- if not scalar:
- return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
- allow_flow_plain=False, allow_block_plain=True,
- allow_single_quoted=True, allow_double_quoted=True,
- allow_block=False)
-
- # Indicators and special characters.
- block_indicators = False
- flow_indicators = False
- line_breaks = False
- special_characters = False
-
- # Important whitespace combinations.
- leading_space = False
- leading_break = False
- trailing_space = False
- trailing_break = False
- break_space = False
- space_break = False
-
- # Check document indicators.
- if scalar.startswith(u'---') or scalar.startswith(u'...'):
- block_indicators = True
- flow_indicators = True
-
- # First character or preceded by a whitespace.
- preceeded_by_whitespace = True
-
- # Last character or followed by a whitespace.
- followed_by_whitespace = (len(scalar) == 1 or
- scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
-
- # The previous character is a space.
- previous_space = False
-
- # The previous character is a break.
- previous_break = False
-
- index = 0
- while index < len(scalar):
- ch = scalar[index]
-
- # Check for indicators.
- if index == 0:
- # Leading indicators are special characters.
- if ch in u'#,[]{}&*!|>\'\"%@`':
- flow_indicators = True
- block_indicators = True
- if ch in u'?:':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == u'-' and followed_by_whitespace:
- flow_indicators = True
- block_indicators = True
- else:
- # Some indicators cannot appear within a scalar as well.
- if ch in u',?[]{}':
- flow_indicators = True
- if ch == u':':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == u'#' and preceeded_by_whitespace:
- flow_indicators = True
- block_indicators = True
-
- # Check for line breaks, special, and unicode characters.
- if ch in u'\n\x85\u2028\u2029':
- line_breaks = True
- if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
- if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
- or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
- unicode_characters = True
- if not self.allow_unicode:
- special_characters = True
- else:
- special_characters = True
-
- # Detect important whitespace combinations.
- if ch == u' ':
- if index == 0:
- leading_space = True
- if index == len(scalar)-1:
- trailing_space = True
- if previous_break:
- break_space = True
- previous_space = True
- previous_break = False
- elif ch in u'\n\x85\u2028\u2029':
- if index == 0:
- leading_break = True
- if index == len(scalar)-1:
- trailing_break = True
- if previous_space:
- space_break = True
- previous_space = False
- previous_break = True
- else:
- previous_space = False
- previous_break = False
-
- # Prepare for the next character.
- index += 1
- preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
- followed_by_whitespace = (index+1 >= len(scalar) or
- scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
-
- # Let's decide what styles are allowed.
- allow_flow_plain = True
- allow_block_plain = True
- allow_single_quoted = True
- allow_double_quoted = True
- allow_block = True
-
-        # Leading and trailing whitespace is bad for plain scalars.
- if (leading_space or leading_break
- or trailing_space or trailing_break):
- allow_flow_plain = allow_block_plain = False
-
- # We do not permit trailing spaces for block scalars.
- if trailing_space:
- allow_block = False
-
- # Spaces at the beginning of a new line are only acceptable for block
- # scalars.
- if break_space:
- allow_flow_plain = allow_block_plain = allow_single_quoted = False
-
-        # Spaces followed by breaks, as well as special characters, are only
- # allowed for double quoted scalars.
- if space_break or special_characters:
- allow_flow_plain = allow_block_plain = \
- allow_single_quoted = allow_block = False
-
- # Although the plain scalar writer supports breaks, we never emit
- # multiline plain scalars.
- if line_breaks:
- allow_flow_plain = allow_block_plain = False
-
- # Flow indicators are forbidden for flow plain scalars.
- if flow_indicators:
- allow_flow_plain = False
-
- # Block indicators are forbidden for block plain scalars.
- if block_indicators:
- allow_block_plain = False
-
- return ScalarAnalysis(scalar=scalar,
- empty=False, multiline=line_breaks,
- allow_flow_plain=allow_flow_plain,
- allow_block_plain=allow_block_plain,
- allow_single_quoted=allow_single_quoted,
- allow_double_quoted=allow_double_quoted,
- allow_block=allow_block)
-
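In effect analyze_scalar picks the cheapest safe quoting for every string: anything with leading whitespace or an indicator in a dangerous position loses the plain style. For example (block style is the default in PyYAML >= 5.1; older versions would emit this list in flow style):

    import yaml

    print(yaml.safe_dump(['plain', ' leading space', 'a: b', '- item']))
    # - plain
    # - ' leading space'
    # - 'a: b'
    # - '- item'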
- # Writers.
-
- def flush_stream(self):
- if hasattr(self.stream, 'flush'):
- self.stream.flush()
-
- def write_stream_start(self):
- # Write BOM if needed.
- if self.encoding and self.encoding.startswith('utf-16'):
- self.stream.write(u'\uFEFF'.encode(self.encoding))
-
- def write_stream_end(self):
- self.flush_stream()
-
- def write_indicator(self, indicator, need_whitespace,
- whitespace=False, indention=False):
- if self.whitespace or not need_whitespace:
- data = indicator
- else:
- data = u' '+indicator
- self.whitespace = whitespace
- self.indention = self.indention and indention
- self.column += len(data)
- self.open_ended = False
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_indent(self):
- indent = self.indent or 0
- if not self.indention or self.column > indent \
- or (self.column == indent and not self.whitespace):
- self.write_line_break()
- if self.column < indent:
- self.whitespace = True
- data = u' '*(indent-self.column)
- self.column = indent
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_line_break(self, data=None):
- if data is None:
- data = self.best_line_break
- self.whitespace = True
- self.indention = True
- self.line += 1
- self.column = 0
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_version_directive(self, version_text):
- data = u'%%YAML %s' % version_text
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- def write_tag_directive(self, handle_text, prefix_text):
- data = u'%%TAG %s %s' % (handle_text, prefix_text)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- # Scalar streams.
-
- def write_single_quoted(self, text, split=True):
- self.write_indicator(u'\'', True)
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch is None or ch != u' ':
- if start+1 == end and self.column > self.best_width and split \
- and start != 0 and end != len(text):
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- if text[start] == u'\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch == u'\'':
- data = u'\'\''
- self.column += 2
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end + 1
- if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
- self.write_indicator(u'\'', False)
-
- ESCAPE_REPLACEMENTS = {
- u'\0': u'0',
- u'\x07': u'a',
- u'\x08': u'b',
- u'\x09': u't',
- u'\x0A': u'n',
- u'\x0B': u'v',
- u'\x0C': u'f',
- u'\x0D': u'r',
- u'\x1B': u'e',
- u'\"': u'\"',
- u'\\': u'\\',
- u'\x85': u'N',
- u'\xA0': u'_',
- u'\u2028': u'L',
- u'\u2029': u'P',
- }
-
- def write_double_quoted(self, text, split=True):
- self.write_indicator(u'"', True)
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
- or not (u'\x20' <= ch <= u'\x7E'
- or (self.allow_unicode
- and (u'\xA0' <= ch <= u'\uD7FF'
- or u'\uE000' <= ch <= u'\uFFFD'))):
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- if ch in self.ESCAPE_REPLACEMENTS:
- data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
- elif ch <= u'\xFF':
- data = u'\\x%02X' % ord(ch)
- elif ch <= u'\uFFFF':
- data = u'\\u%04X' % ord(ch)
- else:
- data = u'\\U%08X' % ord(ch)
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end+1
- if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
- and self.column+(end-start) > self.best_width and split:
- data = text[start:end]+u'\\'
- if start < end:
- start = end
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- if text[start] == u' ':
- data = u'\\'
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- end += 1
- self.write_indicator(u'"', False)
-
- def determine_block_hints(self, text):
- hints = u''
- if text:
- if text[0] in u' \n\x85\u2028\u2029':
- hints += unicode(self.best_indent)
- if text[-1] not in u'\n\x85\u2028\u2029':
- hints += u'-'
- elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
- hints += u'+'
- return hints
-
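determine_block_hints is where the chomping indicator comes from: '-' strips the missing final newline, no hint clips to exactly one, and '+' keeps extra trailing breaks (and marks the document open-ended). Forcing literal style makes this visible:

    import yaml

    print(yaml.safe_dump('a\nb',     default_style='|'))   # |-  strip: no final newline
    print(yaml.safe_dump('a\nb\n',   default_style='|'))   # |   clip: one final newline
    print(yaml.safe_dump('a\nb\n\n', default_style='|'))   # |+  keep: blank line preserved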
- def write_folded(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator(u'>'+hints, True)
- if hints[-1:] == u'+':
- self.open_ended = True
- self.write_line_break()
- leading_space = True
- spaces = False
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- if not leading_space and ch is not None and ch != u' ' \
- and text[start] == u'\n':
- self.write_line_break()
- leading_space = (ch == u' ')
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- elif spaces:
- if ch != u' ':
- if start+1 == end and self.column > self.best_width:
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- spaces = (ch == u' ')
- end += 1
-
- def write_literal(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator(u'|'+hints, True)
- if hints[-1:] == u'+':
- self.open_ended = True
- self.write_line_break()
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- else:
- if ch is None or ch in u'\n\x85\u2028\u2029':
- data = text[start:end]
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
-
- def write_plain(self, text, split=True):
- if self.root_context:
- self.open_ended = True
- if not text:
- return
- if not self.whitespace:
- data = u' '
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.whitespace = False
- self.indention = False
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch != u' ':
- if start+1 == end and self.column > self.best_width and split:
- self.write_indent()
- self.whitespace = False
- self.indention = False
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch not in u'\n\x85\u2028\u2029':
- if text[start] == u'\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
-
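Illustration (not part of the patch): the `determine_block_hints()` helper above picks the chomping/indent indicators appended to `|` and `>` block-scalar headers, and `write_folded()`/`write_literal()` consume them. A minimal Python 2 sketch through the public `yaml.dump()` API, which drives this emitter:

    import yaml

    # No chomping indicator: the scalar ends with exactly one newline.
    print yaml.dump(u'ends with one newline\n', default_style='|')
    # |
    #   ends with one newline

    # '-' (strip): the scalar has no trailing newline at all.
    print yaml.dump(u'no trailing newline', default_style='|')
    # |-
    #   no trailing newline
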
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/error.py b/collectors/python.d.plugin/python_modules/pyyaml2/error.py
deleted file mode 100644
index 5466be721..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/error.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-class Mark(object):
-
- def __init__(self, name, index, line, column, buffer, pointer):
- self.name = name
- self.index = index
- self.line = line
- self.column = column
- self.buffer = buffer
- self.pointer = pointer
-
- def get_snippet(self, indent=4, max_length=75):
- if self.buffer is None:
- return None
- head = ''
- start = self.pointer
- while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
- start -= 1
- if self.pointer-start > max_length/2-1:
- head = ' ... '
- start += 5
- break
- tail = ''
- end = self.pointer
- while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
- end += 1
- if end-self.pointer > max_length/2-1:
- tail = ' ... '
- end -= 5
- break
- snippet = self.buffer[start:end].encode('utf-8')
- return ' '*indent + head + snippet + tail + '\n' \
- + ' '*(indent+self.pointer-start+len(head)) + '^'
-
- def __str__(self):
- snippet = self.get_snippet()
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line+1, self.column+1)
- if snippet is not None:
- where += ":\n"+snippet
- return where
-
-class YAMLError(Exception):
- pass
-
-class MarkedYAMLError(YAMLError):
-
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None):
- self.context = context
- self.context_mark = context_mark
- self.problem = problem
- self.problem_mark = problem_mark
- self.note = note
-
- def __str__(self):
- lines = []
- if self.context is not None:
- lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None
- or self.context_mark.name != self.problem_mark.name
- or self.context_mark.line != self.problem_mark.line
- or self.context_mark.column != self.problem_mark.column):
- lines.append(str(self.context_mark))
- if self.problem is not None:
- lines.append(self.problem)
- if self.problem_mark is not None:
- lines.append(str(self.problem_mark))
- if self.note is not None:
- lines.append(self.note)
- return '\n'.join(lines)
-
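Illustration (not part of the patch): `Mark.get_snippet()` is what renders the caret-annotated context line seen in PyYAML parse errors. A minimal Python 2 sketch, assuming the module is importable as `error` (as it is from inside this package):

    from error import Mark, MarkedYAMLError

    buf = u'key: [1, 2\0'    # Reader-style buffer, NUL-terminated
    mark = Mark('<example>', 10, 0, 10, buf, 10)
    err = MarkedYAMLError(context="while parsing a flow sequence",
                          context_mark=mark,
                          problem="expected ',' or ']'",
                          problem_mark=mark)
    print str(err)
    # while parsing a flow sequence
    # expected ',' or ']'
    #   in "<example>", line 1, column 11:
    #     key: [1, 2
    #               ^
    # (the context mark is suppressed because it equals the problem mark)
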
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/events.py b/collectors/python.d.plugin/python_modules/pyyaml2/events.py
deleted file mode 100644
index 283452add..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/events.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Abstract classes.
-
-class Event(object):
- def __init__(self, start_mark=None, end_mark=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
- if hasattr(self, key)]
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-class NodeEvent(Event):
- def __init__(self, anchor, start_mark=None, end_mark=None):
- self.anchor = anchor
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class CollectionStartEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
- flow_style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class CollectionEndEvent(Event):
- pass
-
-# Implementations.
-
-class StreamStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None, encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndEvent(Event):
- pass
-
-class DocumentStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, version=None, tags=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
- self.version = version
- self.tags = tags
-
-class DocumentEndEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
-
-class AliasEvent(NodeEvent):
- pass
-
-class ScalarEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, value,
- start_mark=None, end_mark=None, style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class SequenceStartEvent(CollectionStartEvent):
- pass
-
-class SequenceEndEvent(CollectionEndEvent):
- pass
-
-class MappingStartEvent(CollectionStartEvent):
- pass
-
-class MappingEndEvent(CollectionEndEvent):
- pass
-
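Illustration (not part of the patch): these event classes are exactly what the public `yaml.parse()` helper yields; a document arrives as a flat event stream, not a tree:

    import yaml

    for event in yaml.parse(u'- a\n- b\n'):
        print event
    # StreamStartEvent()
    # DocumentStartEvent()
    # SequenceStartEvent(anchor=None, tag=None, implicit=True)
    # ScalarEvent(anchor=None, tag=None, implicit=(True, False), value=u'a')
    # ScalarEvent(anchor=None, tag=None, implicit=(True, False), value=u'b')
    # SequenceEndEvent()
    # DocumentEndEvent()
    # StreamEndEvent()
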
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/loader.py b/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
deleted file mode 100644
index 1c195531f..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
-
-from reader import *
-from scanner import *
-from parser import *
-from composer import *
-from constructor import *
-from resolver import *
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
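Illustration (not part of the patch): each loader above is the same six-stage pipeline (Reader, Scanner, Parser, Composer, Constructor, Resolver) assembled as a mixin stack; only the constructor/resolver pair differs. Through the public API:

    import yaml

    # SafeLoader constructs only plain Python types; the full Loader also
    # resolves python/* tags and can instantiate arbitrary objects.
    print yaml.load(u'a: 1\nb: [2, 3]\n', Loader=yaml.SafeLoader)
    # {'a': 1, 'b': [2, 3]}
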
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
deleted file mode 100644
index ed2a1b43e..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-class Node(object):
- def __init__(self, tag, value, start_mark, end_mark):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- value = self.value
- #if isinstance(value, list):
- # if len(value) == 0:
- # value = '<empty>'
- # elif len(value) == 1:
- # value = '<1 item>'
- # else:
- # value = '<%d items>' % len(value)
- #else:
- # if len(value) > 75:
- # value = repr(value[:70]+u' ... ')
- # else:
- # value = repr(value)
- value = repr(value)
- return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
-
-class ScalarNode(Node):
- id = 'scalar'
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class CollectionNode(Node):
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, flow_style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class SequenceNode(CollectionNode):
- id = 'sequence'
-
-class MappingNode(CollectionNode):
- id = 'mapping'
-
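Illustration (not part of the patch): these node classes make up the representation graph, one level below native Python objects; `yaml.compose()` exposes it directly:

    import yaml

    node = yaml.compose(u'a: 1\n')
    print node.tag      # tag:yaml.org,2002:map
    print node.value    # [(ScalarNode(tag=u'...:str', value=u'a'),
                        #   ScalarNode(tag=u'...:int', value=u'1'))]
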
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/parser.py b/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
deleted file mode 100644
index 97ba08337..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
+++ /dev/null
@@ -1,590 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# The following YAML grammar is LL(1) and is parsed by a recursive descent
-# parser.
-#
-# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-# implicit_document ::= block_node DOCUMENT-END*
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-# block_node_or_indentless_sequence ::=
-# ALIAS
-# | properties (block_content | indentless_block_sequence)?
-# | block_content
-# | indentless_block_sequence
-# block_node ::= ALIAS
-# | properties block_content?
-# | block_content
-# flow_node ::= ALIAS
-# | properties flow_content?
-# | flow_content
-# properties ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content ::= block_collection | flow_collection | SCALAR
-# flow_content ::= flow_collection | SCALAR
-# block_collection ::= block_sequence | block_mapping
-# flow_collection ::= flow_sequence | flow_mapping
-# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-# block_mapping ::= BLOCK-MAPPING_START
-# ((KEY block_node_or_indentless_sequence?)?
-# (VALUE block_node_or_indentless_sequence?)?)*
-# BLOCK-END
-# flow_sequence ::= FLOW-SEQUENCE-START
-# (flow_sequence_entry FLOW-ENTRY)*
-# flow_sequence_entry?
-# FLOW-SEQUENCE-END
-# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping ::= FLOW-MAPPING-START
-# (flow_mapping_entry FLOW-ENTRY)*
-# flow_mapping_entry?
-# FLOW-MAPPING-END
-# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-#
-# FIRST sets:
-#
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
-#   indentless_sequence: { BLOCK-ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-
-__all__ = ['Parser', 'ParserError']
-
-from error import MarkedYAMLError
-from tokens import *
-from events import *
-from scanner import *
-
-class ParserError(MarkedYAMLError):
- pass
-
-class Parser(object):
-    # Since writing a recursive descent parser is a straightforward task, we
- # do not give many comments here.
-
- DEFAULT_TAGS = {
- u'!': u'!',
- u'!!': u'tag:yaml.org,2002:',
- }
-
- def __init__(self):
- self.current_event = None
- self.yaml_version = None
- self.tag_handles = {}
- self.states = []
- self.marks = []
- self.state = self.parse_stream_start
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def check_event(self, *choices):
- # Check the type of the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- if self.current_event is not None:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.current_event, choice):
- return True
- return False
-
- def peek_event(self):
- # Get the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- return self.current_event
-
- def get_event(self):
- # Get the next event and proceed further.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- value = self.current_event
- self.current_event = None
- return value
-
- # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
- # implicit_document ::= block_node DOCUMENT-END*
- # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-
- def parse_stream_start(self):
-
- # Parse the stream start.
- token = self.get_token()
- event = StreamStartEvent(token.start_mark, token.end_mark,
- encoding=token.encoding)
-
- # Prepare the next state.
- self.state = self.parse_implicit_document_start
-
- return event
-
- def parse_implicit_document_start(self):
-
- # Parse an implicit document.
- if not self.check_token(DirectiveToken, DocumentStartToken,
- StreamEndToken):
- self.tag_handles = self.DEFAULT_TAGS
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=False)
-
- # Prepare the next state.
- self.states.append(self.parse_document_end)
- self.state = self.parse_block_node
-
- return event
-
- else:
- return self.parse_document_start()
-
- def parse_document_start(self):
-
- # Parse any extra document end indicators.
- while self.check_token(DocumentEndToken):
- self.get_token()
-
- # Parse an explicit document.
- if not self.check_token(StreamEndToken):
- token = self.peek_token()
- start_mark = token.start_mark
- version, tags = self.process_directives()
- if not self.check_token(DocumentStartToken):
- raise ParserError(None, None,
- "expected '<document start>', but found %r"
- % self.peek_token().id,
- self.peek_token().start_mark)
- token = self.get_token()
- end_mark = token.end_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=True, version=version, tags=tags)
- self.states.append(self.parse_document_end)
- self.state = self.parse_document_content
- else:
- # Parse the end of the stream.
- token = self.get_token()
- event = StreamEndEvent(token.start_mark, token.end_mark)
- assert not self.states
- assert not self.marks
- self.state = None
- return event
-
- def parse_document_end(self):
-
- # Parse the document end.
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- explicit = False
- if self.check_token(DocumentEndToken):
- token = self.get_token()
- end_mark = token.end_mark
- explicit = True
- event = DocumentEndEvent(start_mark, end_mark,
- explicit=explicit)
-
- # Prepare the next state.
- self.state = self.parse_document_start
-
- return event
-
- def parse_document_content(self):
- if self.check_token(DirectiveToken,
- DocumentStartToken, DocumentEndToken, StreamEndToken):
- event = self.process_empty_scalar(self.peek_token().start_mark)
- self.state = self.states.pop()
- return event
- else:
- return self.parse_block_node()
-
- def process_directives(self):
- self.yaml_version = None
- self.tag_handles = {}
- while self.check_token(DirectiveToken):
- token = self.get_token()
- if token.name == u'YAML':
- if self.yaml_version is not None:
- raise ParserError(None, None,
- "found duplicate YAML directive", token.start_mark)
- major, minor = token.value
- if major != 1:
- raise ParserError(None, None,
- "found incompatible YAML document (version 1.* is required)",
- token.start_mark)
- self.yaml_version = token.value
- elif token.name == u'TAG':
- handle, prefix = token.value
- if handle in self.tag_handles:
- raise ParserError(None, None,
- "duplicate tag handle %r" % handle.encode('utf-8'),
- token.start_mark)
- self.tag_handles[handle] = prefix
- if self.tag_handles:
- value = self.yaml_version, self.tag_handles.copy()
- else:
- value = self.yaml_version, None
- for key in self.DEFAULT_TAGS:
- if key not in self.tag_handles:
- self.tag_handles[key] = self.DEFAULT_TAGS[key]
- return value
-
- # block_node_or_indentless_sequence ::= ALIAS
- # | properties (block_content | indentless_block_sequence)?
- # | block_content
- # | indentless_block_sequence
- # block_node ::= ALIAS
- # | properties block_content?
- # | block_content
- # flow_node ::= ALIAS
- # | properties flow_content?
- # | flow_content
- # properties ::= TAG ANCHOR? | ANCHOR TAG?
- # block_content ::= block_collection | flow_collection | SCALAR
- # flow_content ::= flow_collection | SCALAR
- # block_collection ::= block_sequence | block_mapping
- # flow_collection ::= flow_sequence | flow_mapping
-
- def parse_block_node(self):
- return self.parse_node(block=True)
-
- def parse_flow_node(self):
- return self.parse_node()
-
- def parse_block_node_or_indentless_sequence(self):
- return self.parse_node(block=True, indentless_sequence=True)
-
- def parse_node(self, block=False, indentless_sequence=False):
- if self.check_token(AliasToken):
- token = self.get_token()
- event = AliasEvent(token.value, token.start_mark, token.end_mark)
- self.state = self.states.pop()
- else:
- anchor = None
- tag = None
- start_mark = end_mark = tag_mark = None
- if self.check_token(AnchorToken):
- token = self.get_token()
- start_mark = token.start_mark
- end_mark = token.end_mark
- anchor = token.value
- if self.check_token(TagToken):
- token = self.get_token()
- tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- elif self.check_token(TagToken):
- token = self.get_token()
- start_mark = tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- if self.check_token(AnchorToken):
- token = self.get_token()
- end_mark = token.end_mark
- anchor = token.value
- if tag is not None:
- handle, suffix = tag
- if handle is not None:
- if handle not in self.tag_handles:
- raise ParserError("while parsing a node", start_mark,
- "found undefined tag handle %r" % handle.encode('utf-8'),
- tag_mark)
- tag = self.tag_handles[handle]+suffix
- else:
- tag = suffix
- #if tag == u'!':
- # raise ParserError("while parsing a node", start_mark,
- # "found non-specific tag '!'", tag_mark,
- # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
- if start_mark is None:
- start_mark = end_mark = self.peek_token().start_mark
- event = None
- implicit = (tag is None or tag == u'!')
- if indentless_sequence and self.check_token(BlockEntryToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark)
- self.state = self.parse_indentless_sequence_entry
- else:
- if self.check_token(ScalarToken):
- token = self.get_token()
- end_mark = token.end_mark
- if (token.plain and tag is None) or tag == u'!':
- implicit = (True, False)
- elif tag is None:
- implicit = (False, True)
- else:
- implicit = (False, False)
- event = ScalarEvent(anchor, tag, implicit, token.value,
- start_mark, end_mark, style=token.style)
- self.state = self.states.pop()
- elif self.check_token(FlowSequenceStartToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_sequence_first_entry
- elif self.check_token(FlowMappingStartToken):
- end_mark = self.peek_token().end_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_mapping_first_key
- elif block and self.check_token(BlockSequenceStartToken):
- end_mark = self.peek_token().start_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_sequence_first_entry
- elif block and self.check_token(BlockMappingStartToken):
- end_mark = self.peek_token().start_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_mapping_first_key
- elif anchor is not None or tag is not None:
- # Empty scalars are allowed even if a tag or an anchor is
- # specified.
- event = ScalarEvent(anchor, tag, (implicit, False), u'',
- start_mark, end_mark)
- self.state = self.states.pop()
- else:
- if block:
- node = 'block'
- else:
- node = 'flow'
- token = self.peek_token()
- raise ParserError("while parsing a %s node" % node, start_mark,
- "expected the node content, but found %r" % token.id,
- token.start_mark)
- return event
-
- # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-
- def parse_block_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_sequence_entry()
-
- def parse_block_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken, BlockEndToken):
- self.states.append(self.parse_block_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_block_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block collection", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
- def parse_indentless_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken,
- KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_indentless_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_indentless_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- token = self.peek_token()
- event = SequenceEndEvent(token.start_mark, token.start_mark)
- self.state = self.states.pop()
- return event
-
- # block_mapping ::= BLOCK-MAPPING_START
- # ((KEY block_node_or_indentless_sequence?)?
- # (VALUE block_node_or_indentless_sequence?)?)*
- # BLOCK-END
-
- def parse_block_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_mapping_key()
-
- def parse_block_mapping_key(self):
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_value)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_value
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block mapping", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_block_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_key)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_block_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- # flow_sequence ::= FLOW-SEQUENCE-START
- # (flow_sequence_entry FLOW-ENTRY)*
- # flow_sequence_entry?
- # FLOW-SEQUENCE-END
- # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- #
- # Note that while production rules for both flow_sequence_entry and
- # flow_mapping_entry are equal, their interpretations are different.
- # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
-    # generates an inline mapping (set syntax).
-
- def parse_flow_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_sequence_entry(first=True)
-
- def parse_flow_sequence_entry(self, first=False):
- if not self.check_token(FlowSequenceEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow sequence", self.marks[-1],
- "expected ',' or ']', but got %r" % token.id, token.start_mark)
-
- if self.check_token(KeyToken):
- token = self.peek_token()
- event = MappingStartEvent(None, None, True,
- token.start_mark, token.end_mark,
- flow_style=True)
- self.state = self.parse_flow_sequence_entry_mapping_key
- return event
- elif not self.check_token(FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry)
- return self.parse_flow_node()
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_sequence_entry_mapping_key(self):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_value
- return self.process_empty_scalar(token.end_mark)
-
- def parse_flow_sequence_entry_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_end)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_sequence_entry_mapping_end(self):
- self.state = self.parse_flow_sequence_entry
- token = self.peek_token()
- return MappingEndEvent(token.start_mark, token.start_mark)
-
- # flow_mapping ::= FLOW-MAPPING-START
- # (flow_mapping_entry FLOW-ENTRY)*
- # flow_mapping_entry?
- # FLOW-MAPPING-END
- # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
- def parse_flow_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_mapping_key(first=True)
-
- def parse_flow_mapping_key(self, first=False):
- if not self.check_token(FlowMappingEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow mapping", self.marks[-1],
- "expected ',' or '}', but got %r" % token.id, token.start_mark)
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_value
- return self.process_empty_scalar(token.end_mark)
- elif not self.check_token(FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_empty_value)
- return self.parse_flow_node()
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_key)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_mapping_empty_value(self):
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(self.peek_token().start_mark)
-
- def process_empty_scalar(self, mark):
- return ScalarEvent(None, None, (True, False), u'', mark, mark)
-
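Illustration (not part of the patch): `check_event()`, `peek_event()` and `get_event()` above form the parser's pull interface; `yaml.parse()` is little more than a loop over them. Driving a Loader by hand in Python 2:

    import yaml

    loader = yaml.Loader(u'{a: 1}')
    while loader.check_event():    # Parser.check_event, defined above
        print loader.get_event()   # Parser.get_event pops current_event
    loader.dispose()
    # The mapping arrives as MappingStartEvent / ScalarEvent pairs /
    # MappingEndEvent, bracketed by the stream and document events.
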
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/reader.py b/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
deleted file mode 100644
index 8d422954e..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# SPDX-License-Identifier: MIT
-# This module contains abstractions for the input stream. You don't have to
-# look further; there is no pretty code here.
-#
-# We define two classes here.
-#
-# Mark(source, line, column)
-# It's just a record and its only use is producing nice error messages.
-# Parser does not use it for any other purposes.
-#
-# Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-#   reader.peek(index=0) - return the character `index` positions ahead
-#   reader.prefix(length=1) - return the next `length` characters
-#   reader.forward(length=1) - advance the current position by `length` characters
-#   reader.index - the index of the current character
-#   reader.line, reader.column - the line and the column of the current character
-
-__all__ = ['Reader', 'ReaderError']
-
-from error import YAMLError, Mark
-
-import codecs, re
-
-class ReaderError(YAMLError):
-
- def __init__(self, name, position, character, encoding, reason):
- self.name = name
- self.character = character
- self.position = position
- self.encoding = encoding
- self.reason = reason
-
- def __str__(self):
- if isinstance(self.character, str):
- return "'%s' codec can't decode byte #x%02x: %s\n" \
- " in \"%s\", position %d" \
- % (self.encoding, ord(self.character), self.reason,
- self.name, self.position)
- else:
- return "unacceptable character #x%04x: %s\n" \
- " in \"%s\", position %d" \
- % (self.character, self.reason,
- self.name, self.position)
-
-class Reader(object):
- # Reader:
- # - determines the data encoding and converts it to unicode,
- # - checks if characters are in allowed range,
- # - adds '\0' to the end.
-
- # Reader accepts
- # - a `str` object,
- # - a `unicode` object,
- # - a file-like object with its `read` method returning `str`,
- # - a file-like object with its `read` method returning `unicode`.
-
- # Yeah, it's ugly and slow.
-
- def __init__(self, stream):
- self.name = None
- self.stream = None
- self.stream_pointer = 0
- self.eof = True
- self.buffer = u''
- self.pointer = 0
- self.raw_buffer = None
- self.raw_decode = None
- self.encoding = None
- self.index = 0
- self.line = 0
- self.column = 0
- if isinstance(stream, unicode):
- self.name = "<unicode string>"
- self.check_printable(stream)
- self.buffer = stream+u'\0'
- elif isinstance(stream, str):
- self.name = "<string>"
- self.raw_buffer = stream
- self.determine_encoding()
- else:
- self.stream = stream
- self.name = getattr(stream, 'name', "<file>")
- self.eof = False
- self.raw_buffer = ''
- self.determine_encoding()
-
- def peek(self, index=0):
- try:
- return self.buffer[self.pointer+index]
- except IndexError:
- self.update(index+1)
- return self.buffer[self.pointer+index]
-
- def prefix(self, length=1):
- if self.pointer+length >= len(self.buffer):
- self.update(length)
- return self.buffer[self.pointer:self.pointer+length]
-
- def forward(self, length=1):
- if self.pointer+length+1 >= len(self.buffer):
- self.update(length+1)
- while length:
- ch = self.buffer[self.pointer]
- self.pointer += 1
- self.index += 1
- if ch in u'\n\x85\u2028\u2029' \
- or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
- self.line += 1
- self.column = 0
- elif ch != u'\uFEFF':
- self.column += 1
- length -= 1
-
- def get_mark(self):
- if self.stream is None:
- return Mark(self.name, self.index, self.line, self.column,
- self.buffer, self.pointer)
- else:
- return Mark(self.name, self.index, self.line, self.column,
- None, None)
-
- def determine_encoding(self):
- while not self.eof and len(self.raw_buffer) < 2:
- self.update_raw()
- if not isinstance(self.raw_buffer, unicode):
- if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
- self.raw_decode = codecs.utf_16_le_decode
- self.encoding = 'utf-16-le'
- elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
- self.raw_decode = codecs.utf_16_be_decode
- self.encoding = 'utf-16-be'
- else:
- self.raw_decode = codecs.utf_8_decode
- self.encoding = 'utf-8'
- self.update(1)
-
- NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
- def check_printable(self, data):
- match = self.NON_PRINTABLE.search(data)
- if match:
- character = match.group()
- position = self.index+(len(self.buffer)-self.pointer)+match.start()
- raise ReaderError(self.name, position, ord(character),
- 'unicode', "special characters are not allowed")
-
- def update(self, length):
- if self.raw_buffer is None:
- return
- self.buffer = self.buffer[self.pointer:]
- self.pointer = 0
- while len(self.buffer) < length:
- if not self.eof:
- self.update_raw()
- if self.raw_decode is not None:
- try:
- data, converted = self.raw_decode(self.raw_buffer,
- 'strict', self.eof)
- except UnicodeDecodeError, exc:
- character = exc.object[exc.start]
- if self.stream is not None:
- position = self.stream_pointer-len(self.raw_buffer)+exc.start
- else:
- position = exc.start
- raise ReaderError(self.name, position, character,
- exc.encoding, exc.reason)
- else:
- data = self.raw_buffer
- converted = len(data)
- self.check_printable(data)
- self.buffer += data
- self.raw_buffer = self.raw_buffer[converted:]
- if self.eof:
- self.buffer += u'\0'
- self.raw_buffer = None
- break
-
- def update_raw(self, size=1024):
- data = self.stream.read(size)
- if data:
- self.raw_buffer += data
- self.stream_pointer += len(data)
- else:
- self.eof = True
-
-#try:
-# import psyco
-# psyco.bind(Reader)
-#except ImportError:
-# pass
-
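Illustration (not part of the patch): Reader's whole contract is `peek`/`prefix`/`forward` plus line and column bookkeeping. A minimal sketch, assuming the module is importable as `reader` (as it is from inside this package):

    from reader import Reader

    r = Reader(u'hello\nworld\n')
    print r.peek()            # u'h' -- does not move the pointer
    print r.prefix(5)         # u'hello'
    r.forward(6)              # consume 'hello' plus the newline
    print r.line, r.column    # 1 0 -- the newline bumped the line counter
    print r.get_mark()        # in "<unicode string>", line 2, column 1
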
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/representer.py b/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
deleted file mode 100644
index 0a1404eca..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
+++ /dev/null
@@ -1,485 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
- 'RepresenterError']
-
-from error import *
-from nodes import *
-
-import datetime
-
-import sys, copy_reg, types
-
-class RepresenterError(YAMLError):
- pass
-
-class BaseRepresenter(object):
-
- yaml_representers = {}
- yaml_multi_representers = {}
-
- def __init__(self, default_style=None, default_flow_style=None):
- self.default_style = default_style
- self.default_flow_style = default_flow_style
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent(self, data):
- node = self.represent_data(data)
- self.serialize(node)
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def get_classobj_bases(self, cls):
- bases = [cls]
- for base in cls.__bases__:
- bases.extend(self.get_classobj_bases(base))
- return bases
-
- def represent_data(self, data):
- if self.ignore_aliases(data):
- self.alias_key = None
- else:
- self.alias_key = id(data)
- if self.alias_key is not None:
- if self.alias_key in self.represented_objects:
- node = self.represented_objects[self.alias_key]
- #if node is None:
- # raise RepresenterError("recursive objects are not allowed: %r" % data)
- return node
- #self.represented_objects[alias_key] = None
- self.object_keeper.append(data)
- data_types = type(data).__mro__
- if type(data) is types.InstanceType:
- data_types = self.get_classobj_bases(data.__class__)+list(data_types)
- if data_types[0] in self.yaml_representers:
- node = self.yaml_representers[data_types[0]](self, data)
- else:
- for data_type in data_types:
- if data_type in self.yaml_multi_representers:
- node = self.yaml_multi_representers[data_type](self, data)
- break
- else:
- if None in self.yaml_multi_representers:
- node = self.yaml_multi_representers[None](self, data)
- elif None in self.yaml_representers:
- node = self.yaml_representers[None](self, data)
- else:
- node = ScalarNode(None, unicode(data))
- #if alias_key is not None:
- # self.represented_objects[alias_key] = node
- return node
-
- def add_representer(cls, data_type, representer):
- if not 'yaml_representers' in cls.__dict__:
- cls.yaml_representers = cls.yaml_representers.copy()
- cls.yaml_representers[data_type] = representer
- add_representer = classmethod(add_representer)
-
- def add_multi_representer(cls, data_type, representer):
- if not 'yaml_multi_representers' in cls.__dict__:
- cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
- cls.yaml_multi_representers[data_type] = representer
- add_multi_representer = classmethod(add_multi_representer)
-
- def represent_scalar(self, tag, value, style=None):
- if style is None:
- style = self.default_style
- node = ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
- def represent_sequence(self, tag, sequence, flow_style=None):
- value = []
- node = SequenceNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- for item in sequence:
- node_item = self.represent_data(item)
- if not (isinstance(node_item, ScalarNode) and not node_item.style):
- best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_mapping(self, tag, mapping, flow_style=None):
- value = []
- node = MappingNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- if hasattr(mapping, 'items'):
- mapping = mapping.items()
- mapping.sort()
- for item_key, item_value in mapping:
- node_key = self.represent_data(item_key)
- node_value = self.represent_data(item_value)
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not node_value.style):
- best_style = False
- value.append((node_key, node_value))
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def ignore_aliases(self, data):
- return False
-
-class SafeRepresenter(BaseRepresenter):
-
- def ignore_aliases(self, data):
- if data in [None, ()]:
- return True
- if isinstance(data, (str, unicode, bool, int, float)):
- return True
-
- def represent_none(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:null',
- u'null')
-
- def represent_str(self, data):
- tag = None
- style = None
- try:
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- try:
- data = unicode(data, 'utf-8')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- data = data.encode('base64')
- tag = u'tag:yaml.org,2002:binary'
- style = '|'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_unicode(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:str', data)
-
- def represent_bool(self, data):
- if data:
- value = u'true'
- else:
- value = u'false'
- return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
-
- def represent_int(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
-
- def represent_long(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
-
- inf_value = 1e300
- while repr(inf_value) != repr(inf_value*inf_value):
- inf_value *= inf_value
-
- def represent_float(self, data):
- if data != data or (data == 0.0 and data == 1.0):
- value = u'.nan'
- elif data == self.inf_value:
- value = u'.inf'
- elif data == -self.inf_value:
- value = u'-.inf'
- else:
- value = unicode(repr(data)).lower()
- # Note that in some cases `repr(data)` represents a float number
-            # without the fractional part. For instance:
- # >>> repr(1e17)
- # '1e17'
- # Unfortunately, this is not a valid float representation according
- # to the definition of the `!!float` tag. We fix this by adding
- # '.0' before the 'e' symbol.
- if u'.' not in value and u'e' in value:
- value = value.replace(u'e', u'.0e', 1)
- return self.represent_scalar(u'tag:yaml.org,2002:float', value)
-
- def represent_list(self, data):
- #pairs = (len(data) > 0 and isinstance(data, list))
- #if pairs:
- # for item in data:
- # if not isinstance(item, tuple) or len(item) != 2:
- # pairs = False
- # break
- #if not pairs:
- return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
- #value = []
- #for item_key, item_value in data:
- # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
- # [(item_key, item_value)]))
- #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
- def represent_dict(self, data):
- return self.represent_mapping(u'tag:yaml.org,2002:map', data)
-
- def represent_set(self, data):
- value = {}
- for key in data:
- value[key] = None
- return self.represent_mapping(u'tag:yaml.org,2002:set', value)
-
- def represent_date(self, data):
- value = unicode(data.isoformat())
- return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
- def represent_datetime(self, data):
- value = unicode(data.isoformat(' '))
- return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
- def represent_yaml_object(self, tag, data, cls, flow_style=None):
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__.copy()
- return self.represent_mapping(tag, state, flow_style=flow_style)
-
- def represent_undefined(self, data):
- raise RepresenterError("cannot represent an object: %s" % data)
-
-SafeRepresenter.add_representer(type(None),
- SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
- SafeRepresenter.represent_str)
-
-SafeRepresenter.add_representer(unicode,
- SafeRepresenter.represent_unicode)
-
-SafeRepresenter.add_representer(bool,
- SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
- SafeRepresenter.represent_int)
-
-SafeRepresenter.add_representer(long,
- SafeRepresenter.represent_long)
-
-SafeRepresenter.add_representer(float,
- SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
- SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
- SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(datetime.date,
- SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
- SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
- SafeRepresenter.represent_undefined)
-
-class Representer(SafeRepresenter):
-
- def represent_str(self, data):
- tag = None
- style = None
- try:
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- try:
- data = unicode(data, 'utf-8')
- tag = u'tag:yaml.org,2002:python/str'
- except UnicodeDecodeError:
- data = data.encode('base64')
- tag = u'tag:yaml.org,2002:binary'
- style = '|'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_unicode(self, data):
- tag = None
- try:
- data.encode('ascii')
- tag = u'tag:yaml.org,2002:python/unicode'
- except UnicodeEncodeError:
- tag = u'tag:yaml.org,2002:str'
- return self.represent_scalar(tag, data)
-
- def represent_long(self, data):
- tag = u'tag:yaml.org,2002:int'
- if int(data) is not data:
- tag = u'tag:yaml.org,2002:python/long'
- return self.represent_scalar(tag, unicode(data))
-
- def represent_complex(self, data):
- if data.imag == 0.0:
- data = u'%r' % data.real
- elif data.real == 0.0:
- data = u'%rj' % data.imag
- elif data.imag > 0:
- data = u'%r+%rj' % (data.real, data.imag)
- else:
- data = u'%r%rj' % (data.real, data.imag)
- return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
-
- def represent_tuple(self, data):
- return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
-
- def represent_name(self, data):
- name = u'%s.%s' % (data.__module__, data.__name__)
- return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
-
- def represent_module(self, data):
- return self.represent_scalar(
- u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
-
- def represent_instance(self, data):
- # For instances of classic classes, we use __getinitargs__ and
- # __getstate__ to serialize the data.
-
- # If data.__getinitargs__ exists, the object must be reconstructed by
-        # calling cls(*args), where args is a tuple returned by
- # __getinitargs__. Otherwise, the cls.__init__ method should never be
- # called and the class instance is created by instantiating a trivial
- # class and assigning to the instance's __class__ variable.
-
- # If data.__getstate__ exists, it returns the state of the object.
- # Otherwise, the state of the object is data.__dict__.
-
- # We produce either a !!python/object or !!python/object/new node.
- # If data.__getinitargs__ does not exist and state is a dictionary, we
-        # produce a !!python/object node. Otherwise we produce a
- # !!python/object/new node.
-
- cls = data.__class__
- class_name = u'%s.%s' % (cls.__module__, cls.__name__)
- args = None
- state = None
- if hasattr(data, '__getinitargs__'):
- args = list(data.__getinitargs__())
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__
- if args is None and isinstance(state, dict):
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:'+class_name, state)
- if isinstance(state, dict) and not state:
- return self.represent_sequence(
- u'tag:yaml.org,2002:python/object/new:'+class_name, args)
- value = {}
- if args:
- value['args'] = args
- value['state'] = state
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object/new:'+class_name, value)
-
- def represent_object(self, data):
-        # We use the __reduce__ API to save the data. data.__reduce__ returns
- # a tuple of length 2-5:
- # (function, args, state, listitems, dictitems)
-
-        # For reconstructing, we call function(*args), then set its state,
- # listitems, and dictitems if they are not None.
-
- # A special case is when function.__name__ == '__newobj__'. In this
- # case we create the object with args[0].__new__(*args).
-
- # Another special case is when __reduce__ returns a string - we don't
- # support it.
-
- # We produce a !!python/object, !!python/object/new or
- # !!python/object/apply node.
-
- cls = type(data)
- if cls in copy_reg.dispatch_table:
- reduce = copy_reg.dispatch_table[cls](data)
- elif hasattr(data, '__reduce_ex__'):
- reduce = data.__reduce_ex__(2)
- elif hasattr(data, '__reduce__'):
- reduce = data.__reduce__()
- else:
- raise RepresenterError("cannot represent object: %r" % data)
- reduce = (list(reduce)+[None]*5)[:5]
- function, args, state, listitems, dictitems = reduce
- args = list(args)
- if state is None:
- state = {}
- if listitems is not None:
- listitems = list(listitems)
- if dictitems is not None:
- dictitems = dict(dictitems)
- if function.__name__ == '__newobj__':
- function = args[0]
- args = args[1:]
- tag = u'tag:yaml.org,2002:python/object/new:'
- newobj = True
- else:
- tag = u'tag:yaml.org,2002:python/object/apply:'
- newobj = False
- function_name = u'%s.%s' % (function.__module__, function.__name__)
- if not args and not listitems and not dictitems \
- and isinstance(state, dict) and newobj:
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:'+function_name, state)
- if not listitems and not dictitems \
- and isinstance(state, dict) and not state:
- return self.represent_sequence(tag+function_name, args)
- value = {}
- if args:
- value['args'] = args
- if state or not isinstance(state, dict):
- value['state'] = state
- if listitems:
- value['listitems'] = listitems
- if dictitems:
- value['dictitems'] = dictitems
- return self.represent_mapping(tag+function_name, value)
-
-Representer.add_representer(str,
- Representer.represent_str)
-
-Representer.add_representer(unicode,
- Representer.represent_unicode)
-
-Representer.add_representer(long,
- Representer.represent_long)
-
-Representer.add_representer(complex,
- Representer.represent_complex)
-
-Representer.add_representer(tuple,
- Representer.represent_tuple)
-
-Representer.add_representer(type,
- Representer.represent_name)
-
-Representer.add_representer(types.ClassType,
- Representer.represent_name)
-
-Representer.add_representer(types.FunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
- Representer.represent_module)
-
-Representer.add_multi_representer(types.InstanceType,
- Representer.represent_instance)
-
-Representer.add_multi_representer(object,
- Representer.represent_object)
-
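Illustration (not part of the patch): `add_representer()` above is the extension point for dumping custom types; `Point` and `point_representer` here are made-up example names:

    import yaml

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    def point_representer(dumper, data):
        # represent_mapping() is defined in BaseRepresenter above.
        return dumper.represent_mapping(u'!point', {'x': data.x, 'y': data.y})

    yaml.add_representer(Point, point_representer)
    print yaml.dump(Point(1, 2))
    # !point {x: 1, y: 2}
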
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
deleted file mode 100644
index 49922debf..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
+++ /dev/null
@@ -1,225 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseResolver', 'Resolver']
-
-from error import *
-from nodes import *
-
-import re
-
-class ResolverError(YAMLError):
- pass
-
-class BaseResolver(object):
-
- DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
- DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
- DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
-
- yaml_implicit_resolvers = {}
- yaml_path_resolvers = {}
-
- def __init__(self):
- self.resolver_exact_paths = []
- self.resolver_prefix_paths = []
-
- def add_implicit_resolver(cls, tag, regexp, first):
- if not 'yaml_implicit_resolvers' in cls.__dict__:
- cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
- if first is None:
- first = [None]
- for ch in first:
- cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
- add_implicit_resolver = classmethod(add_implicit_resolver)
-
- def add_path_resolver(cls, tag, path, kind=None):
- # Note: `add_path_resolver` is experimental. The API could be changed.
-        # `path` is a pattern that is matched against the path from the
-        # root to the node that is being considered. `path` elements are
-        # tuples `(node_check, index_check)`. `node_check` is a node class:
-        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
-        # matches any kind of a node. `index_check` could be `None`, a boolean
-        # value, a string value, or a number. `None` and `False` match against
-        # any _value_ of sequence and mapping nodes. `True` matches against
-        # any _key_ of a mapping node. A string `index_check` matches against
-        # a mapping value whose corresponding scalar key has content equal to
-        # the `index_check` value. An integer `index_check` matches against
-        # a sequence value whose index equals `index_check`.
- if not 'yaml_path_resolvers' in cls.__dict__:
- cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
- new_path = []
- for element in path:
- if isinstance(element, (list, tuple)):
- if len(element) == 2:
- node_check, index_check = element
- elif len(element) == 1:
- node_check = element[0]
- index_check = True
- else:
- raise ResolverError("Invalid path element: %s" % element)
- else:
- node_check = None
- index_check = element
- if node_check is str:
- node_check = ScalarNode
- elif node_check is list:
- node_check = SequenceNode
- elif node_check is dict:
- node_check = MappingNode
- elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
- and not isinstance(node_check, basestring) \
- and node_check is not None:
- raise ResolverError("Invalid node checker: %s" % node_check)
- if not isinstance(index_check, (basestring, int)) \
- and index_check is not None:
- raise ResolverError("Invalid index checker: %s" % index_check)
- new_path.append((node_check, index_check))
- if kind is str:
- kind = ScalarNode
- elif kind is list:
- kind = SequenceNode
- elif kind is dict:
- kind = MappingNode
- elif kind not in [ScalarNode, SequenceNode, MappingNode] \
- and kind is not None:
- raise ResolverError("Invalid node kind: %s" % kind)
- cls.yaml_path_resolvers[tuple(new_path), kind] = tag
- add_path_resolver = classmethod(add_path_resolver)
-
- def descend_resolver(self, current_node, current_index):
- if not self.yaml_path_resolvers:
- return
- exact_paths = {}
- prefix_paths = []
- if current_node:
- depth = len(self.resolver_prefix_paths)
- for path, kind in self.resolver_prefix_paths[-1]:
- if self.check_resolver_prefix(depth, path, kind,
- current_node, current_index):
- if len(path) > depth:
- prefix_paths.append((path, kind))
- else:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- for path, kind in self.yaml_path_resolvers:
- if not path:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- prefix_paths.append((path, kind))
- self.resolver_exact_paths.append(exact_paths)
- self.resolver_prefix_paths.append(prefix_paths)
-
- def ascend_resolver(self):
- if not self.yaml_path_resolvers:
- return
- self.resolver_exact_paths.pop()
- self.resolver_prefix_paths.pop()
-
- def check_resolver_prefix(self, depth, path, kind,
- current_node, current_index):
- node_check, index_check = path[depth-1]
- if isinstance(node_check, basestring):
- if current_node.tag != node_check:
- return
- elif node_check is not None:
- if not isinstance(current_node, node_check):
- return
- if index_check is True and current_index is not None:
- return
- if (index_check is False or index_check is None) \
- and current_index is None:
- return
- if isinstance(index_check, basestring):
- if not (isinstance(current_index, ScalarNode)
- and index_check == current_index.value):
- return
- elif isinstance(index_check, int) and not isinstance(index_check, bool):
- if index_check != current_index:
- return
- return True
-
- def resolve(self, kind, value, implicit):
- if kind is ScalarNode and implicit[0]:
- if value == u'':
- resolvers = self.yaml_implicit_resolvers.get(u'', [])
- else:
- resolvers = self.yaml_implicit_resolvers.get(value[0], [])
- resolvers += self.yaml_implicit_resolvers.get(None, [])
- for tag, regexp in resolvers:
- if regexp.match(value):
- return tag
- implicit = implicit[1]
- if self.yaml_path_resolvers:
- exact_paths = self.resolver_exact_paths[-1]
- if kind in exact_paths:
- return exact_paths[kind]
- if None in exact_paths:
- return exact_paths[None]
- if kind is ScalarNode:
- return self.DEFAULT_SCALAR_TAG
- elif kind is SequenceNode:
- return self.DEFAULT_SEQUENCE_TAG
- elif kind is MappingNode:
- return self.DEFAULT_MAPPING_TAG
-
-class Resolver(BaseResolver):
- pass
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:bool',
- re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
- |true|True|TRUE|false|False|FALSE
- |on|On|ON|off|Off|OFF)$''', re.X),
- list(u'yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:float',
- re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
- |\.[0-9_]+(?:[eE][-+][0-9]+)?
- |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
- |[-+]?\.(?:inf|Inf|INF)
- |\.(?:nan|NaN|NAN))$''', re.X),
- list(u'-+0123456789.'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:int',
- re.compile(ur'''^(?:[-+]?0b[0-1_]+
- |[-+]?0[0-7_]+
- |[-+]?(?:0|[1-9][0-9_]*)
- |[-+]?0x[0-9a-fA-F_]+
- |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
- list(u'-+0123456789'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:merge',
- re.compile(ur'^(?:<<)$'),
- [u'<'])
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:null',
- re.compile(ur'''^(?: ~
- |null|Null|NULL
- | )$''', re.X),
- [u'~', u'n', u'N', u''])
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:timestamp',
- re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
- |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
- (?:[Tt]|[ \t]+)[0-9][0-9]?
- :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
- (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
- list(u'0123456789'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:value',
- re.compile(ur'^(?:=)$'),
- [u'='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:yaml',
- re.compile(ur'^(?:!|&|\*)$'),
- list(u'!&*'))
-
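The implicit-resolver table registered above is what gives plain YAML scalars their types: each tag's regular expression is indexed by the characters a matching value may start with, so `resolve()` only has to try a few patterns per scalar. A minimal sketch of the effect, using the upstream PyYAML package (which exposes the same resolver table as this vendored copy):

    import yaml

    # Plain scalars are matched against the implicit resolvers above,
    # dispatched on their first character.
    print(yaml.safe_load("flag: yes"))   # {'flag': True}  -> tag:yaml.org,2002:bool
    print(yaml.safe_load("pi: 3.14"))    # {'pi': 3.14}    -> tag:yaml.org,2002:float
    print(yaml.safe_load("n: 0x1F"))     # {'n': 31}       -> tag:yaml.org,2002:int
    print(yaml.safe_load("empty: ~"))    # {'empty': None} -> tag:yaml.org,2002:null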
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
deleted file mode 100644
index 971da6127..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
+++ /dev/null
@@ -1,1458 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Scanner produces tokens of the following types:
-# STREAM-START
-# STREAM-END
-# DIRECTIVE(name, value)
-# DOCUMENT-START
-# DOCUMENT-END
-# BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START
-# BLOCK-END
-# FLOW-SEQUENCE-START
-# FLOW-MAPPING-START
-# FLOW-SEQUENCE-END
-# FLOW-MAPPING-END
-# BLOCK-ENTRY
-# FLOW-ENTRY
-# KEY
-# VALUE
-# ALIAS(value)
-# ANCHOR(value)
-# TAG(value)
-# SCALAR(value, plain, style)
-#
-# Read comments in the Scanner code for more details.
-#
-
-__all__ = ['Scanner', 'ScannerError']
-
-from error import MarkedYAMLError
-from tokens import *
-
-class ScannerError(MarkedYAMLError):
- pass
-
-class SimpleKey(object):
- # See the simple keys treatment below.
-
- def __init__(self, token_number, required, index, line, column, mark):
- self.token_number = token_number
- self.required = required
- self.index = index
- self.line = line
- self.column = column
- self.mark = mark
-
-class Scanner(object):
-
- def __init__(self):
- """Initialize the scanner."""
- # It is assumed that Scanner and Reader will have a common descendant.
- # Reader does the dirty work of checking for BOM and converting the
- # input data to Unicode. It also adds NUL to the end.
- #
- # Reader supports the following methods
- # self.peek(i=0) # peek the next i-th character
- # self.prefix(l=1) # peek the next l characters
- # self.forward(l=1) # read the next l characters and move the pointer.
-
- # Have we reached the end of the stream?
- self.done = False
-
- # The number of unclosed '{' and '['. `flow_level == 0` means block
- # context.
- self.flow_level = 0
-
- # List of processed tokens that are not yet emitted.
- self.tokens = []
-
- # Add the STREAM-START token.
- self.fetch_stream_start()
-
- # Number of tokens that were emitted through the `get_token` method.
- self.tokens_taken = 0
-
- # The current indentation level.
- self.indent = -1
-
- # Past indentation levels.
- self.indents = []
-
- # Variables related to simple keys treatment.
-
- # A simple key is a key that is not denoted by the '?' indicator.
- # Example of simple keys:
- # ---
- # block simple key: value
- # ? not a simple key:
- # : { flow simple key: value }
- # We emit the KEY token before all keys, so when we find a potential
- # simple key, we try to locate the corresponding ':' indicator.
- # Simple keys should be limited to a single line and 1024 characters.
-
- # Can a simple key start at the current position? A simple key may
- # start:
- # - at the beginning of the line, not counting indentation spaces
- # (in block context),
- # - after '{', '[', ',' (in the flow context),
- # - after '?', ':', '-' (in the block context).
- # In the block context, this flag also signifies if a block collection
- # may start at the current position.
- self.allow_simple_key = True
-
- # Keep track of possible simple keys. This is a dictionary. The key
- # is `flow_level`; there can be no more than one possible simple key
- # for each level. The value is a SimpleKey record:
- # (token_number, required, index, line, column, mark)
- # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
- # '[', or '{' tokens.
- self.possible_simple_keys = {}
-
- # Public methods.
-
- def check_token(self, *choices):
- # Check if the next token is one of the given types.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.tokens[0], choice):
- return True
- return False
-
- def peek_token(self):
- # Return the next token, but do not delete it from the queue.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- return self.tokens[0]
-
- def get_token(self):
- # Return the next token.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- self.tokens_taken += 1
- return self.tokens.pop(0)
-
- # Private methods.
-
- def need_more_tokens(self):
- if self.done:
- return False
- if not self.tokens:
- return True
- # The current token may be a potential simple key, so we
- # need to look further.
- self.stale_possible_simple_keys()
- if self.next_possible_simple_key() == self.tokens_taken:
- return True
-
- def fetch_more_tokens(self):
-
- # Eat whitespaces and comments until we reach the next token.
- self.scan_to_next_token()
-
- # Remove obsolete possible simple keys.
- self.stale_possible_simple_keys()
-
- # Compare the current indentation and column. It may add some tokens
- # and decrease the current indentation level.
- self.unwind_indent(self.column)
-
- # Peek the next character.
- ch = self.peek()
-
- # Is it the end of stream?
- if ch == u'\0':
- return self.fetch_stream_end()
-
- # Is it a directive?
- if ch == u'%' and self.check_directive():
- return self.fetch_directive()
-
- # Is it the document start?
- if ch == u'-' and self.check_document_start():
- return self.fetch_document_start()
-
- # Is it the document end?
- if ch == u'.' and self.check_document_end():
- return self.fetch_document_end()
-
- # TODO: support for BOM within a stream.
- #if ch == u'\uFEFF':
- # return self.fetch_bom() <-- issue BOMToken
-
- # Note: the order of the following checks is NOT significant.
-
- # Is it the flow sequence start indicator?
- if ch == u'[':
- return self.fetch_flow_sequence_start()
-
- # Is it the flow mapping start indicator?
- if ch == u'{':
- return self.fetch_flow_mapping_start()
-
- # Is it the flow sequence end indicator?
- if ch == u']':
- return self.fetch_flow_sequence_end()
-
- # Is it the flow mapping end indicator?
- if ch == u'}':
- return self.fetch_flow_mapping_end()
-
- # Is it the flow entry indicator?
- if ch == u',':
- return self.fetch_flow_entry()
-
- # Is it the block entry indicator?
- if ch == u'-' and self.check_block_entry():
- return self.fetch_block_entry()
-
- # Is it the key indicator?
- if ch == u'?' and self.check_key():
- return self.fetch_key()
-
- # Is it the value indicator?
- if ch == u':' and self.check_value():
- return self.fetch_value()
-
- # Is it an alias?
- if ch == u'*':
- return self.fetch_alias()
-
- # Is it an anchor?
- if ch == u'&':
- return self.fetch_anchor()
-
- # Is it a tag?
- if ch == u'!':
- return self.fetch_tag()
-
- # Is it a literal scalar?
- if ch == u'|' and not self.flow_level:
- return self.fetch_literal()
-
- # Is it a folded scalar?
- if ch == u'>' and not self.flow_level:
- return self.fetch_folded()
-
- # Is it a single quoted scalar?
- if ch == u'\'':
- return self.fetch_single()
-
- # Is it a double quoted scalar?
- if ch == u'\"':
- return self.fetch_double()
-
- # It must be a plain scalar then.
- if self.check_plain():
- return self.fetch_plain()
-
- # No? It's an error. Let's produce a nice error message.
- raise ScannerError("while scanning for the next token", None,
- "found character %r that cannot start any token"
- % ch.encode('utf-8'), self.get_mark())
-
- # Simple keys treatment.
-
- def next_possible_simple_key(self):
- # Return the number of the nearest possible simple key. Actually we
- # don't need to loop through the whole dictionary. We may replace it
- # with the following code:
- # if not self.possible_simple_keys:
- # return None
- # return self.possible_simple_keys[
- # min(self.possible_simple_keys.keys())].token_number
- min_token_number = None
- for level in self.possible_simple_keys:
- key = self.possible_simple_keys[level]
- if min_token_number is None or key.token_number < min_token_number:
- min_token_number = key.token_number
- return min_token_number
-
- def stale_possible_simple_keys(self):
- # Remove entries that are no longer possible simple keys. According to
- # the YAML specification, simple keys
- # - should be limited to a single line,
- # - should be no longer than 1024 characters.
- # Disabling this procedure will allow simple keys of any length and
- # height (may cause problems if indentation is broken though).
- for level in self.possible_simple_keys.keys():
- key = self.possible_simple_keys[level]
- if key.line != self.line \
- or self.index-key.index > 1024:
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
- "could not found expected ':'", self.get_mark())
- del self.possible_simple_keys[level]
-
- def save_possible_simple_key(self):
- # The next token may start a simple key. We check if it's possible
- # and save its position. This function is called for
- # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
-
- # Check if a simple key is required at the current position.
- required = not self.flow_level and self.indent == self.column
-
- # A simple key is required only if it is the first token in the current
- # line. Therefore it is always allowed.
- assert self.allow_simple_key or not required
-
- # The next token might be a simple key. Let's save its number and
- # position.
- if self.allow_simple_key:
- self.remove_possible_simple_key()
- token_number = self.tokens_taken+len(self.tokens)
- key = SimpleKey(token_number, required,
- self.index, self.line, self.column, self.get_mark())
- self.possible_simple_keys[self.flow_level] = key
-
- def remove_possible_simple_key(self):
- # Remove the saved possible key position at the current flow level.
- if self.flow_level in self.possible_simple_keys:
- key = self.possible_simple_keys[self.flow_level]
-
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
- "could not found expected ':'", self.get_mark())
-
- del self.possible_simple_keys[self.flow_level]
-
- # Indentation functions.
-
- def unwind_indent(self, column):
-
- ## In flow context, tokens should respect indentation.
- ## Actually the condition should be `self.indent >= column` according to
- ## the spec. But this condition will prohibit intuitively correct
- ## constructions such as
- ## key : {
- ## }
- #if self.flow_level and self.indent > column:
- # raise ScannerError(None, None,
- # "invalid intendation or unclosed '[' or '{'",
- # self.get_mark())
-
- # In the flow context, indentation is ignored. We make the scanner less
- # restrictive than the specification requires.
- if self.flow_level:
- return
-
- # In block context, we may need to issue the BLOCK-END tokens.
- while self.indent > column:
- mark = self.get_mark()
- self.indent = self.indents.pop()
- self.tokens.append(BlockEndToken(mark, mark))
-
- def add_indent(self, column):
- # Check if we need to increase indentation.
- if self.indent < column:
- self.indents.append(self.indent)
- self.indent = column
- return True
- return False
-
- # Fetchers.
-
- def fetch_stream_start(self):
- # We always add STREAM-START as the first token and STREAM-END as the
- # last token.
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-START.
- self.tokens.append(StreamStartToken(mark, mark,
- encoding=self.encoding))
-
-
- def fetch_stream_end(self):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
- self.possible_simple_keys = {}
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-END.
- self.tokens.append(StreamEndToken(mark, mark))
-
- # The stream is finished.
- self.done = True
-
- def fetch_directive(self):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Scan and add DIRECTIVE.
- self.tokens.append(self.scan_directive())
-
- def fetch_document_start(self):
- self.fetch_document_indicator(DocumentStartToken)
-
- def fetch_document_end(self):
- self.fetch_document_indicator(DocumentEndToken)
-
- def fetch_document_indicator(self, TokenClass):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys. Note that there cannot be a block collection
- # after '---'.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Add DOCUMENT-START or DOCUMENT-END.
- start_mark = self.get_mark()
- self.forward(3)
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_start(self):
- self.fetch_flow_collection_start(FlowSequenceStartToken)
-
- def fetch_flow_mapping_start(self):
- self.fetch_flow_collection_start(FlowMappingStartToken)
-
- def fetch_flow_collection_start(self, TokenClass):
-
- # '[' and '{' may start a simple key.
- self.save_possible_simple_key()
-
- # Increase the flow level.
- self.flow_level += 1
-
- # Simple keys are allowed after '[' and '{'.
- self.allow_simple_key = True
-
- # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_end(self):
- self.fetch_flow_collection_end(FlowSequenceEndToken)
-
- def fetch_flow_mapping_end(self):
- self.fetch_flow_collection_end(FlowMappingEndToken)
-
- def fetch_flow_collection_end(self, TokenClass):
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Decrease the flow level.
- self.flow_level -= 1
-
- # No simple keys after ']' or '}'.
- self.allow_simple_key = False
-
- # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_entry(self):
-
- # Simple keys are allowed after ','.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add FLOW-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(FlowEntryToken(start_mark, end_mark))
-
- def fetch_block_entry(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a new entry?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "sequence entries are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-SEQUENCE-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockSequenceStartToken(mark, mark))
-
- # It's an error for the block entry to occur in the flow context,
- # but we let the parser detect this.
- else:
- pass
-
- # Simple keys are allowed after '-'.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add BLOCK-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(BlockEntryToken(start_mark, end_mark))
-
- def fetch_key(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a key (not necessarily a simple one)?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping keys are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-MAPPING-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after '?' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add KEY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(KeyToken(start_mark, end_mark))
-
- def fetch_value(self):
-
- # Do we determine a simple key?
- if self.flow_level in self.possible_simple_keys:
-
- # Add KEY.
- key = self.possible_simple_keys[self.flow_level]
- del self.possible_simple_keys[self.flow_level]
- self.tokens.insert(key.token_number-self.tokens_taken,
- KeyToken(key.mark, key.mark))
-
- # If this key starts a new block mapping, we need to add
- # BLOCK-MAPPING-START.
- if not self.flow_level:
- if self.add_indent(key.column):
- self.tokens.insert(key.token_number-self.tokens_taken,
- BlockMappingStartToken(key.mark, key.mark))
-
- # There cannot be two simple keys one after another.
- self.allow_simple_key = False
-
- # It must be part of a complex key.
- else:
-
- # Block context needs additional checks.
- # (Do we really need them? They will be caught by the parser
- # anyway.)
- if not self.flow_level:
-
- # We are allowed to start a complex value if and only if
- # we can start a simple key.
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping values are not allowed here",
- self.get_mark())
-
- # If this value starts a new block mapping, we need to add
- # BLOCK-MAPPING-START. It will be detected as an error later by
- # the parser.
- if not self.flow_level:
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after ':' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add VALUE.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(ValueToken(start_mark, end_mark))
-
- def fetch_alias(self):
-
- # ALIAS could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ALIAS.
- self.allow_simple_key = False
-
- # Scan and add ALIAS.
- self.tokens.append(self.scan_anchor(AliasToken))
-
- def fetch_anchor(self):
-
- # ANCHOR could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ANCHOR.
- self.allow_simple_key = False
-
- # Scan and add ANCHOR.
- self.tokens.append(self.scan_anchor(AnchorToken))
-
- def fetch_tag(self):
-
- # TAG could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after TAG.
- self.allow_simple_key = False
-
- # Scan and add TAG.
- self.tokens.append(self.scan_tag())
-
- def fetch_literal(self):
- self.fetch_block_scalar(style='|')
-
- def fetch_folded(self):
- self.fetch_block_scalar(style='>')
-
- def fetch_block_scalar(self, style):
-
- # A simple key may follow a block scalar.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_block_scalar(style))
-
- def fetch_single(self):
- self.fetch_flow_scalar(style='\'')
-
- def fetch_double(self):
- self.fetch_flow_scalar(style='"')
-
- def fetch_flow_scalar(self, style):
-
- # A flow scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after flow scalars.
- self.allow_simple_key = False
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_flow_scalar(style))
-
- def fetch_plain(self):
-
- # A plain scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after plain scalars. But note that `scan_plain` will
- # change this flag if the scan is finished at the beginning of the
- # line.
- self.allow_simple_key = False
-
- # Scan and add SCALAR. May change `allow_simple_key`.
- self.tokens.append(self.scan_plain())
-
- # Checkers.
-
- def check_directive(self):
-
- # DIRECTIVE: ^ '%' ...
- # The '%' indicator is already checked.
- if self.column == 0:
- return True
-
- def check_document_start(self):
-
- # DOCUMENT-START: ^ '---' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == u'---' \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_document_end(self):
-
- # DOCUMENT-END: ^ '...' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == u'...' \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_block_entry(self):
-
- # BLOCK-ENTRY: '-' (' '|'\n')
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_key(self):
-
- # KEY(flow context): '?'
- if self.flow_level:
- return True
-
- # KEY(block context): '?' (' '|'\n')
- else:
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_value(self):
-
- # VALUE(flow context): ':'
- if self.flow_level:
- return True
-
- # VALUE(block context): ':' (' '|'\n')
- else:
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_plain(self):
-
- # A plain scalar may start with any non-space character except:
- # '-', '?', ':', ',', '[', ']', '{', '}',
- # '#', '&', '*', '!', '|', '>', '\'', '\"',
- # '%', '@', '`'.
- #
- # It may also start with
- # '-', '?', ':'
- # if it is followed by a non-space character.
- #
- # Note that we limit the last rule to the block context (except the
- # '-' character) because we want the flow context to be space
- # independent.
- ch = self.peek()
- return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
- or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
- and (ch == u'-' or (not self.flow_level and ch in u'?:')))
-
- # Scanners.
-
- def scan_to_next_token(self):
- # We ignore spaces, line breaks and comments.
- # If we find a line break in the block context, we set the flag
- # `allow_simple_key` on.
- # The byte order mark is stripped if it's the first character in the
- # stream. We do not yet support BOM inside the stream as the
- # specification requires. Any such mark will be considered as a part
- # of the document.
- #
- # TODO: We need to make tab handling rules more sane. A good rule is
- # Tabs cannot precede tokens
- # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
- # KEY(block), VALUE(block), BLOCK-ENTRY
- # So the checking code is
- # if <TAB>:
- # self.allow_simple_keys = False
- # We also need to add the check for `allow_simple_keys == True` to
- # `unwind_indent` before issuing BLOCK-END.
- # Scanners for block, flow, and plain scalars need to be modified.
-
- if self.index == 0 and self.peek() == u'\uFEFF':
- self.forward()
- found = False
- while not found:
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- if self.scan_line_break():
- if not self.flow_level:
- self.allow_simple_key = True
- else:
- found = True
-
- def scan_directive(self):
- # See the specification for details.
- start_mark = self.get_mark()
- self.forward()
- name = self.scan_directive_name(start_mark)
- value = None
- if name == u'YAML':
- value = self.scan_yaml_directive_value(start_mark)
- end_mark = self.get_mark()
- elif name == u'TAG':
- value = self.scan_tag_directive_value(start_mark)
- end_mark = self.get_mark()
- else:
- end_mark = self.get_mark()
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- self.scan_directive_ignored_line(start_mark)
- return DirectiveToken(name, value, start_mark, end_mark)
-
- def scan_directive_name(self, start_mark):
- # See the specification for details.
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- return value
-
- def scan_yaml_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- major = self.scan_yaml_directive_number(start_mark)
- if self.peek() != '.':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or '.', but found %r"
- % self.peek().encode('utf-8'),
- self.get_mark())
- self.forward()
- minor = self.scan_yaml_directive_number(start_mark)
- if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or ' ', but found %r"
- % self.peek().encode('utf-8'),
- self.get_mark())
- return (major, minor)
-
- def scan_yaml_directive_number(self, start_mark):
- # See the specification for details.
- ch = self.peek()
- if not (u'0' <= ch <= u'9'):
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit, but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length = 0
- while u'0' <= self.peek(length) <= u'9':
- length += 1
- value = int(self.prefix(length))
- self.forward(length)
- return value
-
- def scan_tag_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- handle = self.scan_tag_directive_handle(start_mark)
- while self.peek() == u' ':
- self.forward()
- prefix = self.scan_tag_directive_prefix(start_mark)
- return (handle, prefix)
-
- def scan_tag_directive_handle(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_handle('directive', start_mark)
- ch = self.peek()
- if ch != u' ':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return value
-
- def scan_tag_directive_prefix(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_uri('directive', start_mark)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return value
-
- def scan_directive_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in u'\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a comment or a line break, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- self.scan_line_break()
-
- def scan_anchor(self, TokenClass):
- # The specification does not restrict characters for anchors and
- # aliases. This may lead to problems, for instance, the document:
- # [ *alias, value ]
- # can be interpreted in two ways, as
- # [ "value" ]
- # and
- # [ *alias , "value" ]
- # Therefore we restrict aliases to numbers and ASCII letters.
- start_mark = self.get_mark()
- indicator = self.peek()
- if indicator == u'*':
- name = 'alias'
- else:
- name = 'anchor'
- self.forward()
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- end_mark = self.get_mark()
- return TokenClass(value, start_mark, end_mark)
-
- def scan_tag(self):
- # See the specification for details.
- start_mark = self.get_mark()
- ch = self.peek(1)
- if ch == u'<':
- handle = None
- self.forward(2)
- suffix = self.scan_tag_uri('tag', start_mark)
- if self.peek() != u'>':
- raise ScannerError("while parsing a tag", start_mark,
- "expected '>', but found %r" % self.peek().encode('utf-8'),
- self.get_mark())
- self.forward()
- elif ch in u'\0 \t\r\n\x85\u2028\u2029':
- handle = None
- suffix = u'!'
- self.forward()
- else:
- length = 1
- use_handle = False
- while ch not in u'\0 \r\n\x85\u2028\u2029':
- if ch == u'!':
- use_handle = True
- break
- length += 1
- ch = self.peek(length)
- handle = u'!'
- if use_handle:
- handle = self.scan_tag_handle('tag', start_mark)
- else:
- handle = u'!'
- self.forward()
- suffix = self.scan_tag_uri('tag', start_mark)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a tag", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- value = (handle, suffix)
- end_mark = self.get_mark()
- return TagToken(value, start_mark, end_mark)
-
- def scan_block_scalar(self, style):
- # See the specification for details.
-
- if style == '>':
- folded = True
- else:
- folded = False
-
- chunks = []
- start_mark = self.get_mark()
-
- # Scan the header.
- self.forward()
- chomping, increment = self.scan_block_scalar_indicators(start_mark)
- self.scan_block_scalar_ignored_line(start_mark)
-
- # Determine the indentation level and go to the first non-empty line.
- min_indent = self.indent+1
- if min_indent < 1:
- min_indent = 1
- if increment is None:
- breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
- indent = max(min_indent, max_indent)
- else:
- indent = min_indent+increment-1
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- line_break = u''
-
- # Scan the inner part of the block scalar.
- while self.column == indent and self.peek() != u'\0':
- chunks.extend(breaks)
- leading_non_space = self.peek() not in u' \t'
- length = 0
- while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
- length += 1
- chunks.append(self.prefix(length))
- self.forward(length)
- line_break = self.scan_line_break()
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- if self.column == indent and self.peek() != u'\0':
-
- # Unfortunately, folding rules are ambiguous.
- #
- # This is the folding according to the specification:
-
- if folded and line_break == u'\n' \
- and leading_non_space and self.peek() not in u' \t':
- if not breaks:
- chunks.append(u' ')
- else:
- chunks.append(line_break)
-
- # This is Clark Evans's interpretation (also in the spec
- # examples):
- #
- #if folded and line_break == u'\n':
- # if not breaks:
- # if self.peek() not in ' \t':
- # chunks.append(u' ')
- # else:
- # chunks.append(line_break)
- #else:
- # chunks.append(line_break)
- else:
- break
-
- # Chomp the tail.
- if chomping is not False:
- chunks.append(line_break)
- if chomping is True:
- chunks.extend(breaks)
-
- # We are done.
- return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
-
- def scan_block_scalar_indicators(self, start_mark):
- # See the specification for details.
- chomping = None
- increment = None
- ch = self.peek()
- if ch in u'+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch in u'0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- elif ch in u'0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- ch = self.peek()
- if ch in u'+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected chomping or indentation indicators, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- return chomping, increment
-
- def scan_block_scalar_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in u'\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected a comment or a line break, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- self.scan_line_break()
-
- def scan_block_scalar_indentation(self):
- # See the specification for details.
- chunks = []
- max_indent = 0
- end_mark = self.get_mark()
- while self.peek() in u' \r\n\x85\u2028\u2029':
- if self.peek() != u' ':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- else:
- self.forward()
- if self.column > max_indent:
- max_indent = self.column
- return chunks, max_indent, end_mark
-
- def scan_block_scalar_breaks(self, indent):
- # See the specification for details.
- chunks = []
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == u' ':
- self.forward()
- while self.peek() in u'\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == u' ':
- self.forward()
- return chunks, end_mark
-
- def scan_flow_scalar(self, style):
- # See the specification for details.
- # Note that we loosen indentation rules for quoted scalars. Quoted
- # scalars don't need to adhere to indentation because " and ' clearly
- # mark the beginning and the end of them. Therefore we are less
- # restrictive than the specification requires. We only need to check
- # that document separators are not included in scalars.
- if style == '"':
- double = True
- else:
- double = False
- chunks = []
- start_mark = self.get_mark()
- quote = self.peek()
- self.forward()
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- while self.peek() != quote:
- chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- self.forward()
- end_mark = self.get_mark()
- return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
-
- ESCAPE_REPLACEMENTS = {
- u'0': u'\0',
- u'a': u'\x07',
- u'b': u'\x08',
- u't': u'\x09',
- u'\t': u'\x09',
- u'n': u'\x0A',
- u'v': u'\x0B',
- u'f': u'\x0C',
- u'r': u'\x0D',
- u'e': u'\x1B',
- u' ': u'\x20',
- u'\"': u'\"',
- u'\\': u'\\',
- u'N': u'\x85',
- u'_': u'\xA0',
- u'L': u'\u2028',
- u'P': u'\u2029',
- }
-
- ESCAPE_CODES = {
- u'x': 2,
- u'u': 4,
- u'U': 8,
- }
-
- def scan_flow_scalar_non_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- length = 0
- while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
- length += 1
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- ch = self.peek()
- if not double and ch == u'\'' and self.peek(1) == u'\'':
- chunks.append(u'\'')
- self.forward(2)
- elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
- chunks.append(ch)
- self.forward()
- elif double and ch == u'\\':
- self.forward()
- ch = self.peek()
- if ch in self.ESCAPE_REPLACEMENTS:
- chunks.append(self.ESCAPE_REPLACEMENTS[ch])
- self.forward()
- elif ch in self.ESCAPE_CODES:
- length = self.ESCAPE_CODES[ch]
- self.forward()
- for k in range(length):
- if self.peek(k) not in u'0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "expected escape sequence of %d hexdecimal numbers, but found %r" %
- (length, self.peek(k).encode('utf-8')), self.get_mark())
- code = int(self.prefix(length), 16)
- chunks.append(unichr(code))
- self.forward(length)
- elif ch in u'\r\n\x85\u2028\u2029':
- self.scan_line_break()
- chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
- else:
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
- else:
- return chunks
-
- def scan_flow_scalar_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- length = 0
- while self.peek(length) in u' \t':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch == u'\0':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected end of stream", self.get_mark())
- elif ch in u'\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- breaks = self.scan_flow_scalar_breaks(double, start_mark)
- if line_break != u'\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(u' ')
- chunks.extend(breaks)
- else:
- chunks.append(whitespaces)
- return chunks
-
- def scan_flow_scalar_breaks(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- # Instead of checking indentation, we check for document
- # separators.
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected document separator", self.get_mark())
- while self.peek() in u' \t':
- self.forward()
- if self.peek() in u'\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- else:
- return chunks
-
- def scan_plain(self):
- # See the specification for details.
- # We add an additional restriction for the flow context:
- # plain scalars in the flow context cannot contain ',', ':' and '?'.
- # We also keep track of the `allow_simple_key` flag here.
- # Indentation rules are loosened for the flow context.
- chunks = []
- start_mark = self.get_mark()
- end_mark = start_mark
- indent = self.indent+1
- # We allow zero indentation for scalars, but then we need to check for
- # document separators at the beginning of the line.
- #if indent == 0:
- # indent = 1
- spaces = []
- while True:
- length = 0
- if self.peek() == u'#':
- break
- while True:
- ch = self.peek(length)
- if ch in u'\0 \t\r\n\x85\u2028\u2029' \
- or (not self.flow_level and ch == u':' and
- self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
- or (self.flow_level and ch in u',:?[]{}'):
- break
- length += 1
- # It's not clear what we should do with ':' in the flow context.
- if (self.flow_level and ch == u':'
- and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
- self.forward(length)
- raise ScannerError("while scanning a plain scalar", start_mark,
- "found unexpected ':'", self.get_mark(),
- "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
- if length == 0:
- break
- self.allow_simple_key = False
- chunks.extend(spaces)
- chunks.append(self.prefix(length))
- self.forward(length)
- end_mark = self.get_mark()
- spaces = self.scan_plain_spaces(indent, start_mark)
- if not spaces or self.peek() == u'#' \
- or (not self.flow_level and self.column < indent):
- break
- return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
-
- def scan_plain_spaces(self, indent, start_mark):
- # See the specification for details.
- # The specification is really confusing about tabs in plain scalars.
- # We just forbid them completely. Do not use tabs in YAML!
- chunks = []
- length = 0
- while self.peek(length) in u' ':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch in u'\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- self.allow_simple_key = True
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return
- breaks = []
- while self.peek() in u' \r\n\x85\u2028\u2029':
- if self.peek() == ' ':
- self.forward()
- else:
- breaks.append(self.scan_line_break())
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return
- if line_break != u'\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(u' ')
- chunks.extend(breaks)
- elif whitespaces:
- chunks.append(whitespaces)
- return chunks
-
- def scan_tag_handle(self, name, start_mark):
- # See the specification for details.
- # For some strange reason, the specification does not allow '_' in
- # tag handles. I have allowed it anyway.
- ch = self.peek()
- if ch != u'!':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length = 1
- ch = self.peek(length)
- if ch != u' ':
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if ch != u'!':
- self.forward(length)
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length += 1
- value = self.prefix(length)
- self.forward(length)
- return value
-
- def scan_tag_uri(self, name, start_mark):
- # See the specification for details.
- # Note: we do not check if URI is well-formed.
- chunks = []
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
- if ch == u'%':
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- chunks.append(self.scan_uri_escapes(name, start_mark))
- else:
- length += 1
- ch = self.peek(length)
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- if not chunks:
- raise ScannerError("while parsing a %s" % name, start_mark,
- "expected URI, but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return u''.join(chunks)
-
- def scan_uri_escapes(self, name, start_mark):
- # See the specification for details.
- bytes = []
- mark = self.get_mark()
- while self.peek() == u'%':
- self.forward()
- for k in range(2):
- if self.peek(k) not in u'0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
- (self.peek(k).encode('utf-8')), self.get_mark())
- bytes.append(chr(int(self.prefix(2), 16)))
- self.forward(2)
- try:
- value = unicode(''.join(bytes), 'utf-8')
- except UnicodeDecodeError, exc:
- raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
- return value
-
- def scan_line_break(self):
- # Transforms:
- # '\r\n' : '\n'
- # '\r' : '\n'
- # '\n' : '\n'
- # '\x85' : '\n'
- # '\u2028' : '\u2028'
- # '\u2029 : '\u2029'
- # default : ''
- ch = self.peek()
- if ch in u'\r\n\x85':
- if self.prefix(2) == u'\r\n':
- self.forward(2)
- else:
- self.forward()
- return u'\n'
- elif ch in u'\u2028\u2029':
- self.forward()
- return ch
- return u''
-
-#try:
-# import psyco
-# psyco.bind(Scanner)
-#except ImportError:
-# pass
-
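The scanner above is the first stage of the loading pipeline: it turns a character stream into the token types listed at the top of the file, retroactively inserting KEY and BLOCK-MAPPING-START tokens once a simple key's ':' is found. A short sketch of the resulting token stream, through the equivalent upstream PyYAML entry point:

    import yaml

    # yaml.scan() drives a Scanner like the one above and yields its tokens.
    for token in yaml.scan("key: [a, b]"):
        print(type(token).__name__)
    # StreamStartToken, BlockMappingStartToken, KeyToken, ScalarToken,
    # ValueToken, FlowSequenceStartToken, ScalarToken, FlowEntryToken,
    # ScalarToken, FlowSequenceEndToken, BlockEndToken, StreamEndToken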
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
deleted file mode 100644
index 15fdbb0c0..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['Serializer', 'SerializerError']
-
-from error import YAMLError
-from events import *
-from nodes import *
-
-class SerializerError(YAMLError):
- pass
-
-class Serializer(object):
-
- ANCHOR_TEMPLATE = u'id%03d'
-
- def __init__(self, encoding=None,
- explicit_start=None, explicit_end=None, version=None, tags=None):
- self.use_encoding = encoding
- self.use_explicit_start = explicit_start
- self.use_explicit_end = explicit_end
- self.use_version = version
- self.use_tags = tags
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
- self.closed = None
-
- def open(self):
- if self.closed is None:
- self.emit(StreamStartEvent(encoding=self.use_encoding))
- self.closed = False
- elif self.closed:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError("serializer is already opened")
-
- def close(self):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif not self.closed:
- self.emit(StreamEndEvent())
- self.closed = True
-
- #def __del__(self):
- # self.close()
-
- def serialize(self, node):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif self.closed:
- raise SerializerError("serializer is closed")
- self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
- version=self.use_version, tags=self.use_tags))
- self.anchor_node(node)
- self.serialize_node(node, None, None)
- self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
-
- def anchor_node(self, node):
- if node in self.anchors:
- if self.anchors[node] is None:
- self.anchors[node] = self.generate_anchor(node)
- else:
- self.anchors[node] = None
- if isinstance(node, SequenceNode):
- for item in node.value:
- self.anchor_node(item)
- elif isinstance(node, MappingNode):
- for key, value in node.value:
- self.anchor_node(key)
- self.anchor_node(value)
-
- def generate_anchor(self, node):
- self.last_anchor_id += 1
- return self.ANCHOR_TEMPLATE % self.last_anchor_id
-
- def serialize_node(self, node, parent, index):
- alias = self.anchors[node]
- if node in self.serialized_nodes:
- self.emit(AliasEvent(alias))
- else:
- self.serialized_nodes[node] = True
- self.descend_resolver(parent, index)
- if isinstance(node, ScalarNode):
- detected_tag = self.resolve(ScalarNode, node.value, (True, False))
- default_tag = self.resolve(ScalarNode, node.value, (False, True))
- implicit = (node.tag == detected_tag), (node.tag == default_tag)
- self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
- style=node.style))
- elif isinstance(node, SequenceNode):
- implicit = (node.tag
- == self.resolve(SequenceNode, node.value, True))
- self.emit(SequenceStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- index = 0
- for item in node.value:
- self.serialize_node(item, node, index)
- index += 1
- self.emit(SequenceEndEvent())
- elif isinstance(node, MappingNode):
- implicit = (node.tag
- == self.resolve(MappingNode, node.value, True))
- self.emit(MappingStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- for key, value in node.value:
- self.serialize_node(key, node, None)
- self.serialize_node(value, node, key)
- self.emit(MappingEndEvent())
- self.ascend_resolver()
-
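The serializer walks a representation tree, assigns an `id%03d` anchor to any node that will be emitted more than once, and replays later occurrences as alias events. A minimal sketch of the observable effect, using upstream PyYAML (which uses the same `ANCHOR_TEMPLATE`):

    import yaml

    shared = ['a', 'b']
    # The shared list is anchored on first emission and aliased afterwards.
    print(yaml.safe_dump({'x': shared, 'y': shared}, default_flow_style=False))
    # x: &id001
    # - a
    # - b
    # y: *id001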
diff --git a/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
deleted file mode 100644
index c5c4fb116..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-class Token(object):
- def __init__(self, start_mark, end_mark):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in self.__dict__
- if not key.endswith('_mark')]
- attributes.sort()
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-#class BOMToken(Token):
-# id = '<byte order mark>'
-
-class DirectiveToken(Token):
- id = '<directive>'
- def __init__(self, name, value, start_mark, end_mark):
- self.name = name
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class DocumentStartToken(Token):
- id = '<document start>'
-
-class DocumentEndToken(Token):
- id = '<document end>'
-
-class StreamStartToken(Token):
- id = '<stream start>'
- def __init__(self, start_mark=None, end_mark=None,
- encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndToken(Token):
- id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
- id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
- id = '<block mapping start>'
-
-class BlockEndToken(Token):
- id = '<block end>'
-
-class FlowSequenceStartToken(Token):
- id = '['
-
-class FlowMappingStartToken(Token):
- id = '{'
-
-class FlowSequenceEndToken(Token):
- id = ']'
-
-class FlowMappingEndToken(Token):
- id = '}'
-
-class KeyToken(Token):
- id = '?'
-
-class ValueToken(Token):
- id = ':'
-
-class BlockEntryToken(Token):
- id = '-'
-
-class FlowEntryToken(Token):
- id = ','
-
-class AliasToken(Token):
- id = '<alias>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class AnchorToken(Token):
- id = '<anchor>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class TagToken(Token):
- id = '<tag>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class ScalarToken(Token):
- id = '<scalar>'
- def __init__(self, value, plain, start_mark, end_mark, style=None):
- self.value = value
- self.plain = plain
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
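Tokens are plain record classes; apart from their payload attributes they carry only start/end marks, which is exactly what the `__repr__` above filters out. A small sketch of inspecting one, again via the upstream package:

    import yaml

    tokens = list(yaml.scan("answer: 42"))
    tok = tokens[3]            # the first ScalarToken ('answer')
    print(tok)                 # ScalarToken(plain=True, style=None, value='answer')
    print(tok.start_mark.line, tok.start_mark.column)   # 0 0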
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py b/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
deleted file mode 100644
index a884b33cf..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
+++ /dev/null
@@ -1,313 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-from .error import *
-
-from .tokens import *
-from .events import *
-from .nodes import *
-
-from .loader import *
-from .dumper import *
-
-__version__ = '3.11'
-try:
- from .cyaml import *
- __with_libyaml__ = True
-except ImportError:
- __with_libyaml__ = False
-
-import io
-
-def scan(stream, Loader=Loader):
- """
- Scan a YAML stream and produce scanning tokens.
- """
- loader = Loader(stream)
- try:
- while loader.check_token():
- yield loader.get_token()
- finally:
- loader.dispose()
-
-def parse(stream, Loader=Loader):
- """
- Parse a YAML stream and produce parsing events.
- """
- loader = Loader(stream)
- try:
- while loader.check_event():
- yield loader.get_event()
- finally:
- loader.dispose()
-
-def compose(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding representation tree.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_node()
- finally:
- loader.dispose()
-
-def compose_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding representation trees.
- """
- loader = Loader(stream)
- try:
- while loader.check_node():
- yield loader.get_node()
- finally:
- loader.dispose()
-
-def load(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_data()
- finally:
- loader.dispose()
-
-def load_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- """
- loader = Loader(stream)
- try:
- while loader.check_data():
- yield loader.get_data()
- finally:
- loader.dispose()
-
-def safe_load(stream):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- Resolve only basic YAML tags.
- """
- return load(stream, SafeLoader)
-
-def safe_load_all(stream):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- Resolve only basic YAML tags.
- """
- return load_all(stream, SafeLoader)
-
-def emit(events, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
- """
- Emit YAML parsing events into a stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- stream = io.StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- try:
- for event in events:
- dumper.emit(event)
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize_all(nodes, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of representation trees into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- stream = io.StringIO()
- else:
- stream = io.BytesIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for node in nodes:
- dumper.serialize(node)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize(node, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a representation tree into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return serialize_all([node], stream, Dumper=Dumper, **kwds)
-
-def dump_all(documents, stream=None, Dumper=Dumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of Python objects into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- stream = io.StringIO()
- else:
- stream = io.BytesIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for data in documents:
- dumper.represent(data)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def dump(data, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a Python object into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=Dumper, **kwds)
-
-def safe_dump_all(documents, stream=None, **kwds):
- """
- Serialize a sequence of Python objects into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
-
-def safe_dump(data, stream=None, **kwds):
- """
- Serialize a Python object into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-
-def add_implicit_resolver(tag, regexp, first=None,
- Loader=Loader, Dumper=Dumper):
- """
- Add an implicit scalar detector.
- If an implicit scalar value matches the given regexp,
- the corresponding tag is assigned to the scalar.
- first is a sequence of possible initial characters or None.
- """
- Loader.add_implicit_resolver(tag, regexp, first)
- Dumper.add_implicit_resolver(tag, regexp, first)
-
-def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
- """
- Add a path based resolver for the given tag.
- A path is a list of keys that forms a path
- to a node in the representation tree.
- Keys can be string values, integers, or None.
- """
- Loader.add_path_resolver(tag, path, kind)
- Dumper.add_path_resolver(tag, path, kind)
-
-def add_constructor(tag, constructor, Loader=Loader):
- """
- Add a constructor for the given tag.
- Constructor is a function that accepts a Loader instance
- and a node object and produces the corresponding Python object.
- """
- Loader.add_constructor(tag, constructor)
-
-def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
- """
- Add a multi-constructor for the given tag prefix.
- Multi-constructor is called for a node if its tag starts with tag_prefix.
- Multi-constructor accepts a Loader instance, a tag suffix,
- and a node object and produces the corresponding Python object.
- """
- Loader.add_multi_constructor(tag_prefix, multi_constructor)
-
-def add_representer(data_type, representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Representer is a function accepting a Dumper instance
- and an instance of the given data type
- and producing the corresponding representation node.
- """
- Dumper.add_representer(data_type, representer)
-
-def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Multi-representer is a function accepting a Dumper instance
- and an instance of the given data type or subtype
- and producing the corresponding representation node.
- """
- Dumper.add_multi_representer(data_type, multi_representer)
-
-class YAMLObjectMetaclass(type):
- """
- The metaclass for YAMLObject.
- """
- def __init__(cls, name, bases, kwds):
- super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
- if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
- cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
- cls.yaml_dumper.add_representer(cls, cls.to_yaml)
-
-class YAMLObject(metaclass=YAMLObjectMetaclass):
- """
- An object that can dump itself to a YAML stream
- and load itself from a YAML stream.
- """
-
- __slots__ = () # no direct instantiation, so allow immutable subclasses
-
- yaml_loader = Loader
- yaml_dumper = Dumper
-
- yaml_tag = None
- yaml_flow_style = None
-
- @classmethod
- def from_yaml(cls, loader, node):
- """
- Convert a representation node to a Python object.
- """
- return loader.construct_yaml_object(node, cls)
-
- @classmethod
- def to_yaml(cls, dumper, data):
- """
- Convert a Python object to a representation node.
- """
- return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
- flow_style=cls.yaml_flow_style)
-
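The module entry points removed above are the standard PyYAML surface: dump()/dump_all() drive a Dumper over one or many documents, the safe_* variants swap in SafeDumper, and YAMLObject's metaclass registers from_yaml/to_yaml on the default Loader and Dumper whenever a subclass sets yaml_tag. A minimal usage sketch (the pyyaml3 import name is an assumption about the vendored layout, not something this patch states):

    import pyyaml3 as yaml

    class Threshold(yaml.YAMLObject):
        # Setting yaml_tag makes YAMLObjectMetaclass register the
        # constructor and representer for this class automatically.
        yaml_tag = '!Threshold'

        def __init__(self, warn, crit):
            self.warn = warn
            self.crit = crit

    print(yaml.dump(Threshold(75, 90)))
    # -> !Threshold {crit: 90, warn: 75}   (exact layout depends on defaults)

    # safe_dump() produces only basic tags, so it accepts the plain dict
    # but would raise for a Threshold instance.
    print(yaml.safe_dump({'warn': 75, 'crit': 90}))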
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/composer.py b/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
deleted file mode 100644
index c418bba91..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['Composer', 'ComposerError']
-
-from .error import MarkedYAMLError
-from .events import *
-from .nodes import *
-
-class ComposerError(MarkedYAMLError):
- pass
-
-class Composer:
-
- def __init__(self):
- self.anchors = {}
-
- def check_node(self):
- # Drop the STREAM-START event.
- if self.check_event(StreamStartEvent):
- self.get_event()
-
- # Are there more documents available?
- return not self.check_event(StreamEndEvent)
-
- def get_node(self):
- # Get the root node of the next document.
- if not self.check_event(StreamEndEvent):
- return self.compose_document()
-
- def get_single_node(self):
- # Drop the STREAM-START event.
- self.get_event()
-
- # Compose a document if the stream is not empty.
- document = None
- if not self.check_event(StreamEndEvent):
- document = self.compose_document()
-
- # Ensure that the stream contains no more documents.
- if not self.check_event(StreamEndEvent):
- event = self.get_event()
- raise ComposerError("expected a single document in the stream",
- document.start_mark, "but found another document",
- event.start_mark)
-
- # Drop the STREAM-END event.
- self.get_event()
-
- return document
-
- def compose_document(self):
- # Drop the DOCUMENT-START event.
- self.get_event()
-
- # Compose the root node.
- node = self.compose_node(None, None)
-
- # Drop the DOCUMENT-END event.
- self.get_event()
-
- self.anchors = {}
- return node
-
- def compose_node(self, parent, index):
- if self.check_event(AliasEvent):
- event = self.get_event()
- anchor = event.anchor
- if anchor not in self.anchors:
- raise ComposerError(None, None, "found undefined alias %r"
- % anchor, event.start_mark)
- return self.anchors[anchor]
- event = self.peek_event()
- anchor = event.anchor
- if anchor is not None:
- if anchor in self.anchors:
- raise ComposerError("found duplicate anchor %r; first occurrence"
- % anchor, self.anchors[anchor].start_mark,
- "second occurrence", event.start_mark)
- self.descend_resolver(parent, index)
- if self.check_event(ScalarEvent):
- node = self.compose_scalar_node(anchor)
- elif self.check_event(SequenceStartEvent):
- node = self.compose_sequence_node(anchor)
- elif self.check_event(MappingStartEvent):
- node = self.compose_mapping_node(anchor)
- self.ascend_resolver()
- return node
-
- def compose_scalar_node(self, anchor):
- event = self.get_event()
- tag = event.tag
- if tag is None or tag == '!':
- tag = self.resolve(ScalarNode, event.value, event.implicit)
- node = ScalarNode(tag, event.value,
- event.start_mark, event.end_mark, style=event.style)
- if anchor is not None:
- self.anchors[anchor] = node
- return node
-
- def compose_sequence_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == '!':
- tag = self.resolve(SequenceNode, None, start_event.implicit)
- node = SequenceNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- index = 0
- while not self.check_event(SequenceEndEvent):
- node.value.append(self.compose_node(node, index))
- index += 1
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
- def compose_mapping_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == '!':
- tag = self.resolve(MappingNode, None, start_event.implicit)
- node = MappingNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- while not self.check_event(MappingEndEvent):
- #key_event = self.peek_event()
- item_key = self.compose_node(node, None)
- #if item_key in node.value:
- # raise ComposerError("while composing a mapping", start_event.start_mark,
- # "found duplicate key", key_event.start_mark)
- item_value = self.compose_node(node, item_key)
- #node.value[item_key] = item_value
- node.value.append((item_key, item_value))
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
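The composer is where YAML anchors acquire aliasing semantics: compose_node() stores each &anchor node in self.anchors, a later *alias returns that exact node (or raises ComposerError if it is undefined or duplicated), and the per-document reset in compose_document() keeps anchors from leaking across documents. Because the constructor caches results per node, aliased data ends up shared, not copied. A sketch, assuming the vendored package is importable as pyyaml3 and exposes the usual safe_load() from the part of __init__.py not shown in this hunk:

    import pyyaml3 as yaml

    doc = "defaults: &base {update_every: 1}\nmysql: *base\n"
    data = yaml.safe_load(doc)
    # Both keys were constructed from the same composed node, so the
    # dicts are shared rather than copied.
    assert data['mysql'] is data['defaults']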
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py b/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
deleted file mode 100644
index ee09a7a7e..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
+++ /dev/null
@@ -1,687 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
- 'ConstructorError']
-
-from .error import *
-from .nodes import *
-
-import collections.abc, datetime, base64, binascii, re, sys, types
-
-class ConstructorError(MarkedYAMLError):
- pass
-
-class BaseConstructor:
-
- yaml_constructors = {}
- yaml_multi_constructors = {}
-
- def __init__(self):
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.state_generators = []
- self.deep_construct = False
-
- def check_data(self):
- # Are there more documents available?
- return self.check_node()
-
- def get_data(self):
- # Construct and return the next document.
- if self.check_node():
- return self.construct_document(self.get_node())
-
- def get_single_data(self):
- # Ensure that the stream contains a single document and construct it.
- node = self.get_single_node()
- if node is not None:
- return self.construct_document(node)
- return None
-
- def construct_document(self, node):
- data = self.construct_object(node)
- while self.state_generators:
- state_generators = self.state_generators
- self.state_generators = []
- for generator in state_generators:
- for dummy in generator:
- pass
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.deep_construct = False
- return data
-
- def construct_object(self, node, deep=False):
- if node in self.constructed_objects:
- return self.constructed_objects[node]
- if deep:
- old_deep = self.deep_construct
- self.deep_construct = True
- if node in self.recursive_objects:
- raise ConstructorError(None, None,
- "found unconstructable recursive node", node.start_mark)
- self.recursive_objects[node] = None
- constructor = None
- tag_suffix = None
- if node.tag in self.yaml_constructors:
- constructor = self.yaml_constructors[node.tag]
- else:
- for tag_prefix in self.yaml_multi_constructors:
- if node.tag.startswith(tag_prefix):
- tag_suffix = node.tag[len(tag_prefix):]
- constructor = self.yaml_multi_constructors[tag_prefix]
- break
- else:
- if None in self.yaml_multi_constructors:
- tag_suffix = node.tag
- constructor = self.yaml_multi_constructors[None]
- elif None in self.yaml_constructors:
- constructor = self.yaml_constructors[None]
- elif isinstance(node, ScalarNode):
- constructor = self.__class__.construct_scalar
- elif isinstance(node, SequenceNode):
- constructor = self.__class__.construct_sequence
- elif isinstance(node, MappingNode):
- constructor = self.__class__.construct_mapping
- if tag_suffix is None:
- data = constructor(self, node)
- else:
- data = constructor(self, tag_suffix, node)
- if isinstance(data, types.GeneratorType):
- generator = data
- data = next(generator)
- if self.deep_construct:
- for dummy in generator:
- pass
- else:
- self.state_generators.append(generator)
- self.constructed_objects[node] = data
- del self.recursive_objects[node]
- if deep:
- self.deep_construct = old_deep
- return data
-
- def construct_scalar(self, node):
- if not isinstance(node, ScalarNode):
- raise ConstructorError(None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
- return node.value
-
- def construct_sequence(self, node, deep=False):
- if not isinstance(node, SequenceNode):
- raise ConstructorError(None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
- return [self.construct_object(child, deep=deep)
- for child in node.value]
-
- def construct_mapping(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- mapping = {}
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- if not isinstance(key, collections.abc.Hashable):
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found unhashable key", key_node.start_mark)
- value = self.construct_object(value_node, deep=deep)
- mapping[key] = value
- return mapping
-
- def construct_pairs(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- pairs = []
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- value = self.construct_object(value_node, deep=deep)
- pairs.append((key, value))
- return pairs
-
- @classmethod
- def add_constructor(cls, tag, constructor):
- if 'yaml_constructors' not in cls.__dict__:
- cls.yaml_constructors = cls.yaml_constructors.copy()
- cls.yaml_constructors[tag] = constructor
-
- @classmethod
- def add_multi_constructor(cls, tag_prefix, multi_constructor):
- if 'yaml_multi_constructors' not in cls.__dict__:
- cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
- cls.yaml_multi_constructors[tag_prefix] = multi_constructor
-
-class SafeConstructor(BaseConstructor):
-
- def construct_scalar(self, node):
- if isinstance(node, MappingNode):
- for key_node, value_node in node.value:
- if key_node.tag == 'tag:yaml.org,2002:value':
- return self.construct_scalar(value_node)
- return super().construct_scalar(node)
-
- def flatten_mapping(self, node):
- merge = []
- index = 0
- while index < len(node.value):
- key_node, value_node = node.value[index]
- if key_node.tag == 'tag:yaml.org,2002:merge':
- del node.value[index]
- if isinstance(value_node, MappingNode):
- self.flatten_mapping(value_node)
- merge.extend(value_node.value)
- elif isinstance(value_node, SequenceNode):
- submerge = []
- for subnode in value_node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing a mapping",
- node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- self.flatten_mapping(subnode)
- submerge.append(subnode.value)
- submerge.reverse()
- for value in submerge:
- merge.extend(value)
- else:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, but found %s"
- % value_node.id, value_node.start_mark)
- elif key_node.tag == 'tag:yaml.org,2002:value':
- key_node.tag = 'tag:yaml.org,2002:str'
- index += 1
- else:
- index += 1
- if merge:
- node.value = merge + node.value
-
- def construct_mapping(self, node, deep=False):
- if isinstance(node, MappingNode):
- self.flatten_mapping(node)
- return super().construct_mapping(node, deep=deep)
-
- def construct_yaml_null(self, node):
- self.construct_scalar(node)
- return None
-
- bool_values = {
- 'yes': True,
- 'no': False,
- 'true': True,
- 'false': False,
- 'on': True,
- 'off': False,
- }
-
- def construct_yaml_bool(self, node):
- value = self.construct_scalar(node)
- return self.bool_values[value.lower()]
-
- def construct_yaml_int(self, node):
- value = self.construct_scalar(node)
- value = value.replace('_', '')
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '0':
- return 0
- elif value.startswith('0b'):
- return sign*int(value[2:], 2)
- elif value.startswith('0x'):
- return sign*int(value[2:], 16)
- elif value[0] == '0':
- return sign*int(value, 8)
- elif ':' in value:
- digits = [int(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*int(value)
-
- inf_value = 1e300
- while inf_value != inf_value*inf_value:
- inf_value *= inf_value
- nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
-
- def construct_yaml_float(self, node):
- value = self.construct_scalar(node)
- value = value.replace('_', '').lower()
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '.inf':
- return sign*self.inf_value
- elif value == '.nan':
- return self.nan_value
- elif ':' in value:
- digits = [float(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0.0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*float(value)
-
- def construct_yaml_binary(self, node):
- try:
- value = self.construct_scalar(node).encode('ascii')
- except UnicodeEncodeError as exc:
- raise ConstructorError(None, None,
- "failed to convert base64 data into ascii: %s" % exc,
- node.start_mark)
- try:
- if hasattr(base64, 'decodebytes'):
- return base64.decodebytes(value)
- else:
- return base64.decodestring(value)
- except binascii.Error as exc:
- raise ConstructorError(None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- timestamp_regexp = re.compile(
- r'''^(?P<year>[0-9][0-9][0-9][0-9])
- -(?P<month>[0-9][0-9]?)
- -(?P<day>[0-9][0-9]?)
- (?:(?:[Tt]|[ \t]+)
- (?P<hour>[0-9][0-9]?)
- :(?P<minute>[0-9][0-9])
- :(?P<second>[0-9][0-9])
- (?:\.(?P<fraction>[0-9]*))?
- (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
- (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
- def construct_yaml_timestamp(self, node):
- value = self.construct_scalar(node)
- match = self.timestamp_regexp.match(value)
- values = match.groupdict()
- year = int(values['year'])
- month = int(values['month'])
- day = int(values['day'])
- if not values['hour']:
- return datetime.date(year, month, day)
- hour = int(values['hour'])
- minute = int(values['minute'])
- second = int(values['second'])
- fraction = 0
- if values['fraction']:
- fraction = values['fraction'][:6]
- while len(fraction) < 6:
- fraction += '0'
- fraction = int(fraction)
- delta = None
- if values['tz_sign']:
- tz_hour = int(values['tz_hour'])
- tz_minute = int(values['tz_minute'] or 0)
- delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
- if values['tz_sign'] == '-':
- delta = -delta
- data = datetime.datetime(year, month, day, hour, minute, second, fraction)
- if delta:
- data -= delta
- return data
-
- def construct_yaml_omap(self, node):
- # Note: we do not check for duplicate keys, because it's too
- # CPU-expensive.
- omap = []
- yield omap
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- omap.append((key, value))
-
- def construct_yaml_pairs(self, node):
- # Note: the same code as `construct_yaml_omap`.
- pairs = []
- yield pairs
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- pairs.append((key, value))
-
- def construct_yaml_set(self, node):
- data = set()
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_str(self, node):
- return self.construct_scalar(node)
-
- def construct_yaml_seq(self, node):
- data = []
- yield data
- data.extend(self.construct_sequence(node))
-
- def construct_yaml_map(self, node):
- data = {}
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_object(self, node, cls):
- data = cls.__new__(cls)
- yield data
- if hasattr(data, '__setstate__'):
- state = self.construct_mapping(node, deep=True)
- data.__setstate__(state)
- else:
- state = self.construct_mapping(node)
- data.__dict__.update(state)
-
- def construct_undefined(self, node):
- raise ConstructorError(None, None,
- "could not determine a constructor for the tag %r" % node.tag,
- node.start_mark)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:null',
- SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:bool',
- SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:int',
- SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:float',
- SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:binary',
- SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:timestamp',
- SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:omap',
- SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:pairs',
- SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:set',
- SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:str',
- SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:seq',
- SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:map',
- SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(None,
- SafeConstructor.construct_undefined)
-
-class Constructor(SafeConstructor):
-
- def construct_python_str(self, node):
- return self.construct_scalar(node)
-
- def construct_python_unicode(self, node):
- return self.construct_scalar(node)
-
- def construct_python_bytes(self, node):
- try:
- value = self.construct_scalar(node).encode('ascii')
- except UnicodeEncodeError as exc:
- raise ConstructorError(None, None,
- "failed to convert base64 data into ascii: %s" % exc,
- node.start_mark)
- try:
- if hasattr(base64, 'decodebytes'):
- return base64.decodebytes(value)
- else:
- return base64.decodestring(value)
- except binascii.Error as exc:
- raise ConstructorError(None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- def construct_python_long(self, node):
- return self.construct_yaml_int(node)
-
- def construct_python_complex(self, node):
- return complex(self.construct_scalar(node))
-
- def construct_python_tuple(self, node):
- return tuple(self.construct_sequence(node))
-
- def find_python_module(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python module", mark,
- "expected non-empty name appended to the tag", mark)
- try:
- __import__(name)
- except ImportError as exc:
- raise ConstructorError("while constructing a Python module", mark,
- "cannot find module %r (%s)" % (name, exc), mark)
- return sys.modules[name]
-
- def find_python_name(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python object", mark,
- "expected non-empty name appended to the tag", mark)
- if '.' in name:
- module_name, object_name = name.rsplit('.', 1)
- else:
- module_name = 'builtins'
- object_name = name
- try:
- __import__(module_name)
- except ImportError as exc:
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find module %r (%s)" % (module_name, exc), mark)
- module = sys.modules[module_name]
- if not hasattr(module, object_name):
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find %r in the module %r"
- % (object_name, module.__name__), mark)
- return getattr(module, object_name)
-
- def construct_python_name(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python name", node.start_mark,
- "expected the empty value, but found %r" % value, node.start_mark)
- return self.find_python_name(suffix, node.start_mark)
-
- def construct_python_module(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python module", node.start_mark,
- "expected the empty value, but found %r" % value, node.start_mark)
- return self.find_python_module(suffix, node.start_mark)
-
- def make_python_instance(self, suffix, node,
- args=None, kwds=None, newobj=False):
- if not args:
- args = []
- if not kwds:
- kwds = {}
- cls = self.find_python_name(suffix, node.start_mark)
- if newobj and isinstance(cls, type):
- return cls.__new__(cls, *args, **kwds)
- else:
- return cls(*args, **kwds)
-
- def set_python_instance_state(self, instance, state):
- if hasattr(instance, '__setstate__'):
- instance.__setstate__(state)
- else:
- slotstate = {}
- if isinstance(state, tuple) and len(state) == 2:
- state, slotstate = state
- if hasattr(instance, '__dict__'):
- instance.__dict__.update(state)
- elif state:
- slotstate.update(state)
- for key, value in slotstate.items():
- setattr(instance, key, value)
-
- def construct_python_object(self, suffix, node):
- # Format:
- # !!python/object:module.name { ... state ... }
- instance = self.make_python_instance(suffix, node, newobj=True)
- yield instance
- deep = hasattr(instance, '__setstate__')
- state = self.construct_mapping(node, deep=deep)
- self.set_python_instance_state(instance, state)
-
- def construct_python_object_apply(self, suffix, node, newobj=False):
- # Format:
- # !!python/object/apply # (or !!python/object/new)
- # args: [ ... arguments ... ]
- # kwds: { ... keywords ... }
- # state: ... state ...
- # listitems: [ ... listitems ... ]
- # dictitems: { ... dictitems ... }
- # or short format:
- # !!python/object/apply [ ... arguments ... ]
- # The difference between !!python/object/apply and !!python/object/new
- # is how the object is created; see make_python_instance for details.
- if isinstance(node, SequenceNode):
- args = self.construct_sequence(node, deep=True)
- kwds = {}
- state = {}
- listitems = []
- dictitems = {}
- else:
- value = self.construct_mapping(node, deep=True)
- args = value.get('args', [])
- kwds = value.get('kwds', {})
- state = value.get('state', {})
- listitems = value.get('listitems', [])
- dictitems = value.get('dictitems', {})
- instance = self.make_python_instance(suffix, node, args, kwds, newobj)
- if state:
- self.set_python_instance_state(instance, state)
- if listitems:
- instance.extend(listitems)
- if dictitems:
- for key in dictitems:
- instance[key] = dictitems[key]
- return instance
-
- def construct_python_object_new(self, suffix, node):
- return self.construct_python_object_apply(suffix, node, newobj=True)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/none',
- Constructor.construct_yaml_null)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/bool',
- Constructor.construct_yaml_bool)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/str',
- Constructor.construct_python_str)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/unicode',
- Constructor.construct_python_unicode)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/bytes',
- Constructor.construct_python_bytes)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/int',
- Constructor.construct_yaml_int)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/long',
- Constructor.construct_python_long)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/float',
- Constructor.construct_yaml_float)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/complex',
- Constructor.construct_python_complex)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/list',
- Constructor.construct_yaml_seq)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/tuple',
- Constructor.construct_python_tuple)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/dict',
- Constructor.construct_yaml_map)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/name:',
- Constructor.construct_python_name)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/module:',
- Constructor.construct_python_module)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/object:',
- Constructor.construct_python_object)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/object/apply:',
- Constructor.construct_python_object_apply)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/object/new:',
- Constructor.construct_python_object_new)
-
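Worth noting for anyone reading configs through this module: SafeConstructor implements YAML 1.1 scalar rules, several of which surprise users coming from YAML 1.2, while the full Constructor additionally honors the !!python/... tags (including object/apply, i.e. arbitrary callables), which is exactly why the safe_* entry points exist. A few illustrative calls (hypothetical pyyaml3 import name; safe_load assumed from the unshown part of __init__.py):

    import pyyaml3 as yaml

    yaml.safe_load('on')      # True  (bool_values table: yes/no/on/off)
    yaml.safe_load('0x1A')    # 26    (construct_yaml_int, hex branch)
    yaml.safe_load('010')     # 8     (leading zero -> octal in YAML 1.1)
    yaml.safe_load('1:30')    # 90    (sexagesimal ':' branch: 1*60 + 30)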
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py b/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
deleted file mode 100644
index e6c16d894..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
- 'CBaseDumper', 'CSafeDumper', 'CDumper']
-
-from _yaml import CParser, CEmitter
-
-from .constructor import *
-
-from .serializer import *
-from .representer import *
-
-from .resolver import *
-
-class CBaseLoader(CParser, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class CSafeLoader(CParser, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class CLoader(CParser, Constructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
-class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CDumper(CEmitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
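Note that cyaml.py merely pairs the libyaml-backed CParser/CEmitter with the pure-Python constructors, representers, and resolvers; importing it raises ImportError when the _yaml extension is not installed. Callers therefore typically guard the import and fall back to the pure-Python classes (module paths below follow the conventional PyYAML layout and are assumptions about this vendored copy):

    try:
        from pyyaml3.cyaml import CSafeLoader as SafeLoaderImpl   # libyaml fast path
    except ImportError:
        from pyyaml3.loader import SafeLoader as SafeLoaderImpl   # pure-Python fallback

    # Either class satisfies the same Loader interface, so call sites
    # pass SafeLoaderImpl to load()/load_all() unchanged.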
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py b/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
deleted file mode 100644
index ba590c6e6..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
-
-from .emitter import *
-from .serializer import *
-from .representer import *
-from .resolver import *
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
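Each dumper here is pure composition: the constructor fans its keyword arguments out to Emitter (line layout), Serializer (document framing), and a Representer (Python objects to nodes), with a Resolver mixed in for implicit tags. That is why the keywords accepted by the module-level dump() map one-to-one onto these cooperating classes, as in this sketch (again assuming a pyyaml3 import name):

    import pyyaml3 as yaml

    text = yaml.dump(
        {'alarm': 'cpu', 'levels': [75, 90]},
        Dumper=yaml.SafeDumper,     # SafeRepresenter: basic YAML tags only
        indent=4, width=60,         # consumed by Emitter
        explicit_start=True,        # consumed by Serializer (emits '---')
        default_flow_style=False,   # consumed by the Representer
    )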
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py b/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
deleted file mode 100644
index d4be65a8e..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
+++ /dev/null
@@ -1,1138 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Emitter expects events obeying the following grammar:
-# stream ::= STREAM-START document* STREAM-END
-# document ::= DOCUMENT-START node DOCUMENT-END
-# node ::= SCALAR | sequence | mapping
-# sequence ::= SEQUENCE-START node* SEQUENCE-END
-# mapping ::= MAPPING-START (node node)* MAPPING-END
-
-__all__ = ['Emitter', 'EmitterError']
-
-from .error import YAMLError
-from .events import *
-
-class EmitterError(YAMLError):
- pass
-
-class ScalarAnalysis:
- def __init__(self, scalar, empty, multiline,
- allow_flow_plain, allow_block_plain,
- allow_single_quoted, allow_double_quoted,
- allow_block):
- self.scalar = scalar
- self.empty = empty
- self.multiline = multiline
- self.allow_flow_plain = allow_flow_plain
- self.allow_block_plain = allow_block_plain
- self.allow_single_quoted = allow_single_quoted
- self.allow_double_quoted = allow_double_quoted
- self.allow_block = allow_block
-
-class Emitter:
-
- DEFAULT_TAG_PREFIXES = {
- '!' : '!',
- 'tag:yaml.org,2002:' : '!!',
- }
-
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
-
- # The stream should have the methods `write` and possibly `flush`.
- self.stream = stream
-
- # Encoding can be overridden by STREAM-START.
- self.encoding = None
-
- # Emitter is a state machine with a stack of states to handle nested
- # structures.
- self.states = []
- self.state = self.expect_stream_start
-
- # Current event and the event queue.
- self.events = []
- self.event = None
-
- # The current indentation level and the stack of previous indents.
- self.indents = []
- self.indent = None
-
- # Flow level.
- self.flow_level = 0
-
- # Contexts.
- self.root_context = False
- self.sequence_context = False
- self.mapping_context = False
- self.simple_key_context = False
-
- # Characteristics of the last emitted character:
- # - current position.
- # - is it a whitespace?
- # - is it an indention character
- # (indentation space, '-', '?', or ':')?
- self.line = 0
- self.column = 0
- self.whitespace = True
- self.indention = True
-
- # Whether the document requires an explicit document indicator
- self.open_ended = False
-
- # Formatting details.
- self.canonical = canonical
- self.allow_unicode = allow_unicode
- self.best_indent = 2
- if indent and 1 < indent < 10:
- self.best_indent = indent
- self.best_width = 80
- if width and width > self.best_indent*2:
- self.best_width = width
- self.best_line_break = '\n'
- if line_break in ['\r', '\n', '\r\n']:
- self.best_line_break = line_break
-
- # Tag prefixes.
- self.tag_prefixes = None
-
- # Prepared anchor and tag.
- self.prepared_anchor = None
- self.prepared_tag = None
-
- # Scalar analysis and style.
- self.analysis = None
- self.style = None
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def emit(self, event):
- self.events.append(event)
- while not self.need_more_events():
- self.event = self.events.pop(0)
- self.state()
- self.event = None
-
- # In some cases, we wait for the next few events before emitting.
-
- def need_more_events(self):
- if not self.events:
- return True
- event = self.events[0]
- if isinstance(event, DocumentStartEvent):
- return self.need_events(1)
- elif isinstance(event, SequenceStartEvent):
- return self.need_events(2)
- elif isinstance(event, MappingStartEvent):
- return self.need_events(3)
- else:
- return False
-
- def need_events(self, count):
- level = 0
- for event in self.events[1:]:
- if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
- level += 1
- elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
- level -= 1
- elif isinstance(event, StreamEndEvent):
- level = -1
- if level < 0:
- return False
- return (len(self.events) < count+1)
-
- def increase_indent(self, flow=False, indentless=False):
- self.indents.append(self.indent)
- if self.indent is None:
- if flow:
- self.indent = self.best_indent
- else:
- self.indent = 0
- elif not indentless:
- self.indent += self.best_indent
-
- # States.
-
- # Stream handlers.
-
- def expect_stream_start(self):
- if isinstance(self.event, StreamStartEvent):
- if self.event.encoding and not hasattr(self.stream, 'encoding'):
- self.encoding = self.event.encoding
- self.write_stream_start()
- self.state = self.expect_first_document_start
- else:
- raise EmitterError("expected StreamStartEvent, but got %s"
- % self.event)
-
- def expect_nothing(self):
- raise EmitterError("expected nothing, but got %s" % self.event)
-
- # Document handlers.
-
- def expect_first_document_start(self):
- return self.expect_document_start(first=True)
-
- def expect_document_start(self, first=False):
- if isinstance(self.event, DocumentStartEvent):
- if (self.event.version or self.event.tags) and self.open_ended:
- self.write_indicator('...', True)
- self.write_indent()
- if self.event.version:
- version_text = self.prepare_version(self.event.version)
- self.write_version_directive(version_text)
- self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
- if self.event.tags:
- handles = sorted(self.event.tags.keys())
- for handle in handles:
- prefix = self.event.tags[handle]
- self.tag_prefixes[prefix] = handle
- handle_text = self.prepare_tag_handle(handle)
- prefix_text = self.prepare_tag_prefix(prefix)
- self.write_tag_directive(handle_text, prefix_text)
- implicit = (first and not self.event.explicit and not self.canonical
- and not self.event.version and not self.event.tags
- and not self.check_empty_document())
- if not implicit:
- self.write_indent()
- self.write_indicator('---', True)
- if self.canonical:
- self.write_indent()
- self.state = self.expect_document_root
- elif isinstance(self.event, StreamEndEvent):
- if self.open_ended:
- self.write_indicator('...', True)
- self.write_indent()
- self.write_stream_end()
- self.state = self.expect_nothing
- else:
- raise EmitterError("expected DocumentStartEvent, but got %s"
- % self.event)
-
- def expect_document_end(self):
- if isinstance(self.event, DocumentEndEvent):
- self.write_indent()
- if self.event.explicit:
- self.write_indicator('...', True)
- self.write_indent()
- self.flush_stream()
- self.state = self.expect_document_start
- else:
- raise EmitterError("expected DocumentEndEvent, but got %s"
- % self.event)
-
- def expect_document_root(self):
- self.states.append(self.expect_document_end)
- self.expect_node(root=True)
-
- # Node handlers.
-
- def expect_node(self, root=False, sequence=False, mapping=False,
- simple_key=False):
- self.root_context = root
- self.sequence_context = sequence
- self.mapping_context = mapping
- self.simple_key_context = simple_key
- if isinstance(self.event, AliasEvent):
- self.expect_alias()
- elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
- self.process_anchor('&')
- self.process_tag()
- if isinstance(self.event, ScalarEvent):
- self.expect_scalar()
- elif isinstance(self.event, SequenceStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_sequence():
- self.expect_flow_sequence()
- else:
- self.expect_block_sequence()
- elif isinstance(self.event, MappingStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_mapping():
- self.expect_flow_mapping()
- else:
- self.expect_block_mapping()
- else:
- raise EmitterError("expected NodeEvent, but got %s" % self.event)
-
- def expect_alias(self):
- if self.event.anchor is None:
- raise EmitterError("anchor is not specified for alias")
- self.process_anchor('*')
- self.state = self.states.pop()
-
- def expect_scalar(self):
- self.increase_indent(flow=True)
- self.process_scalar()
- self.indent = self.indents.pop()
- self.state = self.states.pop()
-
- # Flow sequence handlers.
-
- def expect_flow_sequence(self):
- self.write_indicator('[', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_sequence_item
-
- def expect_first_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(']', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- def expect_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(',', False)
- self.write_indent()
- self.write_indicator(']', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- # Flow mapping handlers.
-
- def expect_flow_mapping(self):
- self.write_indicator('{', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_mapping_key
-
- def expect_first_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator('}', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator('?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(',', False)
- self.write_indent()
- self.write_indicator('}', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator('?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_simple_value(self):
- self.write_indicator(':', False)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_value(self):
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.write_indicator(':', True)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- # Block sequence handlers.
-
- def expect_block_sequence(self):
- indentless = (self.mapping_context and not self.indention)
- self.increase_indent(flow=False, indentless=indentless)
- self.state = self.expect_first_block_sequence_item
-
- def expect_first_block_sequence_item(self):
- return self.expect_block_sequence_item(first=True)
-
- def expect_block_sequence_item(self, first=False):
- if not first and isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- self.write_indicator('-', True, indention=True)
- self.states.append(self.expect_block_sequence_item)
- self.expect_node(sequence=True)
-
- # Block mapping handlers.
-
- def expect_block_mapping(self):
- self.increase_indent(flow=False)
- self.state = self.expect_first_block_mapping_key
-
- def expect_first_block_mapping_key(self):
- return self.expect_block_mapping_key(first=True)
-
- def expect_block_mapping_key(self, first=False):
- if not first and isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- if self.check_simple_key():
- self.states.append(self.expect_block_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator('?', True, indention=True)
- self.states.append(self.expect_block_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_simple_value(self):
- self.write_indicator(':', False)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_value(self):
- self.write_indent()
- self.write_indicator(':', True, indention=True)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- # Checkers.
-
- def check_empty_sequence(self):
- return (isinstance(self.event, SequenceStartEvent) and self.events
- and isinstance(self.events[0], SequenceEndEvent))
-
- def check_empty_mapping(self):
- return (isinstance(self.event, MappingStartEvent) and self.events
- and isinstance(self.events[0], MappingEndEvent))
-
- def check_empty_document(self):
- if not isinstance(self.event, DocumentStartEvent) or not self.events:
- return False
- event = self.events[0]
- return (isinstance(event, ScalarEvent) and event.anchor is None
- and event.tag is None and event.implicit and event.value == '')
-
- def check_simple_key(self):
- length = 0
- if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- length += len(self.prepared_anchor)
- if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
- and self.event.tag is not None:
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(self.event.tag)
- length += len(self.prepared_tag)
- if isinstance(self.event, ScalarEvent):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- length += len(self.analysis.scalar)
- return (length < 128 and (isinstance(self.event, AliasEvent)
- or (isinstance(self.event, ScalarEvent)
- and not self.analysis.empty and not self.analysis.multiline)
- or self.check_empty_sequence() or self.check_empty_mapping()))
-
- # Anchor, Tag, and Scalar processors.
-
- def process_anchor(self, indicator):
- if self.event.anchor is None:
- self.prepared_anchor = None
- return
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- if self.prepared_anchor:
- self.write_indicator(indicator+self.prepared_anchor, True)
- self.prepared_anchor = None
-
- def process_tag(self):
- tag = self.event.tag
- if isinstance(self.event, ScalarEvent):
- if self.style is None:
- self.style = self.choose_scalar_style()
- if ((not self.canonical or tag is None) and
- ((self.style == '' and self.event.implicit[0])
- or (self.style != '' and self.event.implicit[1]))):
- self.prepared_tag = None
- return
- if self.event.implicit[0] and tag is None:
- tag = '!'
- self.prepared_tag = None
- else:
- if (not self.canonical or tag is None) and self.event.implicit:
- self.prepared_tag = None
- return
- if tag is None:
- raise EmitterError("tag is not specified")
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(tag)
- if self.prepared_tag:
- self.write_indicator(self.prepared_tag, True)
- self.prepared_tag = None
-
- def choose_scalar_style(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.event.style == '"' or self.canonical:
- return '"'
- if not self.event.style and self.event.implicit[0]:
- if (not (self.simple_key_context and
- (self.analysis.empty or self.analysis.multiline))
- and (self.flow_level and self.analysis.allow_flow_plain
- or (not self.flow_level and self.analysis.allow_block_plain))):
- return ''
- if self.event.style and self.event.style in '|>':
- if (not self.flow_level and not self.simple_key_context
- and self.analysis.allow_block):
- return self.event.style
- if not self.event.style or self.event.style == '\'':
- if (self.analysis.allow_single_quoted and
- not (self.simple_key_context and self.analysis.multiline)):
- return '\''
- return '"'
-
- def process_scalar(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.style is None:
- self.style = self.choose_scalar_style()
- split = (not self.simple_key_context)
- #if self.analysis.multiline and split \
- # and (not self.style or self.style in '\'\"'):
- # self.write_indent()
- if self.style == '"':
- self.write_double_quoted(self.analysis.scalar, split)
- elif self.style == '\'':
- self.write_single_quoted(self.analysis.scalar, split)
- elif self.style == '>':
- self.write_folded(self.analysis.scalar)
- elif self.style == '|':
- self.write_literal(self.analysis.scalar)
- else:
- self.write_plain(self.analysis.scalar, split)
- self.analysis = None
- self.style = None
-
- # Analyzers.
-
- def prepare_version(self, version):
- major, minor = version
- if major != 1:
- raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
- return '%d.%d' % (major, minor)
-
- def prepare_tag_handle(self, handle):
- if not handle:
- raise EmitterError("tag handle must not be empty")
- if handle[0] != '!' or handle[-1] != '!':
- raise EmitterError("tag handle must start and end with '!': %r" % handle)
- for ch in handle[1:-1]:
- if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_'):
- raise EmitterError("invalid character %r in the tag handle: %r"
- % (ch, handle))
- return handle
-
- def prepare_tag_prefix(self, prefix):
- if not prefix:
- raise EmitterError("tag prefix must not be empty")
- chunks = []
- start = end = 0
- if prefix[0] == '!':
- end = 1
- while end < len(prefix):
- ch = prefix[end]
- if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-;/?!:@&=+$,_.~*\'()[]':
- end += 1
- else:
- if start < end:
- chunks.append(prefix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- # bytes iteration yields ints on Python 3, so format directly (no ord()).
- for byte in data:
- chunks.append('%%%02X' % byte)
- if start < end:
- chunks.append(prefix[start:end])
- return ''.join(chunks)
-
- def prepare_tag(self, tag):
- if not tag:
- raise EmitterError("tag must not be empty")
- if tag == '!':
- return tag
- handle = None
- suffix = tag
- prefixes = sorted(self.tag_prefixes.keys())
- for prefix in prefixes:
- if tag.startswith(prefix) \
- and (prefix == '!' or len(prefix) < len(tag)):
- handle = self.tag_prefixes[prefix]
- suffix = tag[len(prefix):]
- chunks = []
- start = end = 0
- while end < len(suffix):
- ch = suffix[end]
- if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-;/?:@&=+$,_.~*\'()[]' \
- or (ch == '!' and handle != '!'):
- end += 1
- else:
- if start < end:
- chunks.append(suffix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for byte in data:
- chunks.append('%%%02X' % byte)
- if start < end:
- chunks.append(suffix[start:end])
- suffix_text = ''.join(chunks)
- if handle:
- return '%s%s' % (handle, suffix_text)
- else:
- return '!<%s>' % suffix_text
-
- def prepare_anchor(self, anchor):
- if not anchor:
- raise EmitterError("anchor must not be empty")
- for ch in anchor:
- if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_'):
- raise EmitterError("invalid character %r in the anchor: %r"
- % (ch, anchor))
- return anchor
-
- def analyze_scalar(self, scalar):
-
- # Empty scalar is a special case.
- if not scalar:
- return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
- allow_flow_plain=False, allow_block_plain=True,
- allow_single_quoted=True, allow_double_quoted=True,
- allow_block=False)
-
- # Indicators and special characters.
- block_indicators = False
- flow_indicators = False
- line_breaks = False
- special_characters = False
-
- # Important whitespace combinations.
- leading_space = False
- leading_break = False
- trailing_space = False
- trailing_break = False
- break_space = False
- space_break = False
-
- # Check document indicators.
- if scalar.startswith('---') or scalar.startswith('...'):
- block_indicators = True
- flow_indicators = True
-
- # First character or preceded by a whitespace.
- preceded_by_whitespace = True
-
- # Last character or followed by a whitespace.
- followed_by_whitespace = (len(scalar) == 1 or
- scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
-
- # The previous character is a space.
- previous_space = False
-
- # The previous character is a break.
- previous_break = False
-
- index = 0
- while index < len(scalar):
- ch = scalar[index]
-
- # Check for indicators.
- if index == 0:
- # Leading indicators are special characters.
- if ch in '#,[]{}&*!|>\'\"%@`':
- flow_indicators = True
- block_indicators = True
- if ch in '?:':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == '-' and followed_by_whitespace:
- flow_indicators = True
- block_indicators = True
- else:
- # Some indicators cannot appear inside a scalar either.
- if ch in ',?[]{}':
- flow_indicators = True
- if ch == ':':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == '#' and preceded_by_whitespace:
- flow_indicators = True
- block_indicators = True
-
- # Check for line breaks, special, and unicode characters.
- if ch in '\n\x85\u2028\u2029':
- line_breaks = True
- if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
- if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
- or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
- unicode_characters = True
- if not self.allow_unicode:
- special_characters = True
- else:
- special_characters = True
-
- # Detect important whitespace combinations.
- if ch == ' ':
- if index == 0:
- leading_space = True
- if index == len(scalar)-1:
- trailing_space = True
- if previous_break:
- break_space = True
- previous_space = True
- previous_break = False
- elif ch in '\n\x85\u2028\u2029':
- if index == 0:
- leading_break = True
- if index == len(scalar)-1:
- trailing_break = True
- if previous_space:
- space_break = True
- previous_space = False
- previous_break = True
- else:
- previous_space = False
- previous_break = False
-
- # Prepare for the next character.
- index += 1
- preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
- followed_by_whitespace = (index+1 >= len(scalar) or
- scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
-
- # Let's decide what styles are allowed.
- allow_flow_plain = True
- allow_block_plain = True
- allow_single_quoted = True
- allow_double_quoted = True
- allow_block = True
-
- # Leading and trailing whitespaces are bad for plain scalars.
- if (leading_space or leading_break
- or trailing_space or trailing_break):
- allow_flow_plain = allow_block_plain = False
-
- # We do not permit trailing spaces for block scalars.
- if trailing_space:
- allow_block = False
-
- # Spaces at the beginning of a new line are only acceptable for block
- # scalars.
- if break_space:
- allow_flow_plain = allow_block_plain = allow_single_quoted = False
-
- # Spaces followed by breaks, as well as special characters, are only
- # allowed for double-quoted scalars.
- if space_break or special_characters:
- allow_flow_plain = allow_block_plain = \
- allow_single_quoted = allow_block = False
-
- # Although the plain scalar writer supports breaks, we never emit
- # multiline plain scalars.
- if line_breaks:
- allow_flow_plain = allow_block_plain = False
-
- # Flow indicators are forbidden for flow plain scalars.
- if flow_indicators:
- allow_flow_plain = False
-
- # Block indicators are forbidden for block plain scalars.
- if block_indicators:
- allow_block_plain = False
-
- return ScalarAnalysis(scalar=scalar,
- empty=False, multiline=line_breaks,
- allow_flow_plain=allow_flow_plain,
- allow_block_plain=allow_block_plain,
- allow_single_quoted=allow_single_quoted,
- allow_double_quoted=allow_double_quoted,
- allow_block=allow_block)
-
- # Writers.
-
- def flush_stream(self):
- if hasattr(self.stream, 'flush'):
- self.stream.flush()
-
- def write_stream_start(self):
- # Write BOM if needed.
- if self.encoding and self.encoding.startswith('utf-16'):
- self.stream.write('\uFEFF'.encode(self.encoding))
-
- def write_stream_end(self):
- self.flush_stream()
-
- def write_indicator(self, indicator, need_whitespace,
- whitespace=False, indention=False):
- if self.whitespace or not need_whitespace:
- data = indicator
- else:
- data = ' '+indicator
- self.whitespace = whitespace
- self.indention = self.indention and indention
- self.column += len(data)
- self.open_ended = False
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_indent(self):
- indent = self.indent or 0
- if not self.indention or self.column > indent \
- or (self.column == indent and not self.whitespace):
- self.write_line_break()
- if self.column < indent:
- self.whitespace = True
- data = ' '*(indent-self.column)
- self.column = indent
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_line_break(self, data=None):
- if data is None:
- data = self.best_line_break
- self.whitespace = True
- self.indention = True
- self.line += 1
- self.column = 0
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_version_directive(self, version_text):
- data = '%%YAML %s' % version_text
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- def write_tag_directive(self, handle_text, prefix_text):
- data = '%%TAG %s %s' % (handle_text, prefix_text)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- # Scalar streams.
-
- def write_single_quoted(self, text, split=True):
- self.write_indicator('\'', True)
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch is None or ch != ' ':
- if start+1 == end and self.column > self.best_width and split \
- and start != 0 and end != len(text):
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch is None or ch not in '\n\x85\u2028\u2029':
- if text[start] == '\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- start = end
- else:
- if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch == '\'':
- data = '\'\''
- self.column += 2
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end + 1
- if ch is not None:
- spaces = (ch == ' ')
- breaks = (ch in '\n\x85\u2028\u2029')
- end += 1
- self.write_indicator('\'', False)
-
- ESCAPE_REPLACEMENTS = {
- '\0': '0',
- '\x07': 'a',
- '\x08': 'b',
- '\x09': 't',
- '\x0A': 'n',
- '\x0B': 'v',
- '\x0C': 'f',
- '\x0D': 'r',
- '\x1B': 'e',
- '\"': '\"',
- '\\': '\\',
- '\x85': 'N',
- '\xA0': '_',
- '\u2028': 'L',
- '\u2029': 'P',
- }
-
- def write_double_quoted(self, text, split=True):
- self.write_indicator('"', True)
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
- or not ('\x20' <= ch <= '\x7E'
- or (self.allow_unicode
- and ('\xA0' <= ch <= '\uD7FF'
- or '\uE000' <= ch <= '\uFFFD'))):
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- if ch in self.ESCAPE_REPLACEMENTS:
- data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
- elif ch <= '\xFF':
- data = '\\x%02X' % ord(ch)
- elif ch <= '\uFFFF':
- data = '\\u%04X' % ord(ch)
- else:
- data = '\\U%08X' % ord(ch)
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end+1
- if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
- and self.column+(end-start) > self.best_width and split:
- data = text[start:end]+'\\'
- if start < end:
- start = end
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- if text[start] == ' ':
- data = '\\'
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- end += 1
- self.write_indicator('"', False)
-
- def determine_block_hints(self, text):
- hints = ''
- if text:
- if text[0] in ' \n\x85\u2028\u2029':
- hints += str(self.best_indent)
- if text[-1] not in '\n\x85\u2028\u2029':
- hints += '-'
- elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
- hints += '+'
- return hints
-
- def write_folded(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator('>'+hints, True)
- if hints[-1:] == '+':
- self.open_ended = True
- self.write_line_break()
- leading_space = True
- spaces = False
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in '\n\x85\u2028\u2029':
- if not leading_space and ch is not None and ch != ' ' \
- and text[start] == '\n':
- self.write_line_break()
- leading_space = (ch == ' ')
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- elif spaces:
- if ch != ' ':
- if start+1 == end and self.column > self.best_width:
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- else:
- if ch is None or ch in ' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in '\n\x85\u2028\u2029')
- spaces = (ch == ' ')
- end += 1
-
- def write_literal(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator('|'+hints, True)
- if hints[-1:] == '+':
- self.open_ended = True
- self.write_line_break()
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in '\n\x85\u2028\u2029':
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- else:
- if ch is None or ch in '\n\x85\u2028\u2029':
- data = text[start:end]
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in '\n\x85\u2028\u2029')
- end += 1
-
- def write_plain(self, text, split=True):
- if self.root_context:
- self.open_ended = True
- if not text:
- return
- if not self.whitespace:
- data = ' '
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.whitespace = False
- self.indention = False
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch != ' ':
- if start+1 == end and self.column > self.best_width and split:
- self.write_indent()
- self.whitespace = False
- self.indention = False
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch not in '\n\x85\u2028\u2029':
- if text[start] == '\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- start = end
- else:
- if ch is None or ch in ' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- spaces = (ch == ' ')
- breaks = (ch in '\n\x85\u2028\u2029')
- end += 1
-
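
The analysis above boils down to a small decision table: leading or trailing whitespace rules out plain scalars, special characters leave double quoting as the only option, and indicator characters force some form of quoting. A minimal sketch of that table follows; the helper name and the deliberately reduced rule set are illustrative only, not part of the vendored module's API.

    def pick_style(scalar, allow_unicode=False):
        # Reduced restatement of Emitter.analyze_scalar's style rules.
        special = any(not ('\x20' <= ch <= '\x7E') and ch != '\n'
                      for ch in scalar) and not allow_unicode
        if special:
            return 'double-quoted'   # only double quotes can escape anything
        if scalar != scalar.strip(' \n'):
            return 'single-quoted'   # leading/trailing whitespace kills plain
        if scalar and (scalar[0] in '#,[]{}&*!|>\'"%@`' or ':' in scalar):
            return 'single-quoted'   # indicators are unsafe in plain scalars
        return 'plain'

    for s in ('simple', ' padded ', '{not: plain}', 'café'):
        print(repr(s), '->', pick_style(s))
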
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/error.py b/collectors/python.d.plugin/python_modules/pyyaml3/error.py
deleted file mode 100644
index 5fec7d449..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/error.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-class Mark:
-
- def __init__(self, name, index, line, column, buffer, pointer):
- self.name = name
- self.index = index
- self.line = line
- self.column = column
- self.buffer = buffer
- self.pointer = pointer
-
- def get_snippet(self, indent=4, max_length=75):
- if self.buffer is None:
- return None
- head = ''
- start = self.pointer
- while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
- start -= 1
- if self.pointer-start > max_length/2-1:
- head = ' ... '
- start += 5
- break
- tail = ''
- end = self.pointer
- while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
- end += 1
- if end-self.pointer > max_length/2-1:
- tail = ' ... '
- end -= 5
- break
- snippet = self.buffer[start:end]
- return ' '*indent + head + snippet + tail + '\n' \
- + ' '*(indent+self.pointer-start+len(head)) + '^'
-
- def __str__(self):
- snippet = self.get_snippet()
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line+1, self.column+1)
- if snippet is not None:
- where += ":\n"+snippet
- return where
-
-class YAMLError(Exception):
- pass
-
-class MarkedYAMLError(YAMLError):
-
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None):
- self.context = context
- self.context_mark = context_mark
- self.problem = problem
- self.problem_mark = problem_mark
- self.note = note
-
- def __str__(self):
- lines = []
- if self.context is not None:
- lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None
- or self.context_mark.name != self.problem_mark.name
- or self.context_mark.line != self.problem_mark.line
- or self.context_mark.column != self.problem_mark.column):
- lines.append(str(self.context_mark))
- if self.problem is not None:
- lines.append(self.problem)
- if self.problem_mark is not None:
- lines.append(str(self.problem_mark))
- if self.note is not None:
- lines.append(self.note)
- return '\n'.join(lines)
-
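
Mark keeps a reference to the buffer and pointer precisely so that get_snippet() can reproduce the offending line with a caret under the failing column. A usage sketch; the pyyaml3 import path refers to this vendored copy, and the exact snippet layout may vary slightly.

    from pyyaml3.error import Mark

    buffer = 'retries: [1, 2\ntimeout: 5\x00'
    mark = Mark('<config>', 14, 0, 14, buffer, 14)  # points just past the '2'
    print(mark)
    #  in "<config>", line 1, column 15:
    #      retries: [1, 2
    #                    ^
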
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/events.py b/collectors/python.d.plugin/python_modules/pyyaml3/events.py
deleted file mode 100644
index 283452add..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/events.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Abstract classes.
-
-class Event(object):
- def __init__(self, start_mark=None, end_mark=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
- if hasattr(self, key)]
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-class NodeEvent(Event):
- def __init__(self, anchor, start_mark=None, end_mark=None):
- self.anchor = anchor
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class CollectionStartEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
- flow_style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class CollectionEndEvent(Event):
- pass
-
-# Implementations.
-
-class StreamStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None, encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndEvent(Event):
- pass
-
-class DocumentStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, version=None, tags=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
- self.version = version
- self.tags = tags
-
-class DocumentEndEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
-
-class AliasEvent(NodeEvent):
- pass
-
-class ScalarEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, value,
- start_mark=None, end_mark=None, style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class SequenceStartEvent(CollectionStartEvent):
- pass
-
-class SequenceEndEvent(CollectionEndEvent):
- pass
-
-class MappingStartEvent(CollectionStartEvent):
- pass
-
-class MappingEndEvent(CollectionEndEvent):
- pass
-
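
Every class above is a plain record; what matters is the order in which the parser emits them, with each *StartEvent matched by a corresponding *EndEvent. The upstream package exposes the same stream through yaml.parse(), which makes the bracketing visible:

    import yaml  # upstream PyYAML; this vendored copy mirrors its event classes

    for event in yaml.parse('a: [1, 2]'):
        print(type(event).__name__)
    # StreamStartEvent, DocumentStartEvent, MappingStartEvent, ScalarEvent,
    # SequenceStartEvent, ScalarEvent, ScalarEvent, SequenceEndEvent,
    # MappingEndEvent, DocumentEndEvent, StreamEndEvent
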
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/loader.py b/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
deleted file mode 100644
index 7ef6cf815..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
-
-from .reader import *
-from .scanner import *
-from .parser import *
-from .composer import *
-from .constructor import *
-from .resolver import *
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
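
Each loader class is nothing but a mixin stack: Reader feeds characters to Scanner, Scanner's tokens drive Parser, Parser's events are composed into nodes, and the constructor turns nodes into Python objects. A sketch of driving SafeLoader by hand; get_single_data() and dispose() come from the constructor and parser mixins, and the pyyaml3 import path is the vendored one:

    from pyyaml3.loader import SafeLoader

    loader = SafeLoader('retries: 3\ntimeout: 5\n')
    try:
        data = loader.get_single_data()  # runs the whole pipeline once
    finally:
        loader.dispose()                 # clears the parser's self-references
    print(data)  # {'retries': 3, 'timeout': 5}
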
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py b/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
deleted file mode 100644
index ed2a1b43e..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-class Node(object):
- def __init__(self, tag, value, start_mark, end_mark):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- value = self.value
- #if isinstance(value, list):
- # if len(value) == 0:
- # value = '<empty>'
- # elif len(value) == 1:
- # value = '<1 item>'
- # else:
- # value = '<%d items>' % len(value)
- #else:
- # if len(value) > 75:
- # value = repr(value[:70]+u' ... ')
- # else:
- # value = repr(value)
- value = repr(value)
- return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
-
-class ScalarNode(Node):
- id = 'scalar'
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class CollectionNode(Node):
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, flow_style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class SequenceNode(CollectionNode):
- id = 'sequence'
-
-class MappingNode(CollectionNode):
- id = 'mapping'
-
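
Nodes sit between events and native objects: every node carries a resolved tag, and collection nodes nest other nodes in their value. The upstream yaml.compose() stops the pipeline at this stage, which makes tag resolution easy to inspect:

    import yaml  # upstream PyYAML; the node classes match the vendored ones

    node = yaml.compose('a: [1, 2]')
    print(node.tag)               # tag:yaml.org,2002:map
    key, value = node.value[0]    # mapping values are (key, value) node pairs
    print(key.tag, key.value)     # tag:yaml.org,2002:str a
    print(value.tag)              # tag:yaml.org,2002:seq
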
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/parser.py b/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
deleted file mode 100644
index bcec7f994..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
+++ /dev/null
@@ -1,590 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# The following YAML grammar is LL(1) and is parsed by a recursive descent
-# parser.
-#
-# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-# implicit_document ::= block_node DOCUMENT-END*
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-# block_node_or_indentless_sequence ::=
-# ALIAS
-# | properties (block_content | indentless_block_sequence)?
-# | block_content
-# | indentless_block_sequence
-# block_node ::= ALIAS
-# | properties block_content?
-# | block_content
-# flow_node ::= ALIAS
-# | properties flow_content?
-# | flow_content
-# properties ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content ::= block_collection | flow_collection | SCALAR
-# flow_content ::= flow_collection | SCALAR
-# block_collection ::= block_sequence | block_mapping
-# flow_collection ::= flow_sequence | flow_mapping
-# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-# block_mapping ::= BLOCK-MAPPING_START
-# ((KEY block_node_or_indentless_sequence?)?
-# (VALUE block_node_or_indentless_sequence?)?)*
-# BLOCK-END
-# flow_sequence ::= FLOW-SEQUENCE-START
-# (flow_sequence_entry FLOW-ENTRY)*
-# flow_sequence_entry?
-# FLOW-SEQUENCE-END
-# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping ::= FLOW-MAPPING-START
-# (flow_mapping_entry FLOW-ENTRY)*
-# flow_mapping_entry?
-# FLOW-MAPPING-END
-# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-#
-# FIRST sets:
-#
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
-# indentless_sequence: { ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-
-__all__ = ['Parser', 'ParserError']
-
-from .error import MarkedYAMLError
-from .tokens import *
-from .events import *
-from .scanner import *
-
-class ParserError(MarkedYAMLError):
- pass
-
-class Parser:
-    # Since writing a recursive descent parser is a straightforward task, we
-    # do not add many comments here.
-
- DEFAULT_TAGS = {
- '!': '!',
- '!!': 'tag:yaml.org,2002:',
- }
-
- def __init__(self):
- self.current_event = None
- self.yaml_version = None
- self.tag_handles = {}
- self.states = []
- self.marks = []
- self.state = self.parse_stream_start
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def check_event(self, *choices):
- # Check the type of the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- if self.current_event is not None:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.current_event, choice):
- return True
- return False
-
- def peek_event(self):
- # Get the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- return self.current_event
-
- def get_event(self):
- # Get the next event and proceed further.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- value = self.current_event
- self.current_event = None
- return value
-
- # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
- # implicit_document ::= block_node DOCUMENT-END*
- # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-
- def parse_stream_start(self):
-
- # Parse the stream start.
- token = self.get_token()
- event = StreamStartEvent(token.start_mark, token.end_mark,
- encoding=token.encoding)
-
- # Prepare the next state.
- self.state = self.parse_implicit_document_start
-
- return event
-
- def parse_implicit_document_start(self):
-
- # Parse an implicit document.
- if not self.check_token(DirectiveToken, DocumentStartToken,
- StreamEndToken):
- self.tag_handles = self.DEFAULT_TAGS
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=False)
-
- # Prepare the next state.
- self.states.append(self.parse_document_end)
- self.state = self.parse_block_node
-
- return event
-
- else:
- return self.parse_document_start()
-
- def parse_document_start(self):
-
- # Parse any extra document end indicators.
- while self.check_token(DocumentEndToken):
- self.get_token()
-
- # Parse an explicit document.
- if not self.check_token(StreamEndToken):
- token = self.peek_token()
- start_mark = token.start_mark
- version, tags = self.process_directives()
- if not self.check_token(DocumentStartToken):
- raise ParserError(None, None,
- "expected '<document start>', but found %r"
- % self.peek_token().id,
- self.peek_token().start_mark)
- token = self.get_token()
- end_mark = token.end_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=True, version=version, tags=tags)
- self.states.append(self.parse_document_end)
- self.state = self.parse_document_content
- else:
- # Parse the end of the stream.
- token = self.get_token()
- event = StreamEndEvent(token.start_mark, token.end_mark)
- assert not self.states
- assert not self.marks
- self.state = None
- return event
-
- def parse_document_end(self):
-
- # Parse the document end.
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- explicit = False
- if self.check_token(DocumentEndToken):
- token = self.get_token()
- end_mark = token.end_mark
- explicit = True
- event = DocumentEndEvent(start_mark, end_mark,
- explicit=explicit)
-
- # Prepare the next state.
- self.state = self.parse_document_start
-
- return event
-
- def parse_document_content(self):
- if self.check_token(DirectiveToken,
- DocumentStartToken, DocumentEndToken, StreamEndToken):
- event = self.process_empty_scalar(self.peek_token().start_mark)
- self.state = self.states.pop()
- return event
- else:
- return self.parse_block_node()
-
- def process_directives(self):
- self.yaml_version = None
- self.tag_handles = {}
- while self.check_token(DirectiveToken):
- token = self.get_token()
- if token.name == 'YAML':
- if self.yaml_version is not None:
- raise ParserError(None, None,
- "found duplicate YAML directive", token.start_mark)
- major, minor = token.value
- if major != 1:
- raise ParserError(None, None,
- "found incompatible YAML document (version 1.* is required)",
- token.start_mark)
- self.yaml_version = token.value
- elif token.name == 'TAG':
- handle, prefix = token.value
- if handle in self.tag_handles:
- raise ParserError(None, None,
- "duplicate tag handle %r" % handle,
- token.start_mark)
- self.tag_handles[handle] = prefix
- if self.tag_handles:
- value = self.yaml_version, self.tag_handles.copy()
- else:
- value = self.yaml_version, None
- for key in self.DEFAULT_TAGS:
- if key not in self.tag_handles:
- self.tag_handles[key] = self.DEFAULT_TAGS[key]
- return value
-
- # block_node_or_indentless_sequence ::= ALIAS
- # | properties (block_content | indentless_block_sequence)?
- # | block_content
- # | indentless_block_sequence
- # block_node ::= ALIAS
- # | properties block_content?
- # | block_content
- # flow_node ::= ALIAS
- # | properties flow_content?
- # | flow_content
- # properties ::= TAG ANCHOR? | ANCHOR TAG?
- # block_content ::= block_collection | flow_collection | SCALAR
- # flow_content ::= flow_collection | SCALAR
- # block_collection ::= block_sequence | block_mapping
- # flow_collection ::= flow_sequence | flow_mapping
-
- def parse_block_node(self):
- return self.parse_node(block=True)
-
- def parse_flow_node(self):
- return self.parse_node()
-
- def parse_block_node_or_indentless_sequence(self):
- return self.parse_node(block=True, indentless_sequence=True)
-
- def parse_node(self, block=False, indentless_sequence=False):
- if self.check_token(AliasToken):
- token = self.get_token()
- event = AliasEvent(token.value, token.start_mark, token.end_mark)
- self.state = self.states.pop()
- else:
- anchor = None
- tag = None
- start_mark = end_mark = tag_mark = None
- if self.check_token(AnchorToken):
- token = self.get_token()
- start_mark = token.start_mark
- end_mark = token.end_mark
- anchor = token.value
- if self.check_token(TagToken):
- token = self.get_token()
- tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- elif self.check_token(TagToken):
- token = self.get_token()
- start_mark = tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- if self.check_token(AnchorToken):
- token = self.get_token()
- end_mark = token.end_mark
- anchor = token.value
- if tag is not None:
- handle, suffix = tag
- if handle is not None:
- if handle not in self.tag_handles:
- raise ParserError("while parsing a node", start_mark,
- "found undefined tag handle %r" % handle,
- tag_mark)
- tag = self.tag_handles[handle]+suffix
- else:
- tag = suffix
- #if tag == '!':
- # raise ParserError("while parsing a node", start_mark,
- # "found non-specific tag '!'", tag_mark,
- # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
- if start_mark is None:
- start_mark = end_mark = self.peek_token().start_mark
- event = None
- implicit = (tag is None or tag == '!')
- if indentless_sequence and self.check_token(BlockEntryToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark)
- self.state = self.parse_indentless_sequence_entry
- else:
- if self.check_token(ScalarToken):
- token = self.get_token()
- end_mark = token.end_mark
- if (token.plain and tag is None) or tag == '!':
- implicit = (True, False)
- elif tag is None:
- implicit = (False, True)
- else:
- implicit = (False, False)
- event = ScalarEvent(anchor, tag, implicit, token.value,
- start_mark, end_mark, style=token.style)
- self.state = self.states.pop()
- elif self.check_token(FlowSequenceStartToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_sequence_first_entry
- elif self.check_token(FlowMappingStartToken):
- end_mark = self.peek_token().end_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_mapping_first_key
- elif block and self.check_token(BlockSequenceStartToken):
- end_mark = self.peek_token().start_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_sequence_first_entry
- elif block and self.check_token(BlockMappingStartToken):
- end_mark = self.peek_token().start_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_mapping_first_key
- elif anchor is not None or tag is not None:
- # Empty scalars are allowed even if a tag or an anchor is
- # specified.
- event = ScalarEvent(anchor, tag, (implicit, False), '',
- start_mark, end_mark)
- self.state = self.states.pop()
- else:
- if block:
- node = 'block'
- else:
- node = 'flow'
- token = self.peek_token()
- raise ParserError("while parsing a %s node" % node, start_mark,
- "expected the node content, but found %r" % token.id,
- token.start_mark)
- return event
-
- # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-
- def parse_block_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_sequence_entry()
-
- def parse_block_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken, BlockEndToken):
- self.states.append(self.parse_block_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_block_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block collection", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
- def parse_indentless_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken,
- KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_indentless_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_indentless_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- token = self.peek_token()
- event = SequenceEndEvent(token.start_mark, token.start_mark)
- self.state = self.states.pop()
- return event
-
- # block_mapping ::= BLOCK-MAPPING_START
- # ((KEY block_node_or_indentless_sequence?)?
- # (VALUE block_node_or_indentless_sequence?)?)*
- # BLOCK-END
-
- def parse_block_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_mapping_key()
-
- def parse_block_mapping_key(self):
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_value)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_value
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block mapping", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_block_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_key)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_block_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- # flow_sequence ::= FLOW-SEQUENCE-START
- # (flow_sequence_entry FLOW-ENTRY)*
- # flow_sequence_entry?
- # FLOW-SEQUENCE-END
- # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- #
- # Note that while production rules for both flow_sequence_entry and
- # flow_mapping_entry are equal, their interpretations are different.
- # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
-    # generates an inline mapping (set syntax).
-
- def parse_flow_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_sequence_entry(first=True)
-
- def parse_flow_sequence_entry(self, first=False):
- if not self.check_token(FlowSequenceEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow sequence", self.marks[-1],
- "expected ',' or ']', but got %r" % token.id, token.start_mark)
-
- if self.check_token(KeyToken):
- token = self.peek_token()
- event = MappingStartEvent(None, None, True,
- token.start_mark, token.end_mark,
- flow_style=True)
- self.state = self.parse_flow_sequence_entry_mapping_key
- return event
- elif not self.check_token(FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry)
- return self.parse_flow_node()
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_sequence_entry_mapping_key(self):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_value
- return self.process_empty_scalar(token.end_mark)
-
- def parse_flow_sequence_entry_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_end)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_sequence_entry_mapping_end(self):
- self.state = self.parse_flow_sequence_entry
- token = self.peek_token()
- return MappingEndEvent(token.start_mark, token.start_mark)
-
- # flow_mapping ::= FLOW-MAPPING-START
- # (flow_mapping_entry FLOW-ENTRY)*
- # flow_mapping_entry?
- # FLOW-MAPPING-END
- # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
- def parse_flow_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_mapping_key(first=True)
-
- def parse_flow_mapping_key(self, first=False):
- if not self.check_token(FlowMappingEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow mapping", self.marks[-1],
- "expected ',' or '}', but got %r" % token.id, token.start_mark)
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_value
- return self.process_empty_scalar(token.end_mark)
- elif not self.check_token(FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_empty_value)
- return self.parse_flow_node()
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_key)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_mapping_empty_value(self):
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(self.peek_token().start_mark)
-
- def process_empty_scalar(self, mark):
- return ScalarEvent(None, None, (True, False), '', mark, mark)
-
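
The parser is a hand-rolled state machine: each parse_* method emits exactly one event and leaves its continuation in self.state (or pushes it onto self.states), so check_event(), peek_event() and get_event() can pull events lazily. The explicit flag set by parse_document_start() versus parse_implicit_document_start() is observable through the upstream API:

    import yaml

    for event in yaml.parse('--- 1\n...\n--- 2\n'):
        if isinstance(event, yaml.DocumentStartEvent):
            print('document start, explicit =', event.explicit)
    # document start, explicit = True
    # document start, explicit = True
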
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/reader.py b/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
deleted file mode 100644
index 0a515fd64..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# SPDX-License-Identifier: MIT
-# This module contains abstractions for the input stream. You don't have to
-# look further; there is no pretty code here.
-#
-# We define two classes here.
-#
-# Mark(source, line, column)
-# It's just a record and its only use is producing nice error messages.
-# Parser does not use it for any other purposes.
-#
-# Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-# reader.peek(index=0) - return the character `index` positions ahead
-# reader.forward(length=1) - move the current position forward by `length` characters.
-# reader.index - the index of the current character.
-# reader.line, reader.column - the line and the column of the current character.
-
-__all__ = ['Reader', 'ReaderError']
-
-from .error import YAMLError, Mark
-
-import codecs, re
-
-class ReaderError(YAMLError):
-
- def __init__(self, name, position, character, encoding, reason):
- self.name = name
- self.character = character
- self.position = position
- self.encoding = encoding
- self.reason = reason
-
- def __str__(self):
- if isinstance(self.character, bytes):
- return "'%s' codec can't decode byte #x%02x: %s\n" \
- " in \"%s\", position %d" \
- % (self.encoding, ord(self.character), self.reason,
- self.name, self.position)
- else:
- return "unacceptable character #x%04x: %s\n" \
- " in \"%s\", position %d" \
- % (self.character, self.reason,
- self.name, self.position)
-
-class Reader(object):
- # Reader:
- # - determines the data encoding and converts it to a unicode string,
- # - checks if characters are in allowed range,
- # - adds '\0' to the end.
-
- # Reader accepts
- # - a `bytes` object,
- # - a `str` object,
- # - a file-like object with its `read` method returning `str`,
- # - a file-like object with its `read` method returning `unicode`.
-
- # Yeah, it's ugly and slow.
-
- def __init__(self, stream):
- self.name = None
- self.stream = None
- self.stream_pointer = 0
- self.eof = True
- self.buffer = ''
- self.pointer = 0
- self.raw_buffer = None
- self.raw_decode = None
- self.encoding = None
- self.index = 0
- self.line = 0
- self.column = 0
- if isinstance(stream, str):
- self.name = "<unicode string>"
- self.check_printable(stream)
- self.buffer = stream+'\0'
- elif isinstance(stream, bytes):
- self.name = "<byte string>"
- self.raw_buffer = stream
- self.determine_encoding()
- else:
- self.stream = stream
- self.name = getattr(stream, 'name', "<file>")
- self.eof = False
- self.raw_buffer = None
- self.determine_encoding()
-
- def peek(self, index=0):
- try:
- return self.buffer[self.pointer+index]
- except IndexError:
- self.update(index+1)
- return self.buffer[self.pointer+index]
-
- def prefix(self, length=1):
- if self.pointer+length >= len(self.buffer):
- self.update(length)
- return self.buffer[self.pointer:self.pointer+length]
-
- def forward(self, length=1):
- if self.pointer+length+1 >= len(self.buffer):
- self.update(length+1)
- while length:
- ch = self.buffer[self.pointer]
- self.pointer += 1
- self.index += 1
- if ch in '\n\x85\u2028\u2029' \
- or (ch == '\r' and self.buffer[self.pointer] != '\n'):
- self.line += 1
- self.column = 0
- elif ch != '\uFEFF':
- self.column += 1
- length -= 1
-
- def get_mark(self):
- if self.stream is None:
- return Mark(self.name, self.index, self.line, self.column,
- self.buffer, self.pointer)
- else:
- return Mark(self.name, self.index, self.line, self.column,
- None, None)
-
- def determine_encoding(self):
- while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
- self.update_raw()
- if isinstance(self.raw_buffer, bytes):
- if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
- self.raw_decode = codecs.utf_16_le_decode
- self.encoding = 'utf-16-le'
- elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
- self.raw_decode = codecs.utf_16_be_decode
- self.encoding = 'utf-16-be'
- else:
- self.raw_decode = codecs.utf_8_decode
- self.encoding = 'utf-8'
- self.update(1)
-
- NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
- def check_printable(self, data):
- match = self.NON_PRINTABLE.search(data)
- if match:
- character = match.group()
- position = self.index+(len(self.buffer)-self.pointer)+match.start()
- raise ReaderError(self.name, position, ord(character),
- 'unicode', "special characters are not allowed")
-
- def update(self, length):
- if self.raw_buffer is None:
- return
- self.buffer = self.buffer[self.pointer:]
- self.pointer = 0
- while len(self.buffer) < length:
- if not self.eof:
- self.update_raw()
- if self.raw_decode is not None:
- try:
- data, converted = self.raw_decode(self.raw_buffer,
- 'strict', self.eof)
- except UnicodeDecodeError as exc:
- character = self.raw_buffer[exc.start]
- if self.stream is not None:
- position = self.stream_pointer-len(self.raw_buffer)+exc.start
- else:
- position = exc.start
- raise ReaderError(self.name, position, character,
- exc.encoding, exc.reason)
- else:
- data = self.raw_buffer
- converted = len(data)
- self.check_printable(data)
- self.buffer += data
- self.raw_buffer = self.raw_buffer[converted:]
- if self.eof:
- self.buffer += '\0'
- self.raw_buffer = None
- break
-
- def update_raw(self, size=4096):
- data = self.stream.read(size)
- if self.raw_buffer is None:
- self.raw_buffer = data
- else:
- self.raw_buffer += data
- self.stream_pointer += len(data)
- if not data:
- self.eof = True
-
-#try:
-# import psyco
-# psyco.bind(Reader)
-#except ImportError:
-# pass
-
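
The Reader contract is deliberately small: peek() looks ahead without consuming, prefix() returns a slice, and forward() consumes characters while maintaining index, line and column. A usage sketch against the vendored module (import path assumed):

    from pyyaml3.reader import Reader

    r = Reader('héllo\nworld')
    print(r.peek())          # 'h'; r.peek(1) would return 'é'
    print(r.prefix(5))       # 'héllo'
    r.forward(6)             # consume through the line break
    print(r.line, r.column)  # 1 0
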
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/representer.py b/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
deleted file mode 100644
index 756a18dcc..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
+++ /dev/null
@@ -1,375 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
- 'RepresenterError']
-
-from .error import *
-from .nodes import *
-
-import datetime, sys, copyreg, types, base64
-
-class RepresenterError(YAMLError):
- pass
-
-class BaseRepresenter:
-
- yaml_representers = {}
- yaml_multi_representers = {}
-
- def __init__(self, default_style=None, default_flow_style=None):
- self.default_style = default_style
- self.default_flow_style = default_flow_style
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent(self, data):
- node = self.represent_data(data)
- self.serialize(node)
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent_data(self, data):
- if self.ignore_aliases(data):
- self.alias_key = None
- else:
- self.alias_key = id(data)
- if self.alias_key is not None:
- if self.alias_key in self.represented_objects:
- node = self.represented_objects[self.alias_key]
- #if node is None:
- # raise RepresenterError("recursive objects are not allowed: %r" % data)
- return node
- #self.represented_objects[alias_key] = None
- self.object_keeper.append(data)
- data_types = type(data).__mro__
- if data_types[0] in self.yaml_representers:
- node = self.yaml_representers[data_types[0]](self, data)
- else:
- for data_type in data_types:
- if data_type in self.yaml_multi_representers:
- node = self.yaml_multi_representers[data_type](self, data)
- break
- else:
- if None in self.yaml_multi_representers:
- node = self.yaml_multi_representers[None](self, data)
- elif None in self.yaml_representers:
- node = self.yaml_representers[None](self, data)
- else:
- node = ScalarNode(None, str(data))
- #if alias_key is not None:
- # self.represented_objects[alias_key] = node
- return node
-
- @classmethod
- def add_representer(cls, data_type, representer):
- if not 'yaml_representers' in cls.__dict__:
- cls.yaml_representers = cls.yaml_representers.copy()
- cls.yaml_representers[data_type] = representer
-
- @classmethod
- def add_multi_representer(cls, data_type, representer):
- if not 'yaml_multi_representers' in cls.__dict__:
- cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
- cls.yaml_multi_representers[data_type] = representer
-
- def represent_scalar(self, tag, value, style=None):
- if style is None:
- style = self.default_style
- node = ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
- def represent_sequence(self, tag, sequence, flow_style=None):
- value = []
- node = SequenceNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- for item in sequence:
- node_item = self.represent_data(item)
- if not (isinstance(node_item, ScalarNode) and not node_item.style):
- best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_mapping(self, tag, mapping, flow_style=None):
- value = []
- node = MappingNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- if hasattr(mapping, 'items'):
- mapping = list(mapping.items())
- try:
- mapping = sorted(mapping)
- except TypeError:
- pass
- for item_key, item_value in mapping:
- node_key = self.represent_data(item_key)
- node_value = self.represent_data(item_value)
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not node_value.style):
- best_style = False
- value.append((node_key, node_value))
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def ignore_aliases(self, data):
- return False
-
-class SafeRepresenter(BaseRepresenter):
-
- def ignore_aliases(self, data):
- if data in [None, ()]:
- return True
- if isinstance(data, (str, bytes, bool, int, float)):
- return True
-
- def represent_none(self, data):
- return self.represent_scalar('tag:yaml.org,2002:null', 'null')
-
- def represent_str(self, data):
- return self.represent_scalar('tag:yaml.org,2002:str', data)
-
- def represent_binary(self, data):
- if hasattr(base64, 'encodebytes'):
- data = base64.encodebytes(data).decode('ascii')
- else:
- data = base64.encodestring(data).decode('ascii')
- return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
-
- def represent_bool(self, data):
- if data:
- value = 'true'
- else:
- value = 'false'
- return self.represent_scalar('tag:yaml.org,2002:bool', value)
-
- def represent_int(self, data):
- return self.represent_scalar('tag:yaml.org,2002:int', str(data))
-
- inf_value = 1e300
- while repr(inf_value) != repr(inf_value*inf_value):
- inf_value *= inf_value
-
- def represent_float(self, data):
- if data != data or (data == 0.0 and data == 1.0):
- value = '.nan'
- elif data == self.inf_value:
- value = '.inf'
- elif data == -self.inf_value:
- value = '-.inf'
- else:
- value = repr(data).lower()
- # Note that in some cases `repr(data)` represents a float number
- # without the decimal parts. For instance:
- # >>> repr(1e17)
- # '1e17'
- # Unfortunately, this is not a valid float representation according
- # to the definition of the `!!float` tag. We fix this by adding
- # '.0' before the 'e' symbol.
- if '.' not in value and 'e' in value:
- value = value.replace('e', '.0e', 1)
- return self.represent_scalar('tag:yaml.org,2002:float', value)
-
- def represent_list(self, data):
- #pairs = (len(data) > 0 and isinstance(data, list))
- #if pairs:
- # for item in data:
- # if not isinstance(item, tuple) or len(item) != 2:
- # pairs = False
- # break
- #if not pairs:
- return self.represent_sequence('tag:yaml.org,2002:seq', data)
- #value = []
- #for item_key, item_value in data:
- # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
- # [(item_key, item_value)]))
- #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
- def represent_dict(self, data):
- return self.represent_mapping('tag:yaml.org,2002:map', data)
-
- def represent_set(self, data):
- value = {}
- for key in data:
- value[key] = None
- return self.represent_mapping('tag:yaml.org,2002:set', value)
-
- def represent_date(self, data):
- value = data.isoformat()
- return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
-
- def represent_datetime(self, data):
- value = data.isoformat(' ')
- return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
-
- def represent_yaml_object(self, tag, data, cls, flow_style=None):
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__.copy()
- return self.represent_mapping(tag, state, flow_style=flow_style)
-
- def represent_undefined(self, data):
- raise RepresenterError("cannot represent an object: %s" % data)
-
-SafeRepresenter.add_representer(type(None),
- SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
- SafeRepresenter.represent_str)
-
-SafeRepresenter.add_representer(bytes,
- SafeRepresenter.represent_binary)
-
-SafeRepresenter.add_representer(bool,
- SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
- SafeRepresenter.represent_int)
-
-SafeRepresenter.add_representer(float,
- SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
- SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
- SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(datetime.date,
- SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
- SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
- SafeRepresenter.represent_undefined)
-
-class Representer(SafeRepresenter):
-
- def represent_complex(self, data):
- if data.imag == 0.0:
- data = '%r' % data.real
- elif data.real == 0.0:
- data = '%rj' % data.imag
- elif data.imag > 0:
- data = '%r+%rj' % (data.real, data.imag)
- else:
- data = '%r%rj' % (data.real, data.imag)
- return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
-
- def represent_tuple(self, data):
- return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
-
- def represent_name(self, data):
- name = '%s.%s' % (data.__module__, data.__name__)
- return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
-
- def represent_module(self, data):
- return self.represent_scalar(
- 'tag:yaml.org,2002:python/module:'+data.__name__, '')
-
- def represent_object(self, data):
- # We use __reduce__ API to save the data. data.__reduce__ returns
- # a tuple of length 2-5:
- # (function, args, state, listitems, dictitems)
-
-        # For reconstructing, we call function(*args), then set its state,
- # listitems, and dictitems if they are not None.
-
- # A special case is when function.__name__ == '__newobj__'. In this
- # case we create the object with args[0].__new__(*args).
-
- # Another special case is when __reduce__ returns a string - we don't
- # support it.
-
- # We produce a !!python/object, !!python/object/new or
- # !!python/object/apply node.
-
- cls = type(data)
- if cls in copyreg.dispatch_table:
- reduce = copyreg.dispatch_table[cls](data)
- elif hasattr(data, '__reduce_ex__'):
- reduce = data.__reduce_ex__(2)
- elif hasattr(data, '__reduce__'):
- reduce = data.__reduce__()
- else:
- raise RepresenterError("cannot represent object: %r" % data)
- reduce = (list(reduce)+[None]*5)[:5]
- function, args, state, listitems, dictitems = reduce
- args = list(args)
- if state is None:
- state = {}
- if listitems is not None:
- listitems = list(listitems)
- if dictitems is not None:
- dictitems = dict(dictitems)
- if function.__name__ == '__newobj__':
- function = args[0]
- args = args[1:]
- tag = 'tag:yaml.org,2002:python/object/new:'
- newobj = True
- else:
- tag = 'tag:yaml.org,2002:python/object/apply:'
- newobj = False
- function_name = '%s.%s' % (function.__module__, function.__name__)
- if not args and not listitems and not dictitems \
- and isinstance(state, dict) and newobj:
- return self.represent_mapping(
- 'tag:yaml.org,2002:python/object:'+function_name, state)
- if not listitems and not dictitems \
- and isinstance(state, dict) and not state:
- return self.represent_sequence(tag+function_name, args)
- value = {}
- if args:
- value['args'] = args
- if state or not isinstance(state, dict):
- value['state'] = state
- if listitems:
- value['listitems'] = listitems
- if dictitems:
- value['dictitems'] = dictitems
- return self.represent_mapping(tag+function_name, value)
-
-Representer.add_representer(complex,
- Representer.represent_complex)
-
-Representer.add_representer(tuple,
- Representer.represent_tuple)
-
-Representer.add_representer(type,
- Representer.represent_name)
-
-Representer.add_representer(types.FunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
- Representer.represent_module)
-
-Representer.add_multi_representer(object,
- Representer.represent_object)
-
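
Registering a representer is how custom types opt into dumping: represent_data() walks type(data).__mro__ until it finds a registered handler, and represent_mapping() recurses through represent_data() for every key and value. A sketch using the upstream convenience wrapper; yaml.add_representer delegates to the add_representer classmethod above, and the exact output layout depends on the default flow style:

    import yaml

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

    def represent_point(dumper, point):
        return dumper.represent_mapping('!point', {'x': point.x, 'y': point.y})

    yaml.add_representer(Point, represent_point)
    print(yaml.dump(Point(1, 2)))  # e.g. "!point {x: 1, y: 2}" or block form
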
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py b/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
deleted file mode 100644
index 50945e04d..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
+++ /dev/null
@@ -1,225 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseResolver', 'Resolver']
-
-from .error import *
-from .nodes import *
-
-import re
-
-class ResolverError(YAMLError):
- pass
-
-class BaseResolver:
-
- DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
- DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
- DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
-
- yaml_implicit_resolvers = {}
- yaml_path_resolvers = {}
-
- def __init__(self):
- self.resolver_exact_paths = []
- self.resolver_prefix_paths = []
-
- @classmethod
- def add_implicit_resolver(cls, tag, regexp, first):
- if not 'yaml_implicit_resolvers' in cls.__dict__:
- cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
- if first is None:
- first = [None]
- for ch in first:
- cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
-
- @classmethod
- def add_path_resolver(cls, tag, path, kind=None):
- # Note: `add_path_resolver` is experimental. The API could be changed.
-        # `path` is a pattern that is matched against the path from the
-        # root to the node that is being considered. `path` elements are
- # tuples `(node_check, index_check)`. `node_check` is a node class:
- # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
- # matches any kind of a node. `index_check` could be `None`, a boolean
- # value, a string value, or a number. `None` and `False` match against
- # any _value_ of sequence and mapping nodes. `True` matches against
- # any _key_ of a mapping node. A string `index_check` matches against
-        # a mapping value that corresponds to a scalar key whose content is
- # equal to the `index_check` value. An integer `index_check` matches
- # against a sequence value with the index equal to `index_check`.
- if not 'yaml_path_resolvers' in cls.__dict__:
- cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
- new_path = []
- for element in path:
- if isinstance(element, (list, tuple)):
- if len(element) == 2:
- node_check, index_check = element
- elif len(element) == 1:
- node_check = element[0]
- index_check = True
- else:
- raise ResolverError("Invalid path element: %s" % element)
- else:
- node_check = None
- index_check = element
- if node_check is str:
- node_check = ScalarNode
- elif node_check is list:
- node_check = SequenceNode
- elif node_check is dict:
- node_check = MappingNode
- elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
- and not isinstance(node_check, str) \
- and node_check is not None:
- raise ResolverError("Invalid node checker: %s" % node_check)
- if not isinstance(index_check, (str, int)) \
- and index_check is not None:
- raise ResolverError("Invalid index checker: %s" % index_check)
- new_path.append((node_check, index_check))
- if kind is str:
- kind = ScalarNode
- elif kind is list:
- kind = SequenceNode
- elif kind is dict:
- kind = MappingNode
- elif kind not in [ScalarNode, SequenceNode, MappingNode] \
- and kind is not None:
- raise ResolverError("Invalid node kind: %s" % kind)
- cls.yaml_path_resolvers[tuple(new_path), kind] = tag
-
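A hedged sketch of the experimental path-resolver API documented above, using the standalone PyYAML package; the 'version' key is an invented example:

    import yaml

    # Force the value of the top-level mapping key 'version' to resolve as
    # a string, even though it would otherwise match the float resolver.
    yaml.add_path_resolver('tag:yaml.org,2002:str', [(dict, 'version')], str)
    print(yaml.load('version: 1.0', Loader=yaml.Loader))  # {'version': '1.0'}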
- def descend_resolver(self, current_node, current_index):
- if not self.yaml_path_resolvers:
- return
- exact_paths = {}
- prefix_paths = []
- if current_node:
- depth = len(self.resolver_prefix_paths)
- for path, kind in self.resolver_prefix_paths[-1]:
- if self.check_resolver_prefix(depth, path, kind,
- current_node, current_index):
- if len(path) > depth:
- prefix_paths.append((path, kind))
- else:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- for path, kind in self.yaml_path_resolvers:
- if not path:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- prefix_paths.append((path, kind))
- self.resolver_exact_paths.append(exact_paths)
- self.resolver_prefix_paths.append(prefix_paths)
-
- def ascend_resolver(self):
- if not self.yaml_path_resolvers:
- return
- self.resolver_exact_paths.pop()
- self.resolver_prefix_paths.pop()
-
- def check_resolver_prefix(self, depth, path, kind,
- current_node, current_index):
- node_check, index_check = path[depth-1]
- if isinstance(node_check, str):
- if current_node.tag != node_check:
- return
- elif node_check is not None:
- if not isinstance(current_node, node_check):
- return
- if index_check is True and current_index is not None:
- return
- if (index_check is False or index_check is None) \
- and current_index is None:
- return
- if isinstance(index_check, str):
- if not (isinstance(current_index, ScalarNode)
- and index_check == current_index.value):
- return
- elif isinstance(index_check, int) and not isinstance(index_check, bool):
- if index_check != current_index:
- return
- return True
-
- def resolve(self, kind, value, implicit):
- if kind is ScalarNode and implicit[0]:
- if value == '':
- resolvers = self.yaml_implicit_resolvers.get('', [])
- else:
- resolvers = self.yaml_implicit_resolvers.get(value[0], [])
- resolvers = resolvers + self.yaml_implicit_resolvers.get(None, [])  # avoid mutating the cached class-level list
- for tag, regexp in resolvers:
- if regexp.match(value):
- return tag
- implicit = implicit[1]
- if self.yaml_path_resolvers:
- exact_paths = self.resolver_exact_paths[-1]
- if kind in exact_paths:
- return exact_paths[kind]
- if None in exact_paths:
- return exact_paths[None]
- if kind is ScalarNode:
- return self.DEFAULT_SCALAR_TAG
- elif kind is SequenceNode:
- return self.DEFAULT_SEQUENCE_TAG
- elif kind is MappingNode:
- return self.DEFAULT_MAPPING_TAG
-
-class Resolver(BaseResolver):
- pass
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:bool',
- re.compile(r'''^(?:yes|Yes|YES|no|No|NO
- |true|True|TRUE|false|False|FALSE
- |on|On|ON|off|Off|OFF)$''', re.X),
- list('yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:float',
- re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
- |\.[0-9_]+(?:[eE][-+][0-9]+)?
- |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
- |[-+]?\.(?:inf|Inf|INF)
- |\.(?:nan|NaN|NAN))$''', re.X),
- list('-+0123456789.'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:int',
- re.compile(r'''^(?:[-+]?0b[0-1_]+
- |[-+]?0[0-7_]+
- |[-+]?(?:0|[1-9][0-9_]*)
- |[-+]?0x[0-9a-fA-F_]+
- |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
- list('-+0123456789'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:merge',
- re.compile(r'^(?:<<)$'),
- ['<'])
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:null',
- re.compile(r'''^(?: ~
- |null|Null|NULL
- | )$''', re.X),
- ['~', 'n', 'N', ''])
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:timestamp',
- re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
- |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
- (?:[Tt]|[ \t]+)[0-9][0-9]?
- :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
- (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
- list('0123456789'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:value',
- re.compile(r'^(?:=)$'),
- ['='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:yaml',
- re.compile(r'^(?:!|&|\*)$'),
- list('!&*'))
-
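The practical effect of the implicit-resolver registrations above, sketched with the standalone PyYAML package:

    import yaml

    # Unquoted scalars acquire types through the implicit resolvers.
    print(yaml.safe_load('a: yes'))    # {'a': True}   (bool resolver)
    print(yaml.safe_load('b: 0x1F'))   # {'b': 31}     (int resolver)
    print(yaml.safe_load('c: ~'))      # {'c': None}   (null resolver)
    print(yaml.safe_load("d: '1.5'"))  # {'d': '1.5'}  (quoted: left a string)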
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py b/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
deleted file mode 100644
index b55854e8b..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
+++ /dev/null
@@ -1,1449 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Scanner produces tokens of the following types:
-# STREAM-START
-# STREAM-END
-# DIRECTIVE(name, value)
-# DOCUMENT-START
-# DOCUMENT-END
-# BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START
-# BLOCK-END
-# FLOW-SEQUENCE-START
-# FLOW-MAPPING-START
-# FLOW-SEQUENCE-END
-# FLOW-MAPPING-END
-# BLOCK-ENTRY
-# FLOW-ENTRY
-# KEY
-# VALUE
-# ALIAS(value)
-# ANCHOR(value)
-# TAG(value)
-# SCALAR(value, plain, style)
-#
-# Read comments in the Scanner code for more details.
-#
-
-__all__ = ['Scanner', 'ScannerError']
-
-from .error import MarkedYAMLError
-from .tokens import *
-
-class ScannerError(MarkedYAMLError):
- pass
-
-class SimpleKey:
- # See the simple keys treatment below.
-
- def __init__(self, token_number, required, index, line, column, mark):
- self.token_number = token_number
- self.required = required
- self.index = index
- self.line = line
- self.column = column
- self.mark = mark
-
-class Scanner:
-
- def __init__(self):
- """Initialize the scanner."""
- # It is assumed that Scanner and Reader will have a common descendant.
- # Reader does the dirty work of checking for BOM and converting the
- # input data to Unicode. It also adds a NUL to the end.
- #
- # Reader supports the following methods
- # self.peek(i=0) # peek the next i-th character
- # self.prefix(l=1) # peek the next l characters
- # self.forward(l=1) # read the next l characters and move the pointer.
-
- # Have we reached the end of the stream?
- self.done = False
-
- # The number of unclosed '{' and '['. `flow_level == 0` means block
- # context.
- self.flow_level = 0
-
- # List of processed tokens that are not yet emitted.
- self.tokens = []
-
- # Add the STREAM-START token.
- self.fetch_stream_start()
-
- # Number of tokens that were emitted through the `get_token` method.
- self.tokens_taken = 0
-
- # The current indentation level.
- self.indent = -1
-
- # Past indentation levels.
- self.indents = []
-
- # Variables related to simple keys treatment.
-
- # A simple key is a key that is not denoted by the '?' indicator.
- # Example of simple keys:
- # ---
- # block simple key: value
- # ? not a simple key:
- # : { flow simple key: value }
- # We emit the KEY token before all keys, so when we find a potential
- # simple key, we try to locate the corresponding ':' indicator.
- # Simple keys should be limited to a single line and 1024 characters.
-
- # Can a simple key start at the current position? A simple key may
- # start:
- # - at the beginning of the line, not counting indentation spaces
- # (in block context),
- # - after '{', '[', ',' (in the flow context),
- # - after '?', ':', '-' (in the block context).
- # In the block context, this flag also signifies if a block collection
- # may start at the current position.
- self.allow_simple_key = True
-
- # Keep track of possible simple keys. This is a dictionary. The key
- # is `flow_level`; there can be no more than one possible simple key
- # for each level. The value is a SimpleKey record:
- # (token_number, required, index, line, column, mark)
- # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
- # '[', or '{' tokens.
- self.possible_simple_keys = {}
-
- # Public methods.
-
- def check_token(self, *choices):
- # Check if the next token is one of the given types.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.tokens[0], choice):
- return True
- return False
-
- def peek_token(self):
- # Return the next token, but do not delete it from the queue.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- return self.tokens[0]
-
- def get_token(self):
- # Return the next token.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- self.tokens_taken += 1
- return self.tokens.pop(0)
-
- # Private methods.
-
- def need_more_tokens(self):
- if self.done:
- return False
- if not self.tokens:
- return True
- # The current token may be a potential simple key, so we
- # need to look further.
- self.stale_possible_simple_keys()
- if self.next_possible_simple_key() == self.tokens_taken:
- return True
-
- def fetch_more_tokens(self):
-
- # Eat whitespaces and comments until we reach the next token.
- self.scan_to_next_token()
-
- # Remove obsolete possible simple keys.
- self.stale_possible_simple_keys()
-
- # Compare the current indentation and column. It may add some tokens
- # and decrease the current indentation level.
- self.unwind_indent(self.column)
-
- # Peek the next character.
- ch = self.peek()
-
- # Is it the end of stream?
- if ch == '\0':
- return self.fetch_stream_end()
-
- # Is it a directive?
- if ch == '%' and self.check_directive():
- return self.fetch_directive()
-
- # Is it the document start?
- if ch == '-' and self.check_document_start():
- return self.fetch_document_start()
-
- # Is it the document end?
- if ch == '.' and self.check_document_end():
- return self.fetch_document_end()
-
- # TODO: support for BOM within a stream.
- #if ch == '\uFEFF':
- # return self.fetch_bom() <-- issue BOMToken
-
- # Note: the order of the following checks is NOT significant.
-
- # Is it the flow sequence start indicator?
- if ch == '[':
- return self.fetch_flow_sequence_start()
-
- # Is it the flow mapping start indicator?
- if ch == '{':
- return self.fetch_flow_mapping_start()
-
- # Is it the flow sequence end indicator?
- if ch == ']':
- return self.fetch_flow_sequence_end()
-
- # Is it the flow mapping end indicator?
- if ch == '}':
- return self.fetch_flow_mapping_end()
-
- # Is it the flow entry indicator?
- if ch == ',':
- return self.fetch_flow_entry()
-
- # Is it the block entry indicator?
- if ch == '-' and self.check_block_entry():
- return self.fetch_block_entry()
-
- # Is it the key indicator?
- if ch == '?' and self.check_key():
- return self.fetch_key()
-
- # Is it the value indicator?
- if ch == ':' and self.check_value():
- return self.fetch_value()
-
- # Is it an alias?
- if ch == '*':
- return self.fetch_alias()
-
- # Is it an anchor?
- if ch == '&':
- return self.fetch_anchor()
-
- # Is it a tag?
- if ch == '!':
- return self.fetch_tag()
-
- # Is it a literal scalar?
- if ch == '|' and not self.flow_level:
- return self.fetch_literal()
-
- # Is it a folded scalar?
- if ch == '>' and not self.flow_level:
- return self.fetch_folded()
-
- # Is it a single quoted scalar?
- if ch == '\'':
- return self.fetch_single()
-
- # Is it a double quoted scalar?
- if ch == '\"':
- return self.fetch_double()
-
- # It must be a plain scalar then.
- if self.check_plain():
- return self.fetch_plain()
-
- # No? It's an error. Let's produce a nice error message.
- raise ScannerError("while scanning for the next token", None,
- "found character %r that cannot start any token" % ch,
- self.get_mark())
-
- # Simple keys treatment.
-
- def next_possible_simple_key(self):
- # Return the number of the nearest possible simple key. Actually we
- # don't need to loop through the whole dictionary. We may replace it
- # with the following code:
- # if not self.possible_simple_keys:
- # return None
- # return self.possible_simple_keys[
- # min(self.possible_simple_keys.keys())].token_number
- min_token_number = None
- for level in self.possible_simple_keys:
- key = self.possible_simple_keys[level]
- if min_token_number is None or key.token_number < min_token_number:
- min_token_number = key.token_number
- return min_token_number
-
- def stale_possible_simple_keys(self):
- # Remove entries that are no longer possible simple keys. According to
- # the YAML specification, simple keys
- # - should be limited to a single line,
- # - should be no longer than 1024 characters.
- # Disabling this procedure will allow simple keys of any length and
- # height (may cause problems if indentation is broken though).
- for level in list(self.possible_simple_keys):
- key = self.possible_simple_keys[level]
- if key.line != self.line \
- or self.index-key.index > 1024:
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
- "could not found expected ':'", self.get_mark())
- del self.possible_simple_keys[level]
-
- def save_possible_simple_key(self):
- # The next token may start a simple key. We check if it's possible
- # and save its position. This function is called for
- # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
-
- # Check if a simple key is required at the current position.
- required = not self.flow_level and self.indent == self.column
-
- # A simple key is required only if it is the first token in the current
- # line. Therefore it is always allowed.
- assert self.allow_simple_key or not required
-
- # The next token might be a simple key. Let's save its number and
- # position.
- if self.allow_simple_key:
- self.remove_possible_simple_key()
- token_number = self.tokens_taken+len(self.tokens)
- key = SimpleKey(token_number, required,
- self.index, self.line, self.column, self.get_mark())
- self.possible_simple_keys[self.flow_level] = key
-
- def remove_possible_simple_key(self):
- # Remove the saved possible key position at the current flow level.
- if self.flow_level in self.possible_simple_keys:
- key = self.possible_simple_keys[self.flow_level]
-
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
- "could not found expected ':'", self.get_mark())
-
- del self.possible_simple_keys[self.flow_level]
-
- # Indentation functions.
-
- def unwind_indent(self, column):
-
- ## In flow context, tokens should respect indentation.
- ## Actually the condition should be `self.indent >= column` according to
- ## the spec. But this condition will prohibit intuitively correct
- ## constructions such as
- ## key : {
- ## }
- #if self.flow_level and self.indent > column:
- # raise ScannerError(None, None,
- # "invalid intendation or unclosed '[' or '{'",
- # self.get_mark())
-
- # In the flow context, indentation is ignored. We make the scanner less
- # restrictive than the specification requires.
- if self.flow_level:
- return
-
- # In block context, we may need to issue the BLOCK-END tokens.
- while self.indent > column:
- mark = self.get_mark()
- self.indent = self.indents.pop()
- self.tokens.append(BlockEndToken(mark, mark))
-
- def add_indent(self, column):
- # Check if we need to increase indentation.
- if self.indent < column:
- self.indents.append(self.indent)
- self.indent = column
- return True
- return False
-
- # Fetchers.
-
- def fetch_stream_start(self):
- # We always add STREAM-START as the first token and STREAM-END as the
- # last token.
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-START.
- self.tokens.append(StreamStartToken(mark, mark,
- encoding=self.encoding))
-
-
- def fetch_stream_end(self):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
- self.possible_simple_keys = {}
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-END.
- self.tokens.append(StreamEndToken(mark, mark))
-
- # The stream is finished.
- self.done = True
-
- def fetch_directive(self):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Scan and add DIRECTIVE.
- self.tokens.append(self.scan_directive())
-
- def fetch_document_start(self):
- self.fetch_document_indicator(DocumentStartToken)
-
- def fetch_document_end(self):
- self.fetch_document_indicator(DocumentEndToken)
-
- def fetch_document_indicator(self, TokenClass):
-
- # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys. Note that there cannot be a block collection
- # after '---'.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Add DOCUMENT-START or DOCUMENT-END.
- start_mark = self.get_mark()
- self.forward(3)
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_start(self):
- self.fetch_flow_collection_start(FlowSequenceStartToken)
-
- def fetch_flow_mapping_start(self):
- self.fetch_flow_collection_start(FlowMappingStartToken)
-
- def fetch_flow_collection_start(self, TokenClass):
-
- # '[' and '{' may start a simple key.
- self.save_possible_simple_key()
-
- # Increase the flow level.
- self.flow_level += 1
-
- # Simple keys are allowed after '[' and '{'.
- self.allow_simple_key = True
-
- # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_end(self):
- self.fetch_flow_collection_end(FlowSequenceEndToken)
-
- def fetch_flow_mapping_end(self):
- self.fetch_flow_collection_end(FlowMappingEndToken)
-
- def fetch_flow_collection_end(self, TokenClass):
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Decrease the flow level.
- self.flow_level -= 1
-
- # No simple keys after ']' or '}'.
- self.allow_simple_key = False
-
- # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_entry(self):
-
- # Simple keys are allowed after ','.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add FLOW-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(FlowEntryToken(start_mark, end_mark))
-
- def fetch_block_entry(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a new entry?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "sequence entries are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-SEQUENCE-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockSequenceStartToken(mark, mark))
-
- # It's an error for the block entry to occur in the flow context,
- # but we let the parser detect this.
- else:
- pass
-
- # Simple keys are allowed after '-'.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add BLOCK-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(BlockEntryToken(start_mark, end_mark))
-
- def fetch_key(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a key (not necessarily a simple one)?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping keys are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-MAPPING-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after '?' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add KEY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(KeyToken(start_mark, end_mark))
-
- def fetch_value(self):
-
- # Do we determine a simple key?
- if self.flow_level in self.possible_simple_keys:
-
- # Add KEY.
- key = self.possible_simple_keys[self.flow_level]
- del self.possible_simple_keys[self.flow_level]
- self.tokens.insert(key.token_number-self.tokens_taken,
- KeyToken(key.mark, key.mark))
-
- # If this key starts a new block mapping, we need to add
- # BLOCK-MAPPING-START.
- if not self.flow_level:
- if self.add_indent(key.column):
- self.tokens.insert(key.token_number-self.tokens_taken,
- BlockMappingStartToken(key.mark, key.mark))
-
- # There cannot be two simple keys one after another.
- self.allow_simple_key = False
-
- # It must be a part of a complex key.
- else:
-
- # Block context needs additional checks.
- # (Do we really need them? They will be caught by the parser
- # anyway.)
- if not self.flow_level:
-
- # We are allowed to start a complex value if and only if
- # we can start a simple key.
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping values are not allowed here",
- self.get_mark())
-
- # If this value starts a new block mapping, we need to add
- # BLOCK-MAPPING-START. It will be detected as an error later by
- # the parser.
- if not self.flow_level:
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after ':' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add VALUE.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(ValueToken(start_mark, end_mark))
-
- def fetch_alias(self):
-
- # ALIAS could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ALIAS.
- self.allow_simple_key = False
-
- # Scan and add ALIAS.
- self.tokens.append(self.scan_anchor(AliasToken))
-
- def fetch_anchor(self):
-
- # ANCHOR could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ANCHOR.
- self.allow_simple_key = False
-
- # Scan and add ANCHOR.
- self.tokens.append(self.scan_anchor(AnchorToken))
-
- def fetch_tag(self):
-
- # TAG could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after TAG.
- self.allow_simple_key = False
-
- # Scan and add TAG.
- self.tokens.append(self.scan_tag())
-
- def fetch_literal(self):
- self.fetch_block_scalar(style='|')
-
- def fetch_folded(self):
- self.fetch_block_scalar(style='>')
-
- def fetch_block_scalar(self, style):
-
- # A simple key may follow a block scalar.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_block_scalar(style))
-
- def fetch_single(self):
- self.fetch_flow_scalar(style='\'')
-
- def fetch_double(self):
- self.fetch_flow_scalar(style='"')
-
- def fetch_flow_scalar(self, style):
-
- # A flow scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after flow scalars.
- self.allow_simple_key = False
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_flow_scalar(style))
-
- def fetch_plain(self):
-
- # A plain scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after plain scalars. But note that `scan_plain` will
- # change this flag if the scan is finished at the beginning of the
- # line.
- self.allow_simple_key = False
-
- # Scan and add SCALAR. May change `allow_simple_key`.
- self.tokens.append(self.scan_plain())
-
- # Checkers.
-
- def check_directive(self):
-
- # DIRECTIVE: ^ '%' ...
- # The '%' indicator is already checked.
- if self.column == 0:
- return True
-
- def check_document_start(self):
-
- # DOCUMENT-START: ^ '---' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == '---' \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_document_end(self):
-
- # DOCUMENT-END: ^ '...' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == '...' \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_block_entry(self):
-
- # BLOCK-ENTRY: '-' (' '|'\n')
- return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
- def check_key(self):
-
- # KEY(flow context): '?'
- if self.flow_level:
- return True
-
- # KEY(block context): '?' (' '|'\n')
- else:
- return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
- def check_value(self):
-
- # VALUE(flow context): ':'
- if self.flow_level:
- return True
-
- # VALUE(block context): ':' (' '|'\n')
- else:
- return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
- def check_plain(self):
-
- # A plain scalar may start with any non-space character except:
- # '-', '?', ':', ',', '[', ']', '{', '}',
- # '#', '&', '*', '!', '|', '>', '\'', '\"',
- # '%', '@', '`'.
- #
- # It may also start with
- # '-', '?', ':'
- # if it is followed by a non-space character.
- #
- # Note that we limit the last rule to the block context (except the
- # '-' character) because we want the flow context to be space
- # independent.
- ch = self.peek()
- return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
- or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
- and (ch == '-' or (not self.flow_level and ch in '?:')))
-
- # Scanners.
-
- def scan_to_next_token(self):
- # We ignore spaces, line breaks and comments.
- # If we find a line break in the block context, we set the flag
- # `allow_simple_key` on.
- # The byte order mark is stripped if it's the first character in the
- # stream. We do not yet support BOM inside the stream as the
- # specification requires. Any such mark will be considered as a part
- # of the document.
- #
- # TODO: We need to make tab handling rules more sane. A good rule is
- # Tabs cannot precede tokens
- # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
- # KEY(block), VALUE(block), BLOCK-ENTRY
- # So the checking code is
- # if <TAB>:
- # self.allow_simple_keys = False
- # We also need to add the check for `allow_simple_keys == True` to
- # `unwind_indent` before issuing BLOCK-END.
- # Scanners for block, flow, and plain scalars need to be modified.
-
- if self.index == 0 and self.peek() == '\uFEFF':
- self.forward()
- found = False
- while not found:
- while self.peek() == ' ':
- self.forward()
- if self.peek() == '#':
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- if self.scan_line_break():
- if not self.flow_level:
- self.allow_simple_key = True
- else:
- found = True
-
- def scan_directive(self):
- # See the specification for details.
- start_mark = self.get_mark()
- self.forward()
- name = self.scan_directive_name(start_mark)
- value = None
- if name == 'YAML':
- value = self.scan_yaml_directive_value(start_mark)
- end_mark = self.get_mark()
- elif name == 'TAG':
- value = self.scan_tag_directive_value(start_mark)
- end_mark = self.get_mark()
- else:
- end_mark = self.get_mark()
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- self.scan_directive_ignored_line(start_mark)
- return DirectiveToken(name, value, start_mark, end_mark)
-
- def scan_directive_name(self, start_mark):
- # See the specification for details.
- length = 0
- ch = self.peek(length)
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- return value
-
- def scan_yaml_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- major = self.scan_yaml_directive_number(start_mark)
- if self.peek() != '.':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or '.', but found %r" % self.peek(),
- self.get_mark())
- self.forward()
- minor = self.scan_yaml_directive_number(start_mark)
- if self.peek() not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or ' ', but found %r" % self.peek(),
- self.get_mark())
- return (major, minor)
-
- def scan_yaml_directive_number(self, start_mark):
- # See the specification for details.
- ch = self.peek()
- if not ('0' <= ch <= '9'):
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit, but found %r" % ch, self.get_mark())
- length = 0
- while '0' <= self.peek(length) <= '9':
- length += 1
- value = int(self.prefix(length))
- self.forward(length)
- return value
-
- def scan_tag_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- handle = self.scan_tag_directive_handle(start_mark)
- while self.peek() == ' ':
- self.forward()
- prefix = self.scan_tag_directive_prefix(start_mark)
- return (handle, prefix)
-
- def scan_tag_directive_handle(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_handle('directive', start_mark)
- ch = self.peek()
- if ch != ' ':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch, self.get_mark())
- return value
-
- def scan_tag_directive_prefix(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_uri('directive', start_mark)
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch, self.get_mark())
- return value
-
- def scan_directive_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- if self.peek() == '#':
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in '\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a comment or a line break, but found %r"
- % ch, self.get_mark())
- self.scan_line_break()
-
- def scan_anchor(self, TokenClass):
- # The specification does not restrict characters for anchors and
- # aliases. This may lead to problems, for instance, the document:
- # [ *alias, value ]
- # can be interpreted in two ways, as
- # [ "value" ]
- # and
- # [ *alias , "value" ]
- # Therefore we restrict aliases to numbers and ASCII letters.
- start_mark = self.get_mark()
- indicator = self.peek()
- if indicator == '*':
- name = 'alias'
- else:
- name = 'anchor'
- self.forward()
- length = 0
- ch = self.peek(length)
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- end_mark = self.get_mark()
- return TokenClass(value, start_mark, end_mark)
-
- def scan_tag(self):
- # See the specification for details.
- start_mark = self.get_mark()
- ch = self.peek(1)
- if ch == '<':
- handle = None
- self.forward(2)
- suffix = self.scan_tag_uri('tag', start_mark)
- if self.peek() != '>':
- raise ScannerError("while parsing a tag", start_mark,
- "expected '>', but found %r" % self.peek(),
- self.get_mark())
- self.forward()
- elif ch in '\0 \t\r\n\x85\u2028\u2029':
- handle = None
- suffix = '!'
- self.forward()
- else:
- length = 1
- use_handle = False
- while ch not in '\0 \r\n\x85\u2028\u2029':
- if ch == '!':
- use_handle = True
- break
- length += 1
- ch = self.peek(length)
- handle = '!'
- if use_handle:
- handle = self.scan_tag_handle('tag', start_mark)
- else:
- handle = '!'
- self.forward()
- suffix = self.scan_tag_uri('tag', start_mark)
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a tag", start_mark,
- "expected ' ', but found %r" % ch, self.get_mark())
- value = (handle, suffix)
- end_mark = self.get_mark()
- return TagToken(value, start_mark, end_mark)
-
- def scan_block_scalar(self, style):
- # See the specification for details.
-
- if style == '>':
- folded = True
- else:
- folded = False
-
- chunks = []
- start_mark = self.get_mark()
-
- # Scan the header.
- self.forward()
- chomping, increment = self.scan_block_scalar_indicators(start_mark)
- self.scan_block_scalar_ignored_line(start_mark)
-
- # Determine the indentation level and go to the first non-empty line.
- min_indent = self.indent+1
- if min_indent < 1:
- min_indent = 1
- if increment is None:
- breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
- indent = max(min_indent, max_indent)
- else:
- indent = min_indent+increment-1
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- line_break = ''
-
- # Scan the inner part of the block scalar.
- while self.column == indent and self.peek() != '\0':
- chunks.extend(breaks)
- leading_non_space = self.peek() not in ' \t'
- length = 0
- while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
- length += 1
- chunks.append(self.prefix(length))
- self.forward(length)
- line_break = self.scan_line_break()
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- if self.column == indent and self.peek() != '\0':
-
- # Unfortunately, folding rules are ambiguous.
- #
- # This is the folding according to the specification:
-
- if folded and line_break == '\n' \
- and leading_non_space and self.peek() not in ' \t':
- if not breaks:
- chunks.append(' ')
- else:
- chunks.append(line_break)
-
- # This is Clark Evans's interpretation (also in the spec
- # examples):
- #
- #if folded and line_break == '\n':
- # if not breaks:
- # if self.peek() not in ' \t':
- # chunks.append(' ')
- # else:
- # chunks.append(line_break)
- #else:
- # chunks.append(line_break)
- else:
- break
-
- # Chomp the tail.
- if chomping is not False:
- chunks.append(line_break)
- if chomping is True:
- chunks.extend(breaks)
-
- # We are done.
- return ScalarToken(''.join(chunks), False, start_mark, end_mark,
- style)
-
- def scan_block_scalar_indicators(self, start_mark):
- # See the specification for details.
- chomping = None
- increment = None
- ch = self.peek()
- if ch in '+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch in '0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- elif ch in '0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- ch = self.peek()
- if ch in '+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected chomping or indentation indicators, but found %r"
- % ch, self.get_mark())
- return chomping, increment
-
- def scan_block_scalar_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- if self.peek() == '#':
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in '\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected a comment or a line break, but found %r" % ch,
- self.get_mark())
- self.scan_line_break()
-
- def scan_block_scalar_indentation(self):
- # See the specification for details.
- chunks = []
- max_indent = 0
- end_mark = self.get_mark()
- while self.peek() in ' \r\n\x85\u2028\u2029':
- if self.peek() != ' ':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- else:
- self.forward()
- if self.column > max_indent:
- max_indent = self.column
- return chunks, max_indent, end_mark
-
- def scan_block_scalar_breaks(self, indent):
- # See the specification for details.
- chunks = []
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == ' ':
- self.forward()
- while self.peek() in '\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == ' ':
- self.forward()
- return chunks, end_mark
-
- def scan_flow_scalar(self, style):
- # See the specification for details.
- # Note that we relax indentation rules for quoted scalars. Quoted
- # scalars don't need to adhere to indentation because " and ' clearly
- # mark the beginning and the end of them. Therefore we are less
- # restrictive than the specification requires. We only need to check
- # that document separators are not included in scalars.
- if style == '"':
- double = True
- else:
- double = False
- chunks = []
- start_mark = self.get_mark()
- quote = self.peek()
- self.forward()
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- while self.peek() != quote:
- chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- self.forward()
- end_mark = self.get_mark()
- return ScalarToken(''.join(chunks), False, start_mark, end_mark,
- style)
-
- ESCAPE_REPLACEMENTS = {
- '0': '\0',
- 'a': '\x07',
- 'b': '\x08',
- 't': '\x09',
- '\t': '\x09',
- 'n': '\x0A',
- 'v': '\x0B',
- 'f': '\x0C',
- 'r': '\x0D',
- 'e': '\x1B',
- ' ': '\x20',
- '\"': '\"',
- '\\': '\\',
- 'N': '\x85',
- '_': '\xA0',
- 'L': '\u2028',
- 'P': '\u2029',
- }
-
- ESCAPE_CODES = {
- 'x': 2,
- 'u': 4,
- 'U': 8,
- }
-
- def scan_flow_scalar_non_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- length = 0
- while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
- length += 1
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- ch = self.peek()
- if not double and ch == '\'' and self.peek(1) == '\'':
- chunks.append('\'')
- self.forward(2)
- elif (double and ch == '\'') or (not double and ch in '\"\\'):
- chunks.append(ch)
- self.forward()
- elif double and ch == '\\':
- self.forward()
- ch = self.peek()
- if ch in self.ESCAPE_REPLACEMENTS:
- chunks.append(self.ESCAPE_REPLACEMENTS[ch])
- self.forward()
- elif ch in self.ESCAPE_CODES:
- length = self.ESCAPE_CODES[ch]
- self.forward()
- for k in range(length):
- if self.peek(k) not in '0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "expected escape sequence of %d hexdecimal numbers, but found %r" %
- (length, self.peek(k)), self.get_mark())
- code = int(self.prefix(length), 16)
- chunks.append(chr(code))
- self.forward(length)
- elif ch in '\r\n\x85\u2028\u2029':
- self.scan_line_break()
- chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
- else:
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "found unknown escape character %r" % ch, self.get_mark())
- else:
- return chunks
-
- def scan_flow_scalar_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- length = 0
- while self.peek(length) in ' \t':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch == '\0':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected end of stream", self.get_mark())
- elif ch in '\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- breaks = self.scan_flow_scalar_breaks(double, start_mark)
- if line_break != '\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(' ')
- chunks.extend(breaks)
- else:
- chunks.append(whitespaces)
- return chunks
-
- def scan_flow_scalar_breaks(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- # Instead of checking indentation, we check for document
- # separators.
- prefix = self.prefix(3)
- if (prefix == '---' or prefix == '...') \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected document separator", self.get_mark())
- while self.peek() in ' \t':
- self.forward()
- if self.peek() in '\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- else:
- return chunks
-
- def scan_plain(self):
- # See the specification for details.
- # We add an additional restriction for the flow context:
- # plain scalars in the flow context cannot contain ',', ':' and '?'.
- # We also keep track of the `allow_simple_key` flag here.
- # Indentation rules are loosened for the flow context.
- chunks = []
- start_mark = self.get_mark()
- end_mark = start_mark
- indent = self.indent+1
- # We allow zero indentation for scalars, but then we need to check for
- # document separators at the beginning of the line.
- #if indent == 0:
- # indent = 1
- spaces = []
- while True:
- length = 0
- if self.peek() == '#':
- break
- while True:
- ch = self.peek(length)
- if ch in '\0 \t\r\n\x85\u2028\u2029' \
- or (not self.flow_level and ch == ':' and
- self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
- or (self.flow_level and ch in ',:?[]{}'):
- break
- length += 1
- # It's not clear what we should do with ':' in the flow context.
- if (self.flow_level and ch == ':'
- and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
- self.forward(length)
- raise ScannerError("while scanning a plain scalar", start_mark,
- "found unexpected ':'", self.get_mark(),
- "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
- if length == 0:
- break
- self.allow_simple_key = False
- chunks.extend(spaces)
- chunks.append(self.prefix(length))
- self.forward(length)
- end_mark = self.get_mark()
- spaces = self.scan_plain_spaces(indent, start_mark)
- if not spaces or self.peek() == '#' \
- or (not self.flow_level and self.column < indent):
- break
- return ScalarToken(''.join(chunks), True, start_mark, end_mark)
-
- def scan_plain_spaces(self, indent, start_mark):
- # See the specification for details.
- # The specification is really confusing about tabs in plain scalars.
- # We just forbid them completely. Do not use tabs in YAML!
- chunks = []
- length = 0
- while self.peek(length) in ' ':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch in '\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- self.allow_simple_key = True
- prefix = self.prefix(3)
- if (prefix == '---' or prefix == '...') \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return
- breaks = []
- while self.peek() in ' \r\n\x85\u2028\u2029':
- if self.peek() == ' ':
- self.forward()
- else:
- breaks.append(self.scan_line_break())
- prefix = self.prefix(3)
- if (prefix == '---' or prefix == '...') \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return
- if line_break != '\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(' ')
- chunks.extend(breaks)
- elif whitespaces:
- chunks.append(whitespaces)
- return chunks
-
- def scan_tag_handle(self, name, start_mark):
- # See the specification for details.
- # For some strange reason, the specification does not allow '_' in
- # tag handles. I have allowed it anyway.
- ch = self.peek()
- if ch != '!':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch, self.get_mark())
- length = 1
- ch = self.peek(length)
- if ch != ' ':
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_':
- length += 1
- ch = self.peek(length)
- if ch != '!':
- self.forward(length)
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch, self.get_mark())
- length += 1
- value = self.prefix(length)
- self.forward(length)
- return value
-
- def scan_tag_uri(self, name, start_mark):
- # See the specification for details.
- # Note: we do not check if the URI is well-formed.
- chunks = []
- length = 0
- ch = self.peek(length)
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-;/?:@&=+$,_.!~*\'()[]%':
- if ch == '%':
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- chunks.append(self.scan_uri_escapes(name, start_mark))
- else:
- length += 1
- ch = self.peek(length)
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- if not chunks:
- raise ScannerError("while parsing a %s" % name, start_mark,
- "expected URI, but found %r" % ch, self.get_mark())
- return ''.join(chunks)
-
- def scan_uri_escapes(self, name, start_mark):
- # See the specification for details.
- codes = []
- mark = self.get_mark()
- while self.peek() == '%':
- self.forward()
- for k in range(2):
- if self.peek(k) not in '0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
- % self.peek(k), self.get_mark())
- codes.append(int(self.prefix(2), 16))
- self.forward(2)
- try:
- value = bytes(codes).decode('utf-8')
- except UnicodeDecodeError as exc:
- raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
- return value
-
- def scan_line_break(self):
- # Transforms:
- # '\r\n' : '\n'
- # '\r' : '\n'
- # '\n' : '\n'
- # '\x85' : '\n'
- # '\u2028' : '\u2028'
- # '\u2029' : '\u2029'
- # default : ''
- ch = self.peek()
- if ch in '\r\n\x85':
- if self.prefix(2) == '\r\n':
- self.forward(2)
- else:
- self.forward()
- return '\n'
- elif ch in '\u2028\u2029':
- self.forward()
- return ch
- return ''
-
-#try:
-# import psyco
-# psyco.bind(Scanner)
-#except ImportError:
-# pass
-
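To observe the token stream this Scanner produces, the standalone PyYAML package exposes it directly; a small sketch:

    import yaml

    # yaml.scan() drives the Scanner and yields the token types listed in
    # the header comment above (STREAM-START, KEY, VALUE, SCALAR, ...).
    for token in yaml.scan('key: [1, 2]'):
        print(type(token).__name__)
    # StreamStartToken, BlockMappingStartToken, KeyToken, ScalarToken,
    # ValueToken, FlowSequenceStartToken, ScalarToken, FlowEntryToken,
    # ScalarToken, FlowSequenceEndToken, BlockEndToken, StreamEndToken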
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py b/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
deleted file mode 100644
index 1ba2f7f9d..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['Serializer', 'SerializerError']
-
-from .error import YAMLError
-from .events import *
-from .nodes import *
-
-class SerializerError(YAMLError):
- pass
-
-class Serializer:
-
- ANCHOR_TEMPLATE = 'id%03d'
-
- def __init__(self, encoding=None,
- explicit_start=None, explicit_end=None, version=None, tags=None):
- self.use_encoding = encoding
- self.use_explicit_start = explicit_start
- self.use_explicit_end = explicit_end
- self.use_version = version
- self.use_tags = tags
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
- self.closed = None
-
- def open(self):
- if self.closed is None:
- self.emit(StreamStartEvent(encoding=self.use_encoding))
- self.closed = False
- elif self.closed:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError("serializer is already opened")
-
- def close(self):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif not self.closed:
- self.emit(StreamEndEvent())
- self.closed = True
-
- #def __del__(self):
- # self.close()
-
- def serialize(self, node):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif self.closed:
- raise SerializerError("serializer is closed")
- self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
- version=self.use_version, tags=self.use_tags))
- self.anchor_node(node)
- self.serialize_node(node, None, None)
- self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
-
- def anchor_node(self, node):
- if node in self.anchors:
- if self.anchors[node] is None:
- self.anchors[node] = self.generate_anchor(node)
- else:
- self.anchors[node] = None
- if isinstance(node, SequenceNode):
- for item in node.value:
- self.anchor_node(item)
- elif isinstance(node, MappingNode):
- for key, value in node.value:
- self.anchor_node(key)
- self.anchor_node(value)
-
- def generate_anchor(self, node):
- self.last_anchor_id += 1
- return self.ANCHOR_TEMPLATE % self.last_anchor_id
-
- def serialize_node(self, node, parent, index):
- alias = self.anchors[node]
- if node in self.serialized_nodes:
- self.emit(AliasEvent(alias))
- else:
- self.serialized_nodes[node] = True
- self.descend_resolver(parent, index)
- if isinstance(node, ScalarNode):
- detected_tag = self.resolve(ScalarNode, node.value, (True, False))
- default_tag = self.resolve(ScalarNode, node.value, (False, True))
- implicit = (node.tag == detected_tag), (node.tag == default_tag)
- self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
- style=node.style))
- elif isinstance(node, SequenceNode):
- implicit = (node.tag
- == self.resolve(SequenceNode, node.value, True))
- self.emit(SequenceStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- index = 0
- for item in node.value:
- self.serialize_node(item, node, index)
- index += 1
- self.emit(SequenceEndEvent())
- elif isinstance(node, MappingNode):
- implicit = (node.tag
- == self.resolve(MappingNode, node.value, True))
- self.emit(MappingStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- for key, value in node.value:
- self.serialize_node(key, node, None)
- self.serialize_node(value, node, key)
- self.emit(MappingEndEvent())
- self.ascend_resolver()
-
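A round-trip sketch of the Serializer's role, again with the standalone PyYAML package: compose a document into a node graph, then serialize the graph back through the event pipeline:

    import yaml

    node = yaml.compose('a: [1, 2]')     # parse into a node graph
    print(yaml.serialize(node), end='')  # Serializer walks the graph and
                                         # re-emits 'a: [1, 2]'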
diff --git a/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py b/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
deleted file mode 100644
index c5c4fb116..000000000
--- a/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-class Token(object):
- def __init__(self, start_mark, end_mark):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in self.__dict__
- if not key.endswith('_mark')]
- attributes.sort()
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-#class BOMToken(Token):
-# id = '<byte order mark>'
-
-class DirectiveToken(Token):
- id = '<directive>'
- def __init__(self, name, value, start_mark, end_mark):
- self.name = name
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class DocumentStartToken(Token):
- id = '<document start>'
-
-class DocumentEndToken(Token):
- id = '<document end>'
-
-class StreamStartToken(Token):
- id = '<stream start>'
- def __init__(self, start_mark=None, end_mark=None,
- encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndToken(Token):
- id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
- id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
- id = '<block mapping start>'
-
-class BlockEndToken(Token):
- id = '<block end>'
-
-class FlowSequenceStartToken(Token):
- id = '['
-
-class FlowMappingStartToken(Token):
- id = '{'
-
-class FlowSequenceEndToken(Token):
- id = ']'
-
-class FlowMappingEndToken(Token):
- id = '}'
-
-class KeyToken(Token):
- id = '?'
-
-class ValueToken(Token):
- id = ':'
-
-class BlockEntryToken(Token):
- id = '-'
-
-class FlowEntryToken(Token):
- id = ','
-
-class AliasToken(Token):
- id = '<alias>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class AnchorToken(Token):
- id = '<anchor>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class TagToken(Token):
- id = '<tag>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class ScalarToken(Token):
- id = '<scalar>'
- def __init__(self, value, plain, start_mark, end_mark, style=None):
- self.value = value
- self.plain = plain
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
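The generic Token.__repr__ above is what makes scanner output readable when debugging; a quick sketch with the standalone PyYAML package:

    import yaml

    # Marks are excluded from the repr; the remaining attributes are sorted.
    print(list(yaml.scan('x: 1'))[3])
    # -> ScalarToken(plain=True, style=None, value='x')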
diff --git a/collectors/python.d.plugin/python_modules/third_party/__init__.py b/collectors/python.d.plugin/python_modules/third_party/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/collectors/python.d.plugin/python_modules/third_party/__init__.py
+++ /dev/null
diff --git a/collectors/python.d.plugin/python_modules/third_party/boinc_client.py b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
deleted file mode 100644
index ec21779a0..000000000
--- a/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
+++ /dev/null
@@ -1,515 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# client.py - Somewhat higher-level GUI_RPC API for BOINC core client
-#
-# Copyright (C) 2013 Rodrigo Silva (MestreLion) <linux@rodrigosilva.com>
-# Copyright (C) 2017 Austin S. Hemmelgarn
-#
-# SPDX-License-Identifier: GPL-3.0
-
-# Based on client/boinc_cmd.cpp
-
-import hashlib
-import socket
-import sys
-import time
-from functools import total_ordering
-from xml.etree import ElementTree
-
-GUI_RPC_PASSWD_FILE = "/var/lib/boinc/gui_rpc_auth.cfg"
-
-GUI_RPC_HOSTNAME = None # localhost
-GUI_RPC_PORT = 31416
-GUI_RPC_TIMEOUT = 1
-
-class Rpc(object):
- ''' Class to perform GUI RPC calls to a BOINC core client.
- Usage in a context manager ('with' block) is recommended to ensure
- disconnect() is called. Using the same instance for all calls is also
- recommended so that it reuses the same socket connection.
- '''
- def __init__(self, hostname="", port=0, timeout=0, text_output=False):
- self.hostname = hostname
- self.port = port
- self.timeout = timeout
- self.sock = None
- self.text_output = text_output
-
- @property
- def sockargs(self):
- return (self.hostname, self.port, self.timeout)
-
- def __enter__(self): self.connect(*self.sockargs); return self
- def __exit__(self, *args): self.disconnect()
-
- def connect(self, hostname="", port=0, timeout=0):
- ''' Connect to (hostname, port) with timeout in seconds.
- Hostname defaults to None (localhost), and port to 31416.
- Calling multiple times will disconnect the previous connection (if any)
- and (re-)connect to the host.
- '''
- if self.sock:
- self.disconnect()
-
- self.hostname = hostname or GUI_RPC_HOSTNAME
- self.port = port or GUI_RPC_PORT
- self.timeout = timeout or GUI_RPC_TIMEOUT
-
- self.sock = socket.create_connection(self.sockargs[0:2], self.sockargs[2])
-
- def disconnect(self):
- ''' Disconnect from the host. Calling multiple times is OK (idempotent).
- '''
- if self.sock:
- self.sock.close()
- self.sock = None
-
- def call(self, request, text_output=None):
- ''' Do an RPC call. Pack and send the XML request and return the
- unpacked reply. request can be either plain XML text or an
- xml.etree.ElementTree.Element object. Returns an ElementTree.Element
- or XML text according to the text_output flag.
- Will auto-connect if not connected.
- '''
- if text_output is None:
- text_output = self.text_output
-
- if not self.sock:
- self.connect(*self.sockargs)
-
- if not isinstance(request, ElementTree.Element):
- request = ElementTree.fromstring(request)
-
- # pack request
- end = '\003'
- if sys.version_info[0] < 3:
- req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request).replace(' />', '/>'), end)
- else:
- req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request, encoding='unicode').replace(' />', '/>'), end).encode()
-
- try:
- self.sock.sendall(req)
- except (socket.error, socket.herror, socket.gaierror, socket.timeout):
- raise
-
- req = ""
- while True:
- try:
- buf = self.sock.recv(8192)
- if not buf:
- raise socket.error("No data from socket")
- if sys.version_info[0] >= 3:
- buf = buf.decode()
- except socket.error:
- raise
- n = buf.find(end)
- if n != -1: break
- req += buf
- req += buf[:n]
-
- # unpack reply (remove root tag, i.e. first and last lines)
- req = '\n'.join(req.strip().rsplit('\n')[1:-1])
-
- if text_output:
- return req
- else:
- return ElementTree.fromstring(req)
-
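A hypothetical usage sketch of the Rpc helper above. '<get_cc_status/>' is a standard BOINC GUI RPC request, but a running client reachable on the default port is an assumption:

    # Context-manager use guarantees disconnect(); call() auto-connects,
    # packs the XML request, and returns the unpacked ElementTree reply.
    with Rpc(hostname='localhost', timeout=5) as rpc:
        reply = rpc.call('<get_cc_status/>')
        print(reply.tag)  # e.g. 'cc_status'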
-def setattrs_from_xml(obj, xml, attrfuncdict={}):
- ''' Helper to set values for attributes of a class instance by mapping
- matching tags from an XML file.
- attrfuncdict is a dict of functions to customize the value data type of
- each attribute. It falls back to simple int/float/bool/str detection
- based on values defined in __init__(). This would not be needed if
- BOINC used a standard RPC protocol that includes data types in the XML.
- '''
- if not isinstance(xml, ElementTree.Element):
- xml = ElementTree.fromstring(xml)
- for e in list(xml):
- if hasattr(obj, e.tag):
- attr = getattr(obj, e.tag)
- attrfunc = attrfuncdict.get(e.tag, None)
- if attrfunc is None:
- if isinstance(attr, bool): attrfunc = parse_bool
- elif isinstance(attr, int): attrfunc = parse_int
- elif isinstance(attr, float): attrfunc = parse_float
- elif isinstance(attr, str): attrfunc = parse_str
- elif isinstance(attr, list): attrfunc = parse_list
- else: attrfunc = lambda x: x
- setattr(obj, e.tag, attrfunc(e))
-        else:
-            # obj has no attribute matching this tag; silently ignore it.
-            # print("class missing attribute '%s': %r" % (e.tag, obj))
-            pass
- return obj
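-# Illustrative sketch (not part of the original module) of how the parse
-# helpers below infer types from the defaults; the Host class is hypothetical:
-#
-#     class Host(object):
-#         def __init__(self):
-#             self.name = ""   # -> parse_str
-#             self.ncpus = 0   # -> parse_int
-#
-#     host = setattrs_from_xml(Host(), '<host><name>pc</name><ncpus>4</ncpus></host>')
-#     # host.name == 'pc', host.ncpus == 4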
-
-
-def parse_bool(e):
- ''' Helper to convert ElementTree.Element.text to boolean.
- Treat '<foo/>' (and '<foo>[[:blank:]]</foo>') as True
- Treat '0' and 'false' as False
- '''
- if e.text is None:
- return True
- else:
- return bool(e.text) and not e.text.strip().lower() in ('0', 'false')
-
-
-def parse_int(e):
- ''' Helper to convert ElementTree.Element.text to integer.
- Treat '<foo/>' (and '<foo></foo>') as 0
- '''
- # int(float()) allows casting to int a value expressed as float in XML
- return 0 if e.text is None else int(float(e.text.strip()))
-
-
-def parse_float(e):
- ''' Helper to convert ElementTree.Element.text to float. '''
- return 0.0 if e.text is None else float(e.text.strip())
-
-
-def parse_str(e):
- ''' Helper to convert ElementTree.Element.text to string. '''
- return "" if e.text is None else e.text.strip()
-
-
-def parse_list(e):
- ''' Helper to convert ElementTree.Element to list. For now, simply return
- the list of root element's children
- '''
- return list(e)
-
-
-class Enum(object):
- UNKNOWN = -1 # Not in original API
-
- @classmethod
- def name(cls, value):
- ''' Quick-and-dirty fallback for getting the "name" of an enum item '''
-
- # value as string, if it matches an enum attribute.
- # Allows short usage as Enum.name("VALUE") besides Enum.name(Enum.VALUE)
- if hasattr(cls, str(value)):
- return cls.name(getattr(cls, value, None))
-
- # value not handled in subclass name()
- for k, v in cls.__dict__.items():
- if v == value:
- return k.lower().replace('_', ' ')
-
- # value not found
- return cls.name(Enum.UNKNOWN)
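-    # Illustrative sketch (not part of the original module): name() accepts
-    # either the enum value or its attribute name, e.g. for CpuSched below:
-    #
-    #     CpuSched.name(CpuSched.SCHEDULED)  # -> 'scheduled'
-    #     CpuSched.name('PREEMPTED')         # -> 'preempted'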
-
-
-class CpuSched(Enum):
- ''' values of ACTIVE_TASK::scheduler_state and ACTIVE_TASK::next_scheduler_state
- "SCHEDULED" is synonymous with "executing" except when CPU throttling
- is in use.
- '''
- UNINITIALIZED = 0
- PREEMPTED = 1
- SCHEDULED = 2
-
-
-class ResultState(Enum):
- ''' Values of RESULT::state in client.
- THESE MUST BE IN NUMERICAL ORDER
- (because of the > comparison in RESULT::computing_done())
- see html/inc/common_defs.inc
- '''
- NEW = 0
- #// New result
- FILES_DOWNLOADING = 1
- #// Input files for result (WU, app version) are being downloaded
- FILES_DOWNLOADED = 2
- #// Files are downloaded, result can be (or is being) computed
- COMPUTE_ERROR = 3
- #// computation failed; no file upload
- FILES_UPLOADING = 4
- #// Output files for result are being uploaded
- FILES_UPLOADED = 5
- #// Files are uploaded, notify scheduling server at some point
- ABORTED = 6
- #// result was aborted
- UPLOAD_FAILED = 7
- #// some output file permanent failure
-
-
-class Process(Enum):
- ''' values of ACTIVE_TASK::task_state '''
- UNINITIALIZED = 0
- #// process doesn't exist yet
- EXECUTING = 1
- #// process is running, as far as we know
- SUSPENDED = 9
- #// we've sent it a "suspend" message
- ABORT_PENDING = 5
- #// process exceeded limits; send "abort" message, waiting to exit
- QUIT_PENDING = 8
- #// we've sent it a "quit" message, waiting to exit
- COPY_PENDING = 10
- #// waiting for async file copies to finish
-
-
-class _Struct(object):
- ''' base helper class with common methods for all classes derived from
- BOINC's C++ structs
- '''
- @classmethod
- def parse(cls, xml):
- return setattrs_from_xml(cls(), xml)
-
- def __str__(self, indent=0):
- buf = '{0}{1}:\n'.format('\t' * indent, self.__class__.__name__)
- for attr in self.__dict__:
- value = getattr(self, attr)
- if isinstance(value, list):
- buf += '{0}\t{1} [\n'.format('\t' * indent, attr)
- for v in value: buf += '\t\t{0}\t\t,\n'.format(v)
- buf += '\t]\n'
- else:
- buf += '{0}\t{1}\t{2}\n'.format('\t' * indent,
- attr,
- value.__str__(indent+2)
- if isinstance(value, _Struct)
- else repr(value))
- return buf
-
-
-@total_ordering
-class VersionInfo(_Struct):
- def __init__(self, major=0, minor=0, release=0):
- self.major = major
- self.minor = minor
- self.release = release
-
- @property
- def _tuple(self):
- return (self.major, self.minor, self.release)
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self._tuple == other._tuple
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __gt__(self, other):
- if not isinstance(other, self.__class__):
- return NotImplemented
- return self._tuple > other._tuple
-
- def __str__(self):
- return "{0}.{1}.{2}".format(self.major, self.minor, self.release)
-
- def __repr__(self):
- return "{0}{1}".format(self.__class__.__name__, self._tuple)
-
-
-class Result(_Struct):
- ''' Also called "task" in some contexts '''
- def __init__(self):
- # Names and values follow lib/gui_rpc_client.h @ RESULT
- # Order too, except when grouping contradicts client/result.cpp
- # RESULT::write_gui(), then XML order is used.
-
- self.name = ""
- self.wu_name = ""
- self.version_num = 0
- #// identifies the app used
- self.plan_class = ""
- self.project_url = "" # from PROJECT.master_url
- self.report_deadline = 0.0 # seconds since epoch
- self.received_time = 0.0 # seconds since epoch
- #// when we got this from server
- self.ready_to_report = False
- #// we're ready to report this result to the server;
- #// either computation is done and all the files have been uploaded
- #// or there was an error
- self.got_server_ack = False
- #// we've received the ack for this result from the server
- self.final_cpu_time = 0.0
- self.final_elapsed_time = 0.0
- self.state = ResultState.NEW
- self.estimated_cpu_time_remaining = 0.0
- #// actually, estimated elapsed time remaining
- self.exit_status = 0
- #// return value from the application
- self.suspended_via_gui = False
- self.project_suspended_via_gui = False
- self.edf_scheduled = False
- #// temporary used to tell GUI that this result is deadline-scheduled
- self.coproc_missing = False
- #// a coproc needed by this job is missing
- #// (e.g. because user removed their GPU board).
- self.scheduler_wait = False
- self.scheduler_wait_reason = ""
- self.network_wait = False
- self.resources = ""
- #// textual description of resources used
-
- #// the following defined if active
- # XML is generated in client/app.cpp ACTIVE_TASK::write_gui()
- self.active_task = False
- self.active_task_state = Process.UNINITIALIZED
- self.app_version_num = 0
- self.slot = -1
- self.pid = 0
- self.scheduler_state = CpuSched.UNINITIALIZED
- self.checkpoint_cpu_time = 0.0
- self.current_cpu_time = 0.0
- self.fraction_done = 0.0
- self.elapsed_time = 0.0
- self.swap_size = 0
- self.working_set_size_smoothed = 0.0
- self.too_large = False
- self.needs_shmem = False
- self.graphics_exec_path = ""
- self.web_graphics_url = ""
- self.remote_desktop_addr = ""
- self.slot_path = ""
- #// only present if graphics_exec_path is
-
- # The following are not in original API, but are present in RPC XML reply
- self.completed_time = 0.0
- #// time when ready_to_report was set
- self.report_immediately = False
- self.working_set_size = 0
- self.page_fault_rate = 0.0
- #// derived by higher-level code
-
- # The following are in API, but are NEVER in RPC XML reply. Go figure
- self.signal = 0
-
- self.app = None # APP*
- self.wup = None # WORKUNIT*
- self.project = None # PROJECT*
- self.avp = None # APP_VERSION*
-
- @classmethod
- def parse(cls, xml):
- if not isinstance(xml, ElementTree.Element):
- xml = ElementTree.fromstring(xml)
-
- # parse main XML
- result = super(Result, cls).parse(xml)
-
- # parse '<active_task>' children
- active_task = xml.find('active_task')
- if active_task is None:
- result.active_task = False # already the default after __init__()
- else:
- result.active_task = True # already the default after main parse
- result = setattrs_from_xml(result, active_task)
-
- #// if CPU time is nonzero but elapsed time is zero,
- #// we must be talking to an old client.
- #// Set elapsed = CPU
- #// (easier to deal with this here than in the manager)
- if result.current_cpu_time != 0 and result.elapsed_time == 0:
- result.elapsed_time = result.current_cpu_time
-
- if result.final_cpu_time != 0 and result.final_elapsed_time == 0:
- result.final_elapsed_time = result.final_cpu_time
-
- return result
-
- def __str__(self):
- buf = '{0}:\n'.format(self.__class__.__name__)
- for attr in self.__dict__:
- value = getattr(self, attr)
- if attr in ['received_time', 'report_deadline']:
- value = time.ctime(value)
- buf += '\t{0}\t{1}\n'.format(attr, value)
- return buf
-
-
-class BoincClient(object):
-
- def __init__(self, host="", port=0, passwd=None):
- self.hostname = host
- self.port = port
- self.passwd = passwd
- self.rpc = Rpc(text_output=False)
- self.version = None
- self.authorized = False
-
- # Informative, not authoritative. Records status of *last* RPC call,
- # but does not infer success about the *next* one.
- # Thus, it should be read *after* an RPC call, not prior to one
- self.connected = False
-
- def __enter__(self): self.connect(); return self
- def __exit__(self, *args): self.disconnect()
-
- def connect(self):
- try:
- self.rpc.connect(self.hostname, self.port)
- self.connected = True
- except socket.error:
- self.connected = False
- return
- self.authorized = self.authorize(self.passwd)
- self.version = self.exchange_versions()
-
- def disconnect(self):
- self.rpc.disconnect()
-
- def authorize(self, password):
- ''' Request authorization. If password is None and we are connecting
- to localhost, try to read password from the local config file
- GUI_RPC_PASSWD_FILE. If file can't be read (not found or no
- permission to read), try to authorize with a blank password.
- If authorization is requested and fails, all subsequent calls
- will be refused with socket.error 'Connection reset by peer' (104).
-            Since most local calls do not require authorization, do not attempt
-            it if you're not sure about the password.
- '''
- if password is None and not self.hostname:
- password = read_gui_rpc_password() or ""
- nonce = self.rpc.call('<auth1/>').text
- authhash = hashlib.md5('{0}{1}'.format(nonce, password).encode()).hexdigest().lower()
- reply = self.rpc.call('<auth2><nonce_hash>{0}</nonce_hash></auth2>'.format(authhash))
-
- if reply.tag == 'authorized':
- return True
- else:
- return False
-
- def exchange_versions(self):
- ''' Return VersionInfo instance with core client version info '''
- return VersionInfo.parse(self.rpc.call('<exchange_versions/>'))
-
- def get_tasks(self):
- ''' Same as get_results(active_only=False) '''
- return self.get_results(False)
-
- def get_results(self, active_only=False):
- ''' Get a list of results.
- Those that are in progress will have information such as CPU time
- and fraction done. Each result includes a name;
- Use CC_STATE::lookup_result() to find this result in the current static state;
- if it's not there, call get_state() again.
- '''
- reply = self.rpc.call("<get_results><active_only>{0}</active_only></get_results>".format(1 if active_only else 0))
-        if reply.tag != 'results':
- return []
-
- results = []
- for item in list(reply):
- results.append(Result.parse(item))
-
- return results
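-    # Illustrative sketch (not part of the original module): list in-progress
-    # tasks of a local client, reading the password from GUI_RPC_PASSWD_FILE:
-    #
-    #     with BoincClient() as client:
-    #         for task in client.get_results(active_only=True):
-    #             print(task.name, task.fraction_done)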
-
-
-def read_gui_rpc_password():
-    ''' Read the password string from GUI_RPC_PASSWD_FILE, trim the trailing
-        newline (if any), and return it
- '''
- try:
- with open(GUI_RPC_PASSWD_FILE, 'r') as f:
- buf = f.read()
-            if buf.endswith('\n'): return buf[:-1]  # trim trailing newline
- else: return buf
- except IOError:
- # Permission denied or File not found.
- pass
diff --git a/collectors/python.d.plugin/python_modules/third_party/filelock.py b/collectors/python.d.plugin/python_modules/third_party/filelock.py
deleted file mode 100644
index 4c981672b..000000000
--- a/collectors/python.d.plugin/python_modules/third_party/filelock.py
+++ /dev/null
@@ -1,451 +0,0 @@
-# This is free and unencumbered software released into the public domain.
-#
-# Anyone is free to copy, modify, publish, use, compile, sell, or
-# distribute this software, either in source code form or as a compiled
-# binary, for any purpose, commercial or non-commercial, and by any
-# means.
-#
-# In jurisdictions that recognize copyright laws, the author or authors
-# of this software dedicate any and all copyright interest in the
-# software to the public domain. We make this dedication for the benefit
-# of the public at large and to the detriment of our heirs and
-# successors. We intend this dedication to be an overt act of
-# relinquishment in perpetuity of all present and future rights to this
-# software under copyright law.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# For more information, please refer to <http://unlicense.org>
-
-"""
-A platform independent file lock that supports the with-statement.
-"""
-
-
-# Modules
-# ------------------------------------------------
-import logging
-import os
-import threading
-import time
-try:
- import warnings
-except ImportError:
- warnings = None
-
-try:
- import msvcrt
-except ImportError:
- msvcrt = None
-
-try:
- import fcntl
-except ImportError:
- fcntl = None
-
-
-# Backward compatibility
-# ------------------------------------------------
-try:
- TimeoutError
-except NameError:
- TimeoutError = OSError
-
-
-# Data
-# ------------------------------------------------
-__all__ = [
- "Timeout",
- "BaseFileLock",
- "WindowsFileLock",
- "UnixFileLock",
- "SoftFileLock",
- "FileLock"
-]
-
-__version__ = "3.0.12"
-
-
-_logger = None
-def logger():
- """Returns the logger instance used in this module."""
- global _logger
- _logger = _logger or logging.getLogger(__name__)
- return _logger
-
-
-# Exceptions
-# ------------------------------------------------
-class Timeout(TimeoutError):
- """
- Raised when the lock could not be acquired in *timeout*
- seconds.
- """
-
- def __init__(self, lock_file):
- """
- """
- #: The path of the file lock.
- self.lock_file = lock_file
- return None
-
- def __str__(self):
- temp = "The file lock '{}' could not be acquired."\
- .format(self.lock_file)
- return temp
-
-
-# Classes
-# ------------------------------------------------
-
-# This is a helper class which is returned by :meth:`BaseFileLock.acquire`
-# and wraps the lock to make sure __enter__ is not called twice when entering
-# the with statement.
-# If we would simply return *self*, the lock would be acquired again
-# in the *__enter__* method of the BaseFileLock, but not released again
-# automatically.
-#
-# :seealso: issue #37 (memory leak)
-class _Acquire_ReturnProxy(object):
-
- def __init__(self, lock):
- self.lock = lock
- return None
-
- def __enter__(self):
- return self.lock
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.lock.release()
- return None
-
-
-class BaseFileLock(object):
- """
- Implements the base class of a file lock.
- """
-
- def __init__(self, lock_file, timeout = -1):
- """
- """
- # The path to the lock file.
- self._lock_file = lock_file
-
- # The file descriptor for the *_lock_file* as it is returned by the
- # os.open() function.
- # This file lock is only NOT None, if the object currently holds the
- # lock.
- self._lock_file_fd = None
-
- # The default timeout value.
- self.timeout = timeout
-
- # We use this lock primarily for the lock counter.
- self._thread_lock = threading.Lock()
-
- # The lock counter is used for implementing the nested locking
- # mechanism. Whenever the lock is acquired, the counter is increased and
- # the lock is only released, when this value is 0 again.
- self._lock_counter = 0
- return None
-
- @property
- def lock_file(self):
- """
- The path to the lock file.
- """
- return self._lock_file
-
- @property
- def timeout(self):
- """
-        You can set a default timeout for the file lock. It is used as the
-        fallback value in the acquire method if no timeout value (*None*) is
-        given.
-
-        If you want to disable the timeout, set it to a negative value.
-
-        A timeout of 0 means that there is exactly one attempt to acquire the
-        file lock.
-
- .. versionadded:: 2.0.0
- """
- return self._timeout
-
- @timeout.setter
- def timeout(self, value):
- """
- """
- self._timeout = float(value)
- return None
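-    # Illustrative sketch (not part of the original module) of the timeout
-    # semantics described above:
-    #
-    #     lock.timeout = -1    # block until the lock is acquired
-    #     lock.timeout = 0     # exactly one attempt, then Timeout if it fails
-    #     lock.timeout = 10.5  # give up after 10.5 seconds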
-
- # Platform dependent locking
- # --------------------------------------------
-
- def _acquire(self):
- """
- Platform dependent. If the file lock could be
- acquired, self._lock_file_fd holds the file descriptor
- of the lock file.
- """
- raise NotImplementedError()
-
- def _release(self):
- """
- Releases the lock and sets self._lock_file_fd to None.
- """
- raise NotImplementedError()
-
- # Platform independent methods
- # --------------------------------------------
-
- @property
- def is_locked(self):
- """
- True, if the object holds the file lock.
-
- .. versionchanged:: 2.0.0
-
- This was previously a method and is now a property.
- """
- return self._lock_file_fd is not None
-
- def acquire(self, timeout=None, poll_intervall=0.05):
- """
- Acquires the file lock or fails with a :exc:`Timeout` error.
-
- .. code-block:: python
-
- # You can use this method in the context manager (recommended)
- with lock.acquire():
- pass
-
- # Or use an equivalent try-finally construct:
- lock.acquire()
- try:
- pass
- finally:
- lock.release()
-
- :arg float timeout:
- The maximum time waited for the file lock.
- If ``timeout < 0``, there is no timeout and this method will
- block until the lock could be acquired.
- If ``timeout`` is None, the default :attr:`~timeout` is used.
-
- :arg float poll_intervall:
-            We check once every *poll_intervall* seconds whether the file
-            lock can be acquired.
-
- :raises Timeout:
- if the lock could not be acquired in *timeout* seconds.
-
- .. versionchanged:: 2.0.0
-
-            This method now returns a *proxy* object instead of *self*,
- so that it can be used in a with statement without side effects.
- """
- # Use the default timeout, if no timeout is provided.
- if timeout is None:
- timeout = self.timeout
-
- # Increment the number right at the beginning.
- # We can still undo it, if something fails.
- with self._thread_lock:
- self._lock_counter += 1
-
- lock_id = id(self)
- lock_filename = self._lock_file
- start_time = time.time()
- try:
- while True:
- with self._thread_lock:
- if not self.is_locked:
- logger().debug('Attempting to acquire lock %s on %s', lock_id, lock_filename)
- self._acquire()
-
- if self.is_locked:
- logger().info('Lock %s acquired on %s', lock_id, lock_filename)
- break
- elif timeout >= 0 and time.time() - start_time > timeout:
- logger().debug('Timeout on acquiring lock %s on %s', lock_id, lock_filename)
- raise Timeout(self._lock_file)
- else:
- logger().debug(
- 'Lock %s not acquired on %s, waiting %s seconds ...',
- lock_id, lock_filename, poll_intervall
- )
- time.sleep(poll_intervall)
- except:
- # Something did go wrong, so decrement the counter.
- with self._thread_lock:
- self._lock_counter = max(0, self._lock_counter - 1)
-
- raise
- return _Acquire_ReturnProxy(lock = self)
-
- def release(self, force = False):
- """
- Releases the file lock.
-
-        Please note that the lock is only completely released if the lock
-        counter is 0.
-
-        Also note that the lock file itself is not automatically deleted.
-
- :arg bool force:
- If true, the lock counter is ignored and the lock is released in
- every case.
- """
- with self._thread_lock:
-
- if self.is_locked:
- self._lock_counter -= 1
-
- if self._lock_counter == 0 or force:
- lock_id = id(self)
- lock_filename = self._lock_file
-
- logger().debug('Attempting to release lock %s on %s', lock_id, lock_filename)
- self._release()
- self._lock_counter = 0
- logger().info('Lock %s released on %s', lock_id, lock_filename)
-
- return None
-
- def __enter__(self):
- self.acquire()
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.release()
- return None
-
- def __del__(self):
- self.release(force = True)
- return None
-
-
-# Windows locking mechanism
-# ~~~~~~~~~~~~~~~~~~~~~~~~~
-
-class WindowsFileLock(BaseFileLock):
- """
- Uses the :func:`msvcrt.locking` function to hard lock the lock file on
- windows systems.
- """
-
- def _acquire(self):
- open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
-
- try:
- fd = os.open(self._lock_file, open_mode)
- except OSError:
- pass
- else:
- try:
- msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
- except (IOError, OSError):
- os.close(fd)
- else:
- self._lock_file_fd = fd
- return None
-
- def _release(self):
- fd = self._lock_file_fd
- self._lock_file_fd = None
- msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
- os.close(fd)
-
- try:
- os.remove(self._lock_file)
- # Probably another instance of the application
- # that acquired the file lock.
- except OSError:
- pass
- return None
-
-# Unix locking mechanism
-# ~~~~~~~~~~~~~~~~~~~~~~
-
-class UnixFileLock(BaseFileLock):
- """
- Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.
- """
-
- def _acquire(self):
- open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
- fd = os.open(self._lock_file, open_mode)
-
- try:
- fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except (IOError, OSError):
- os.close(fd)
- else:
- self._lock_file_fd = fd
- return None
-
- def _release(self):
- # Do not remove the lockfile:
- #
- # https://github.com/benediktschmitt/py-filelock/issues/31
- # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
- fd = self._lock_file_fd
- self._lock_file_fd = None
- fcntl.flock(fd, fcntl.LOCK_UN)
- os.close(fd)
- return None
-
-# Soft lock
-# ~~~~~~~~~
-
-class SoftFileLock(BaseFileLock):
- """
- Simply watches the existence of the lock file.
- """
-
- def _acquire(self):
- open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
- try:
- fd = os.open(self._lock_file, open_mode)
- except (IOError, OSError):
- pass
- else:
- self._lock_file_fd = fd
- return None
-
- def _release(self):
- os.close(self._lock_file_fd)
- self._lock_file_fd = None
-
- try:
- os.remove(self._lock_file)
- # The file is already deleted and that's what we want.
- except OSError:
- pass
- return None
-
-
-# Platform filelock
-# ~~~~~~~~~~~~~~~~~
-
-#: Alias for the lock, which should be used for the current platform. On
-#: Windows, this is an alias for :class:`WindowsFileLock`, on Unix for
-#: :class:`UnixFileLock` and otherwise for :class:`SoftFileLock`.
-FileLock = None
-
-if msvcrt:
- FileLock = WindowsFileLock
-elif fcntl:
- FileLock = UnixFileLock
-else:
- FileLock = SoftFileLock
-
- if warnings is not None:
- warnings.warn("only soft file lock is available")
diff --git a/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
deleted file mode 100644
index f873eac83..000000000
--- a/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
+++ /dev/null
@@ -1,327 +0,0 @@
-# SPDX-License-Identifier: LGPL-2.1
-"""
-@package sensors.py
-Python Bindings for libsensors3
-
-use the documentation of libsensors for the low level API.
-see example.py for high level API usage.
-
-@author: Pavel Rojtberg (http://www.rojtberg.net)
-@see: https://github.com/paroj/sensors.py
-@copyright: LGPLv2 (same as libsensors) <http://opensource.org/licenses/LGPL-2.1>
-"""
-
-from ctypes import *
-import ctypes.util
-
-_libc = cdll.LoadLibrary(ctypes.util.find_library("c"))
-# see https://github.com/paroj/sensors.py/issues/1
-_libc.free.argtypes = [c_void_p]
-
-_hdl = cdll.LoadLibrary(ctypes.util.find_library("sensors"))
-
-version = c_char_p.in_dll(_hdl, "libsensors_version").value.decode("ascii")
-
-
-class SensorsError(Exception):
- pass
-
-
-class ErrorWildcards(SensorsError):
- pass
-
-
-class ErrorNoEntry(SensorsError):
- pass
-
-
-class ErrorAccessRead(SensorsError, OSError):
- pass
-
-
-class ErrorKernel(SensorsError, OSError):
- pass
-
-
-class ErrorDivZero(SensorsError, ZeroDivisionError):
- pass
-
-
-class ErrorChipName(SensorsError):
- pass
-
-
-class ErrorBusName(SensorsError):
- pass
-
-
-class ErrorParse(SensorsError):
- pass
-
-
-class ErrorAccessWrite(SensorsError, OSError):
- pass
-
-
-class ErrorIO(SensorsError, IOError):
- pass
-
-
-class ErrorRecursion(SensorsError):
- pass
-
-
-_ERR_MAP = {
- 1: ErrorWildcards,
- 2: ErrorNoEntry,
- 3: ErrorAccessRead,
- 4: ErrorKernel,
- 5: ErrorDivZero,
- 6: ErrorChipName,
- 7: ErrorBusName,
- 8: ErrorParse,
- 9: ErrorAccessWrite,
- 10: ErrorIO,
- 11: ErrorRecursion
-}
-
-
-def raise_sensor_error(errno, message=''):
- raise _ERR_MAP[abs(errno)](message)
-
-
-class bus_id(Structure):
- _fields_ = [("type", c_short),
- ("nr", c_short)]
-
-
-class chip_name(Structure):
- _fields_ = [("prefix", c_char_p),
- ("bus", bus_id),
- ("addr", c_int),
- ("path", c_char_p)]
-
-
-class feature(Structure):
- _fields_ = [("name", c_char_p),
- ("number", c_int),
- ("type", c_int)]
-
- # sensors_feature_type
- IN = 0x00
- FAN = 0x01
- TEMP = 0x02
- POWER = 0x03
- ENERGY = 0x04
- CURR = 0x05
- HUMIDITY = 0x06
- MAX_MAIN = 0x7
- VID = 0x10
- INTRUSION = 0x11
- MAX_OTHER = 0x12
- BEEP_ENABLE = 0x18
-
-
-class subfeature(Structure):
- _fields_ = [("name", c_char_p),
- ("number", c_int),
- ("type", c_int),
- ("mapping", c_int),
- ("flags", c_uint)]
-
-
-_hdl.sensors_get_detected_chips.restype = POINTER(chip_name)
-_hdl.sensors_get_features.restype = POINTER(feature)
-_hdl.sensors_get_all_subfeatures.restype = POINTER(subfeature)
-_hdl.sensors_get_label.restype = c_void_p # return pointer instead of str so we can free it
-_hdl.sensors_get_adapter_name.restype = c_char_p # docs do not say whether to free this or not
-_hdl.sensors_strerror.restype = c_char_p
-
-### RAW API ###
-MODE_R = 1
-MODE_W = 2
-COMPUTE_MAPPING = 4
-
-
-def init(cfg_file=None):
- file = _libc.fopen(cfg_file.encode("utf-8"), "r") if cfg_file is not None else None
-
- result = _hdl.sensors_init(file)
- if result != 0:
- raise_sensor_error(result, "sensors_init failed")
-
- if file is not None:
- _libc.fclose(file)
-
-
-def cleanup():
- _hdl.sensors_cleanup()
-
-
-def parse_chip_name(orig_name):
- ret = chip_name()
- err = _hdl.sensors_parse_chip_name(orig_name.encode("utf-8"), byref(ret))
-
- if err < 0:
- raise_sensor_error(err, strerror(err))
-
- return ret
-
-
-def strerror(errnum):
- return _hdl.sensors_strerror(errnum).decode("utf-8")
-
-
-def free_chip_name(chip):
- _hdl.sensors_free_chip_name(byref(chip))
-
-
-def get_detected_chips(match, nr):
- """
- @return: (chip, next nr to query)
- """
- _nr = c_int(nr)
-
- if match is not None:
- match = byref(match)
-
- chip = _hdl.sensors_get_detected_chips(match, byref(_nr))
- chip = chip.contents if bool(chip) else None
- return chip, _nr.value
-
-
-def chip_snprintf_name(chip, buffer_size=200):
- """
- @param buffer_size defaults to the size used in the sensors utility
- """
- ret = create_string_buffer(buffer_size)
- err = _hdl.sensors_snprintf_chip_name(ret, buffer_size, byref(chip))
-
- if err < 0:
- raise_sensor_error(err, strerror(err))
-
- return ret.value.decode("utf-8")
-
-
-def do_chip_sets(chip):
- """
- @attention this function was not tested
- """
- err = _hdl.sensors_do_chip_sets(byref(chip))
- if err < 0:
- raise_sensor_error(err, strerror(err))
-
-
-def get_adapter_name(bus):
- return _hdl.sensors_get_adapter_name(byref(bus)).decode("utf-8")
-
-
-def get_features(chip, nr):
- """
- @return: (feature, next nr to query)
- """
- _nr = c_int(nr)
- feature = _hdl.sensors_get_features(byref(chip), byref(_nr))
- feature = feature.contents if bool(feature) else None
- return feature, _nr.value
-
-
-def get_label(chip, feature):
- ptr = _hdl.sensors_get_label(byref(chip), byref(feature))
- val = cast(ptr, c_char_p).value.decode("utf-8")
- _libc.free(ptr)
- return val
-
-
-def get_all_subfeatures(chip, feature, nr):
- """
- @return: (subfeature, next nr to query)
- """
- _nr = c_int(nr)
- subfeature = _hdl.sensors_get_all_subfeatures(byref(chip), byref(feature), byref(_nr))
- subfeature = subfeature.contents if bool(subfeature) else None
- return subfeature, _nr.value
-
-
-def get_value(chip, subfeature_nr):
- val = c_double()
- err = _hdl.sensors_get_value(byref(chip), subfeature_nr, byref(val))
- if err < 0:
- raise_sensor_error(err, strerror(err))
- return val.value
-
-
-def set_value(chip, subfeature_nr, value):
- """
- @attention this function was not tested
- """
- val = c_double(value)
- err = _hdl.sensors_set_value(byref(chip), subfeature_nr, byref(val))
- if err < 0:
- raise_sensor_error(err, strerror(err))
-
-
-### Convenience API ###
-class ChipIterator:
- def __init__(self, match=None):
- self.match = parse_chip_name(match) if match is not None else None
- self.nr = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- chip, self.nr = get_detected_chips(self.match, self.nr)
-
- if chip is None:
- raise StopIteration
-
- return chip
-
- def __del__(self):
- if self.match is not None:
- free_chip_name(self.match)
-
-    def next(self): # python2 compatibility
- return self.__next__()
-
-
-class FeatureIterator:
- def __init__(self, chip):
- self.chip = chip
- self.nr = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- feature, self.nr = get_features(self.chip, self.nr)
-
- if feature is None:
- raise StopIteration
-
- return feature
-
-    def next(self): # python2 compatibility
- return self.__next__()
-
-
-class SubFeatureIterator:
- def __init__(self, chip, feature):
- self.chip = chip
- self.feature = feature
- self.nr = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- subfeature, self.nr = get_all_subfeatures(self.chip, self.feature, self.nr)
-
- if subfeature is None:
- raise StopIteration
-
- return subfeature
-
-    def next(self): # python2 compatibility
- return self.__next__()
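-# Illustrative sketch (not part of the original module): dump every label and
-# value via the convenience iterators (assumes libsensors is available):
-#
-#     init()
-#     try:
-#         for chip in ChipIterator():
-#             print(chip_snprintf_name(chip))
-#             for feature in FeatureIterator(chip):
-#                 label = get_label(chip, feature)
-#                 for sub in SubFeatureIterator(chip, feature):
-#                     print(label, get_value(chip, sub.number))
-#     finally:
-#         cleanup()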
diff --git a/collectors/python.d.plugin/python_modules/third_party/mcrcon.py b/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
deleted file mode 100644
index a65a304b6..000000000
--- a/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Minecraft Remote Console module.
-#
-# Copyright (C) 2015 Barnaby Gale
-#
-# SPDX-License-Identifier: MIT
-
-import socket
-import select
-import struct
-import time
-
-
-class MCRconException(Exception):
- pass
-
-
-class MCRcon(object):
- socket = None
-
- def connect(self, host, port, password):
- if self.socket is not None:
- raise MCRconException("Already connected")
- self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.socket.settimeout(0.9)
- self.socket.connect((host, port))
- self.send(3, password)
-
- def disconnect(self):
- if self.socket is None:
- raise MCRconException("Already disconnected")
- self.socket.close()
- self.socket = None
-
- def read(self, length):
- data = b""
- while len(data) < length:
- data += self.socket.recv(length - len(data))
- return data
-
- def send(self, out_type, out_data):
- if self.socket is None:
- raise MCRconException("Must connect before sending data")
-
- # Send a request packet
- out_payload = struct.pack('<ii', 0, out_type) + out_data.encode('utf8') + b'\x00\x00'
- out_length = struct.pack('<i', len(out_payload))
- self.socket.send(out_length + out_payload)
-
- # Read response packets
- in_data = ""
- while True:
- # Read a packet
- in_length, = struct.unpack('<i', self.read(4))
- in_payload = self.read(in_length)
-            in_id, in_type = struct.unpack('<ii', in_payload[:8])
- in_data_partial, in_padding = in_payload[8:-2], in_payload[-2:]
-
- # Sanity checks
- if in_padding != b'\x00\x00':
- raise MCRconException("Incorrect padding")
- if in_id == -1:
- raise MCRconException("Login failed")
-
- # Record the response
- in_data += in_data_partial.decode('utf8')
-
- # If there's nothing more to receive, return the response
- if len(select.select([self.socket], [], [], 0)[0]) == 0:
- return in_data
-
- def command(self, command):
- result = self.send(2, command)
- time.sleep(0.003) # MC-72390 workaround
- return result
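-    # Illustrative sketch (not part of the original module); host, port and
-    # password are assumptions for the example:
-    #
-    #     rcon = MCRcon()
-    #     rcon.connect('localhost', 25575, 'secret')
-    #     print(rcon.command('list'))
-    #     rcon.disconnect()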
diff --git a/collectors/python.d.plugin/python_modules/third_party/monotonic.py b/collectors/python.d.plugin/python_modules/third_party/monotonic.py
deleted file mode 100644
index 4ebd556c3..000000000
--- a/collectors/python.d.plugin/python_modules/third_party/monotonic.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# SPDX-License-Identifier: Apache-2.0
-"""
- monotonic
- ~~~~~~~~~
-
- This module provides a ``monotonic()`` function which returns the
- value (in fractional seconds) of a clock which never goes backwards.
-
- On Python 3.3 or newer, ``monotonic`` will be an alias of
- ``time.monotonic`` from the standard library. On older versions,
- it will fall back to an equivalent implementation:
-
- +-------------+----------------------------------------+
- | Linux, BSD | ``clock_gettime(3)`` |
- +-------------+----------------------------------------+
- | Windows | ``GetTickCount`` or ``GetTickCount64`` |
- +-------------+----------------------------------------+
- | OS X | ``mach_absolute_time`` |
- +-------------+----------------------------------------+
-
- If no suitable implementation exists for the current platform,
- attempting to import this module (or to import from it) will
- cause a ``RuntimeError`` exception to be raised.
-
-
- Copyright 2014, 2015, 2016 Ori Livneh <ori@wikimedia.org>
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-"""
-import time
-
-
-__all__ = ('monotonic',)
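-# Illustrative usage sketch (not part of the original module); do_work() is a
-# hypothetical workload:
-#
-#     start = monotonic()
-#     do_work()
-#     elapsed = monotonic() - start  # unaffected by system clock adjustments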
-
-
-try:
- monotonic = time.monotonic
-except AttributeError:
- import ctypes
- import ctypes.util
- import os
- import sys
- import threading
-
-
- def clock_clock_gettime_c_library():
- return ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True).clock_gettime
-
-
- def clock_clock_gettime_rt_library():
- return ctypes.CDLL(ctypes.util.find_library('rt'), use_errno=True).clock_gettime
-
-
- def clock_clock_gettime_c_library_synology6():
- return ctypes.CDLL('/usr/lib/libc.so.6', use_errno=True).clock_gettime
-
-
- def clock_clock_gettime_rt_library_synology6():
- return ctypes.CDLL('/usr/lib/librt.so.1', use_errno=True).clock_gettime
-
-
- def clock_gettime_linux():
- # see https://github.com/netdata/netdata/issues/7976
- order = [
- clock_clock_gettime_c_library,
- clock_clock_gettime_rt_library,
- clock_clock_gettime_c_library_synology6,
- clock_clock_gettime_rt_library_synology6,
- ]
-
- for gettime in order:
- try:
- return gettime()
- except (RuntimeError, AttributeError, OSError):
- continue
-        raise RuntimeError('cannot find the c and rt libraries')
-
-
- try:
- if sys.platform == 'darwin': # OS X, iOS
- # See Technical Q&A QA1398 of the Mac Developer Library:
- # <https://developer.apple.com/library/mac/qa/qa1398/>
- libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
-
- class mach_timebase_info_data_t(ctypes.Structure):
- """System timebase info. Defined in <mach/mach_time.h>."""
- _fields_ = (('numer', ctypes.c_uint32),
- ('denom', ctypes.c_uint32))
-
- mach_absolute_time = libc.mach_absolute_time
- mach_absolute_time.restype = ctypes.c_uint64
-
- timebase = mach_timebase_info_data_t()
- libc.mach_timebase_info(ctypes.byref(timebase))
- ticks_per_second = timebase.numer / timebase.denom * 1.0e9
-
- def monotonic():
- """Monotonic clock, cannot go backward."""
- return mach_absolute_time() / ticks_per_second
-
- elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
- if sys.platform.startswith('cygwin'):
- # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since
- # version 1.7.6. Using raw WinAPI for maximum version compatibility.
-
- # Ugly hack using the wrong calling convention (in 32-bit mode)
- # because ctypes has no windll under cygwin (and it also seems that
- # the code letting you select stdcall in _ctypes doesn't exist under
- # the preprocessor definitions relevant to cygwin).
- # This is 'safe' because:
- # 1. The ABI of GetTickCount and GetTickCount64 is identical for
- # both calling conventions because they both have no parameters.
- # 2. libffi masks the problem because after making the call it doesn't
- # touch anything through esp and epilogue code restores a correct
- # esp from ebp afterwards.
- try:
- kernel32 = ctypes.cdll.kernel32
- except OSError: # 'No such file or directory'
- kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll')
- else:
- kernel32 = ctypes.windll.kernel32
-
- GetTickCount64 = getattr(kernel32, 'GetTickCount64', None)
- if GetTickCount64:
- # Windows Vista / Windows Server 2008 or newer.
- GetTickCount64.restype = ctypes.c_ulonglong
-
- def monotonic():
- """Monotonic clock, cannot go backward."""
- return GetTickCount64() / 1000.0
-
- else:
- # Before Windows Vista.
- GetTickCount = kernel32.GetTickCount
- GetTickCount.restype = ctypes.c_uint32
-
- get_tick_count_lock = threading.Lock()
- get_tick_count_last_sample = 0
- get_tick_count_wraparounds = 0
-
- def monotonic():
- """Monotonic clock, cannot go backward."""
- global get_tick_count_last_sample
- global get_tick_count_wraparounds
-
- with get_tick_count_lock:
- current_sample = GetTickCount()
- if current_sample < get_tick_count_last_sample:
- get_tick_count_wraparounds += 1
- get_tick_count_last_sample = current_sample
-
- final_milliseconds = get_tick_count_wraparounds << 32
- final_milliseconds += get_tick_count_last_sample
- return final_milliseconds / 1000.0
-
- else:
- clock_gettime = clock_gettime_linux()
-
- class timespec(ctypes.Structure):
- """Time specification, as described in clock_gettime(3)."""
- _fields_ = (('tv_sec', ctypes.c_long),
- ('tv_nsec', ctypes.c_long))
-
- if sys.platform.startswith('linux'):
- CLOCK_MONOTONIC = 1
- elif sys.platform.startswith('freebsd'):
- CLOCK_MONOTONIC = 4
- elif sys.platform.startswith('sunos5'):
- CLOCK_MONOTONIC = 4
- elif 'bsd' in sys.platform:
- CLOCK_MONOTONIC = 3
- elif sys.platform.startswith('aix'):
- CLOCK_MONOTONIC = ctypes.c_longlong(10)
-
- def monotonic():
- """Monotonic clock, cannot go backward."""
- ts = timespec()
- if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)):
- errno = ctypes.get_errno()
- raise OSError(errno, os.strerror(errno))
- return ts.tv_sec + ts.tv_nsec / 1.0e9
-
- # Perform a sanity-check.
- if monotonic() - monotonic() > 0:
- raise ValueError('monotonic() is not monotonic!')
-
- except Exception as e:
- raise RuntimeError('no suitable implementation for this system: ' + repr(e))
diff --git a/collectors/python.d.plugin/python_modules/third_party/ordereddict.py b/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
deleted file mode 100644
index 589401b8f..000000000
--- a/collectors/python.d.plugin/python_modules/third_party/ordereddict.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (c) 2009 Raymond Hettinger
-#
-# SPDX-License-Identifier: MIT
-
-from UserDict import DictMixin
-
-
-class OrderedDict(dict, DictMixin):
-
- def __init__(self, *args, **kwds):
- if len(args) > 1:
- raise TypeError('expected at most 1 arguments, got %d' % len(args))
- try:
- self.__end
- except AttributeError:
- self.clear()
- self.update(*args, **kwds)
-
- def clear(self):
- self.__end = end = []
- end += [None, end, end] # sentinel node for doubly linked list
- self.__map = {} # key --> [key, prev, next]
- dict.clear(self)
-
- def __setitem__(self, key, value):
- if key not in self:
- end = self.__end
- curr = end[1]
- curr[2] = end[1] = self.__map[key] = [key, curr, end]
- dict.__setitem__(self, key, value)
-
- def __delitem__(self, key):
- dict.__delitem__(self, key)
- key, prev, next = self.__map.pop(key)
- prev[2] = next
- next[1] = prev
-
- def __iter__(self):
- end = self.__end
- curr = end[2]
- while curr is not end:
- yield curr[0]
- curr = curr[2]
-
- def __reversed__(self):
- end = self.__end
- curr = end[1]
- while curr is not end:
- yield curr[0]
- curr = curr[1]
-
- def popitem(self, last=True):
- if not self:
- raise KeyError('dictionary is empty')
- if last:
- key = reversed(self).next()
- else:
- key = iter(self).next()
- value = self.pop(key)
- return key, value
-
- def __reduce__(self):
- items = [[k, self[k]] for k in self]
- tmp = self.__map, self.__end
- del self.__map, self.__end
- inst_dict = vars(self).copy()
- self.__map, self.__end = tmp
- if inst_dict:
- return self.__class__, (items,), inst_dict
- return self.__class__, (items,)
-
- def keys(self):
- return list(self)
-
- setdefault = DictMixin.setdefault
- update = DictMixin.update
- pop = DictMixin.pop
- values = DictMixin.values
- items = DictMixin.items
- iterkeys = DictMixin.iterkeys
- itervalues = DictMixin.itervalues
- iteritems = DictMixin.iteritems
-
- def __repr__(self):
- if not self:
- return '%s()' % (self.__class__.__name__,)
- return '%s(%r)' % (self.__class__.__name__, self.items())
-
- def copy(self):
- return self.__class__(self)
-
- @classmethod
- def fromkeys(cls, iterable, value=None):
- d = cls()
- for key in iterable:
- d[key] = value
- return d
-
- def __eq__(self, other):
- if isinstance(other, OrderedDict):
- if len(self) != len(other):
- return False
- for p, q in zip(self.items(), other.items()):
- if p != q:
- return False
- return True
- return dict.__eq__(self, other)
-
- def __ne__(self, other):
- return not self == other
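-    # Illustrative sketch (not part of the original module): insertion order
-    # is preserved, unlike a plain dict on Python 2:
-    #
-    #     d = OrderedDict([('b', 1), ('a', 2)])
-    #     d['c'] = 3
-    #     d.keys()  # -> ['b', 'a', 'c']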
diff --git a/collectors/python.d.plugin/python_modules/urllib3/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/__init__.py
deleted file mode 100644
index 3add84816..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/__init__.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-urllib3 - Thread-safe connection pooling and re-using.
-"""
-
-from __future__ import absolute_import
-import warnings
-
-from .connectionpool import (
- HTTPConnectionPool,
- HTTPSConnectionPool,
- connection_from_url
-)
-
-from . import exceptions
-from .filepost import encode_multipart_formdata
-from .poolmanager import PoolManager, ProxyManager, proxy_from_url
-from .response import HTTPResponse
-from .util.request import make_headers
-from .util.url import get_host
-from .util.timeout import Timeout
-from .util.retry import Retry
-
-
-# Set default logging handler to avoid "No handler found" warnings.
-import logging
-try: # Python 2.7+
- from logging import NullHandler
-except ImportError:
- class NullHandler(logging.Handler):
- def emit(self, record):
- pass
-
-__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
-__license__ = 'MIT'
-__version__ = '1.21.1'
-
-__all__ = (
- 'HTTPConnectionPool',
- 'HTTPSConnectionPool',
- 'PoolManager',
- 'ProxyManager',
- 'HTTPResponse',
- 'Retry',
- 'Timeout',
- 'add_stderr_logger',
- 'connection_from_url',
- 'disable_warnings',
- 'encode_multipart_formdata',
- 'get_host',
- 'make_headers',
- 'proxy_from_url',
-)
-
-logging.getLogger(__name__).addHandler(NullHandler())
-
-
-def add_stderr_logger(level=logging.DEBUG):
- """
- Helper for quickly adding a StreamHandler to the logger. Useful for
- debugging.
-
- Returns the handler after adding it.
- """
- # This method needs to be in this __init__.py to get the __name__ correct
- # even if urllib3 is vendored within another package.
- logger = logging.getLogger(__name__)
- handler = logging.StreamHandler()
- handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
- logger.addHandler(handler)
- logger.setLevel(level)
- logger.debug('Added a stderr logging handler to logger: %s', __name__)
- return handler
-
-
-# ... Clean up.
-del NullHandler
-
-
-# All warning filters *must* be appended unless you're really certain that they
-# shouldn't be: otherwise, it's very hard for users to use most Python
-# mechanisms to silence them.
-# SecurityWarning's always go off by default.
-warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
-# SubjectAltNameWarning's should go off once per host
-warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
-# InsecurePlatformWarning's don't vary between requests, so we keep it default.
-warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
- append=True)
-# SNIMissingWarnings should go off only once.
-warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
-
-
-def disable_warnings(category=exceptions.HTTPWarning):
- """
- Helper for quickly disabling all urllib3 warnings.
- """
- warnings.simplefilter('ignore', category)
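-# Illustrative usage sketch (not part of the original package); it assumes the
-# vendored package is importable as urllib3:
-#
-#     import urllib3
-#     http = urllib3.PoolManager()
-#     r = http.request('GET', 'http://example.com/')
-#     print(r.status)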
diff --git a/collectors/python.d.plugin/python_modules/urllib3/_collections.py b/collectors/python.d.plugin/python_modules/urllib3/_collections.py
deleted file mode 100644
index 2a6b3ec70..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/_collections.py
+++ /dev/null
@@ -1,320 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-
-try:
- from collections import Mapping, MutableMapping
-except ImportError:
- from collections.abc import Mapping, MutableMapping
-
-try:
- from threading import RLock
-except ImportError: # Platform-specific: No threads available
- class RLock:
- def __enter__(self):
- pass
-
- def __exit__(self, exc_type, exc_value, traceback):
- pass
-
-
-try: # Python 2.7+
- from collections import OrderedDict
-except ImportError:
- from .packages.ordered_dict import OrderedDict
-from .packages.six import iterkeys, itervalues, PY3
-
-
-__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
-
-
-_Null = object()
-
-
-class RecentlyUsedContainer(MutableMapping):
- """
- Provides a thread-safe dict-like container which maintains up to
- ``maxsize`` keys while throwing away the least-recently-used keys beyond
- ``maxsize``.
-
- :param maxsize:
- Maximum number of recent elements to retain.
-
- :param dispose_func:
- Every time an item is evicted from the container,
-        ``dispose_func(value)`` is called.
- """
-
- ContainerCls = OrderedDict
-
- def __init__(self, maxsize=10, dispose_func=None):
- self._maxsize = maxsize
- self.dispose_func = dispose_func
-
- self._container = self.ContainerCls()
- self.lock = RLock()
-
- def __getitem__(self, key):
- # Re-insert the item, moving it to the end of the eviction line.
- with self.lock:
- item = self._container.pop(key)
- self._container[key] = item
- return item
-
- def __setitem__(self, key, value):
- evicted_value = _Null
- with self.lock:
- # Possibly evict the existing value of 'key'
- evicted_value = self._container.get(key, _Null)
- self._container[key] = value
-
- # If we didn't evict an existing value, we might have to evict the
- # least recently used item from the beginning of the container.
- if len(self._container) > self._maxsize:
- _key, evicted_value = self._container.popitem(last=False)
-
- if self.dispose_func and evicted_value is not _Null:
- self.dispose_func(evicted_value)
-
- def __delitem__(self, key):
- with self.lock:
- value = self._container.pop(key)
-
- if self.dispose_func:
- self.dispose_func(value)
-
- def __len__(self):
- with self.lock:
- return len(self._container)
-
- def __iter__(self):
- raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
-
- def clear(self):
- with self.lock:
- # Copy pointers to all values, then wipe the mapping
- values = list(itervalues(self._container))
- self._container.clear()
-
- if self.dispose_func:
- for value in values:
- self.dispose_func(value)
-
- def keys(self):
- with self.lock:
- return list(iterkeys(self._container))
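-    # Illustrative sketch (not part of the original package); conn_a/b/c are
-    # hypothetical objects with a close() method:
-    #
-    #     pool = RecentlyUsedContainer(maxsize=2, dispose_func=lambda v: v.close())
-    #     pool['a'] = conn_a
-    #     pool['b'] = conn_b
-    #     pool['c'] = conn_c  # evicts 'a' and calls conn_a.close()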
-
-
-class HTTPHeaderDict(MutableMapping):
- """
- :param headers:
- An iterable of field-value pairs. Must not contain multiple field names
- when compared case-insensitively.
-
- :param kwargs:
- Additional field-value pairs to pass in to ``dict.update``.
-
- A ``dict`` like container for storing HTTP Headers.
-
- Field names are stored and compared case-insensitively in compliance with
- RFC 7230. Iteration provides the first case-sensitive key seen for each
- case-insensitive pair.
-
- Using ``__setitem__`` syntax overwrites fields that compare equal
- case-insensitively in order to maintain ``dict``'s api. For fields that
- compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
- in a loop.
-
- If multiple fields that are equal case-insensitively are passed to the
- constructor or ``.update``, the behavior is undefined and some will be
- lost.
-
- >>> headers = HTTPHeaderDict()
- >>> headers.add('Set-Cookie', 'foo=bar')
- >>> headers.add('set-cookie', 'baz=quxx')
- >>> headers['content-length'] = '7'
- >>> headers['SET-cookie']
- 'foo=bar, baz=quxx'
- >>> headers['Content-Length']
- '7'
- """
-
- def __init__(self, headers=None, **kwargs):
- super(HTTPHeaderDict, self).__init__()
- self._container = OrderedDict()
- if headers is not None:
- if isinstance(headers, HTTPHeaderDict):
- self._copy_from(headers)
- else:
- self.extend(headers)
- if kwargs:
- self.extend(kwargs)
-
- def __setitem__(self, key, val):
- self._container[key.lower()] = [key, val]
- return self._container[key.lower()]
-
- def __getitem__(self, key):
- val = self._container[key.lower()]
- return ', '.join(val[1:])
-
- def __delitem__(self, key):
- del self._container[key.lower()]
-
- def __contains__(self, key):
- return key.lower() in self._container
-
- def __eq__(self, other):
- if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
- return False
- if not isinstance(other, type(self)):
- other = type(self)(other)
- return (dict((k.lower(), v) for k, v in self.itermerged()) ==
- dict((k.lower(), v) for k, v in other.itermerged()))
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- if not PY3: # Python 2
- iterkeys = MutableMapping.iterkeys
- itervalues = MutableMapping.itervalues
-
- __marker = object()
-
- def __len__(self):
- return len(self._container)
-
- def __iter__(self):
- # Only provide the originally cased names
- for vals in self._container.values():
- yield vals[0]
-
- def pop(self, key, default=__marker):
- '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
- If key is not found, d is returned if given, otherwise KeyError is raised.
- '''
- # Using the MutableMapping function directly fails due to the private marker.
- # Using ordinary dict.pop would expose the internal structures.
- # So let's reinvent the wheel.
- try:
- value = self[key]
- except KeyError:
- if default is self.__marker:
- raise
- return default
- else:
- del self[key]
- return value
-
- def discard(self, key):
- try:
- del self[key]
- except KeyError:
- pass
-
- def add(self, key, val):
- """Adds a (name, value) pair, doesn't overwrite the value if it already
- exists.
-
- >>> headers = HTTPHeaderDict(foo='bar')
- >>> headers.add('Foo', 'baz')
- >>> headers['foo']
- 'bar, baz'
- """
- key_lower = key.lower()
- new_vals = [key, val]
- # Keep the common case aka no item present as fast as possible
- vals = self._container.setdefault(key_lower, new_vals)
- if new_vals is not vals:
- vals.append(val)
-
- def extend(self, *args, **kwargs):
- """Generic import function for any type of header-like object.
- Adapted version of MutableMapping.update in order to insert items
- with self.add instead of self.__setitem__
- """
- if len(args) > 1:
- raise TypeError("extend() takes at most 1 positional "
- "arguments ({0} given)".format(len(args)))
- other = args[0] if len(args) >= 1 else ()
-
- if isinstance(other, HTTPHeaderDict):
- for key, val in other.iteritems():
- self.add(key, val)
- elif isinstance(other, Mapping):
- for key in other:
- self.add(key, other[key])
- elif hasattr(other, "keys"):
- for key in other.keys():
- self.add(key, other[key])
- else:
- for key, value in other:
- self.add(key, value)
-
- for key, value in kwargs.items():
- self.add(key, value)
-
- def getlist(self, key):
- """Returns a list of all the values for the named field. Returns an
- empty list if the key doesn't exist."""
- try:
- vals = self._container[key.lower()]
- except KeyError:
- return []
- else:
- return vals[1:]
-
- # Backwards compatibility for httplib
- getheaders = getlist
- getallmatchingheaders = getlist
- iget = getlist
-
- def __repr__(self):
- return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
-
- def _copy_from(self, other):
- for key in other:
- val = other.getlist(key)
- if isinstance(val, list):
- # Don't need to convert tuples
- val = list(val)
- self._container[key.lower()] = [key] + val
-
- def copy(self):
- clone = type(self)()
- clone._copy_from(self)
- return clone
-
- def iteritems(self):
- """Iterate over all header lines, including duplicate ones."""
- for key in self:
- vals = self._container[key.lower()]
- for val in vals[1:]:
- yield vals[0], val
-
- def itermerged(self):
- """Iterate over all headers, merging duplicate ones together."""
- for key in self:
- val = self._container[key.lower()]
- yield val[0], ', '.join(val[1:])
-
- def items(self):
- return list(self.iteritems())
-
- @classmethod
- def from_httplib(cls, message): # Python 2
- """Read headers from a Python 2 httplib message object."""
- # python2.7 does not expose a proper API for exporting multiheaders
- # efficiently. This function re-reads raw lines from the message
- # object and extracts the multiheaders properly.
- headers = []
-
- for line in message.headers:
- if line.startswith((' ', '\t')):
- key, value = headers[-1]
- headers[-1] = (key, value + '\r\n' + line.rstrip())
- continue
-
- key, value = line.split(':', 1)
- headers.append((key, value.strip()))
-
- return cls(headers)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/connection.py b/collectors/python.d.plugin/python_modules/urllib3/connection.py
deleted file mode 100644
index f757493c7..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/connection.py
+++ /dev/null
@@ -1,374 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import datetime
-import logging
-import os
-import sys
-import socket
-from socket import error as SocketError, timeout as SocketTimeout
-import warnings
-from .packages import six
-from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
-from .packages.six.moves.http_client import HTTPException # noqa: F401
-
-try: # Compiled with SSL?
- import ssl
- BaseSSLError = ssl.SSLError
-except (ImportError, AttributeError): # Platform-specific: No SSL.
- ssl = None
-
- class BaseSSLError(BaseException):
- pass
-
-
-try: # Python 3:
- # Not a no-op, we're adding this to the namespace so it can be imported.
- ConnectionError = ConnectionError
-except NameError: # Python 2:
- class ConnectionError(Exception):
- pass
-
-
-from .exceptions import (
- NewConnectionError,
- ConnectTimeoutError,
- SubjectAltNameWarning,
- SystemTimeWarning,
-)
-from .packages.ssl_match_hostname import match_hostname, CertificateError
-
-from .util.ssl_ import (
- resolve_cert_reqs,
- resolve_ssl_version,
- assert_fingerprint,
- create_urllib3_context,
- ssl_wrap_socket
-)
-
-
-from .util import connection
-
-from ._collections import HTTPHeaderDict
-
-log = logging.getLogger(__name__)
-
-port_by_scheme = {
- 'http': 80,
- 'https': 443,
-}
-
-# When updating RECENT_DATE, move it to
-# within two years of the current date, and no
-# earlier than 6 months ago.
-RECENT_DATE = datetime.date(2016, 1, 1)
-
-
-class DummyConnection(object):
- """Used to detect a failed ConnectionCls import."""
- pass
-
-
-class HTTPConnection(_HTTPConnection, object):
- """
- Based on httplib.HTTPConnection but provides an extra constructor
- backwards-compatibility layer between older and newer Pythons.
-
- Additional keyword parameters are used to configure attributes of the connection.
- Accepted parameters include:
-
- - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- - ``source_address``: Set the source address for the current connection.
-
- .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
-
- - ``socket_options``: Set specific options on the underlying socket. If not specified, then
- defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
- Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
-
- For example, if you wish to enable TCP Keep Alive in addition to the defaults,
- you might pass::
-
- HTTPConnection.default_socket_options + [
- (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
- ]
-
- Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
- """
-
- default_port = port_by_scheme['http']
-
- #: Disable Nagle's algorithm by default.
- #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
- default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
-
- #: Whether this connection verifies the host's certificate.
- is_verified = False
-
- def __init__(self, *args, **kw):
- if six.PY3: # Python 3
- kw.pop('strict', None)
-
- # Pre-set source_address in case we have an older Python like 2.6.
- self.source_address = kw.get('source_address')
-
- if sys.version_info < (2, 7): # Python 2.6
- # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
- # not newer versions. We can still use it when creating a
- # connection though, so we pop it *after* we have saved it as
- # self.source_address.
- kw.pop('source_address', None)
-
- #: The socket options provided by the user. If no options are
- #: provided, we use the default options.
- self.socket_options = kw.pop('socket_options', self.default_socket_options)
-
- # Superclass also sets self.source_address in Python 2.7+.
- _HTTPConnection.__init__(self, *args, **kw)
-
- def _new_conn(self):
- """ Establish a socket connection and set nodelay settings on it.
-
- :return: New socket connection.
- """
- extra_kw = {}
- if self.source_address:
- extra_kw['source_address'] = self.source_address
-
- if self.socket_options:
- extra_kw['socket_options'] = self.socket_options
-
- try:
- conn = connection.create_connection(
- (self.host, self.port), self.timeout, **extra_kw)
-
- except SocketTimeout as e:
- raise ConnectTimeoutError(
- self, "Connection to %s timed out. (connect timeout=%s)" %
- (self.host, self.timeout))
-
- except SocketError as e:
- raise NewConnectionError(
- self, "Failed to establish a new connection: %s" % e)
-
- return conn
-
- def _prepare_conn(self, conn):
- self.sock = conn
-        # The _tunnel_host attribute was added in Python 2.6.3 (via
-        # http://hg.python.org/cpython/rev/0f57b30a152f), so Python 2.6.0-2.6.2
-        # does not have it.
- if getattr(self, '_tunnel_host', None):
- # TODO: Fix tunnel so it doesn't depend on self.sock state.
- self._tunnel()
- # Mark this connection as not reusable
- self.auto_open = 0
-
- def connect(self):
- conn = self._new_conn()
- self._prepare_conn(conn)
-
- def request_chunked(self, method, url, body=None, headers=None):
- """
-        Alternative to the common request method, which sends the
-        body with chunked transfer encoding rather than as one block.
- """
- headers = HTTPHeaderDict(headers if headers is not None else {})
- skip_accept_encoding = 'accept-encoding' in headers
- skip_host = 'host' in headers
- self.putrequest(
- method,
- url,
- skip_accept_encoding=skip_accept_encoding,
- skip_host=skip_host
- )
- for header, value in headers.items():
- self.putheader(header, value)
- if 'transfer-encoding' not in headers:
- self.putheader('Transfer-Encoding', 'chunked')
- self.endheaders()
-
- if body is not None:
- stringish_types = six.string_types + (six.binary_type,)
- if isinstance(body, stringish_types):
- body = (body,)
- for chunk in body:
- if not chunk:
- continue
- if not isinstance(chunk, six.binary_type):
- chunk = chunk.encode('utf8')
- len_str = hex(len(chunk))[2:]
- self.send(len_str.encode('utf-8'))
- self.send(b'\r\n')
- self.send(chunk)
- self.send(b'\r\n')
-
- # After the if clause, to always have a closed body
- self.send(b'0\r\n\r\n')
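-
-    # Minimal usage sketch (illustrative, not from upstream; host and payload
-    # are hypothetical): streaming an iterable body with chunked encoding.
-    #
-    #     conn = HTTPConnection('example.com')
-    #     conn.request_chunked('POST', '/upload',
-    #                          body=(c for c in [b'part1', b'part2']))
-    #     resp = conn.getresponse()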
-
-
-class HTTPSConnection(HTTPConnection):
- default_port = port_by_scheme['https']
-
- ssl_version = None
-
- def __init__(self, host, port=None, key_file=None, cert_file=None,
- strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
- ssl_context=None, **kw):
-
- HTTPConnection.__init__(self, host, port, strict=strict,
- timeout=timeout, **kw)
-
- self.key_file = key_file
- self.cert_file = cert_file
- self.ssl_context = ssl_context
-
- # Required property for Google AppEngine 1.9.0 which otherwise causes
- # HTTPS requests to go out as HTTP. (See Issue #356)
- self._protocol = 'https'
-
- def connect(self):
- conn = self._new_conn()
- self._prepare_conn(conn)
-
- if self.ssl_context is None:
- self.ssl_context = create_urllib3_context(
- ssl_version=resolve_ssl_version(None),
- cert_reqs=resolve_cert_reqs(None),
- )
-
- self.sock = ssl_wrap_socket(
- sock=conn,
- keyfile=self.key_file,
- certfile=self.cert_file,
- ssl_context=self.ssl_context,
- )
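-
-    # Usage sketch (illustrative; the host and context are hypothetical):
-    # callers may supply their own SSLContext instead of the default one
-    # built in connect() above:
-    #
-    #     ctx = create_urllib3_context(cert_reqs=resolve_cert_reqs('CERT_REQUIRED'))
-    #     conn = HTTPSConnection('example.com', ssl_context=ctx)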
-
-
-class VerifiedHTTPSConnection(HTTPSConnection):
- """
-    Based on httplib.HTTPSConnection but wraps the socket with
-    SSL certificate verification.
- """
- cert_reqs = None
- ca_certs = None
- ca_cert_dir = None
- ssl_version = None
- assert_fingerprint = None
-
- def set_cert(self, key_file=None, cert_file=None,
- cert_reqs=None, ca_certs=None,
- assert_hostname=None, assert_fingerprint=None,
- ca_cert_dir=None):
- """
- This method should only be called once, before the connection is used.
- """
-        # If cert_reqs is not provided, we can try to guess. If the user gave
-        # us a cert database, we assume they want to use it; otherwise, if
-        # they gave us an SSL Context object, we should use whatever is set
-        # for it.
- if cert_reqs is None:
- if ca_certs or ca_cert_dir:
- cert_reqs = 'CERT_REQUIRED'
- elif self.ssl_context is not None:
- cert_reqs = self.ssl_context.verify_mode
-
- self.key_file = key_file
- self.cert_file = cert_file
- self.cert_reqs = cert_reqs
- self.assert_hostname = assert_hostname
- self.assert_fingerprint = assert_fingerprint
- self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
- self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
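-
-    # Illustrative sketch (not part of the vendored source; paths and host
-    # are hypothetical): configure verification once, then connect.
-    #
-    #     conn = VerifiedHTTPSConnection('example.com', 443)
-    #     conn.set_cert(ca_certs='/etc/ssl/certs/ca-bundle.crt',
-    #                   cert_reqs='CERT_REQUIRED')
-    #     conn.connect()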
-
- def connect(self):
- # Add certificate verification
- conn = self._new_conn()
-
- hostname = self.host
- if getattr(self, '_tunnel_host', None):
- # _tunnel_host was added in Python 2.6.3
- # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
-
- self.sock = conn
- # Calls self._set_hostport(), so self.host is
- # self._tunnel_host below.
- self._tunnel()
- # Mark this connection as not reusable
- self.auto_open = 0
-
- # Override the host with the one we're requesting data from.
- hostname = self._tunnel_host
-
- is_time_off = datetime.date.today() < RECENT_DATE
- if is_time_off:
- warnings.warn((
- 'System time is way off (before {0}). This will probably '
- 'lead to SSL verification errors').format(RECENT_DATE),
- SystemTimeWarning
- )
-
- # Wrap socket using verification with the root certs in
- # trusted_root_certs
- if self.ssl_context is None:
- self.ssl_context = create_urllib3_context(
- ssl_version=resolve_ssl_version(self.ssl_version),
- cert_reqs=resolve_cert_reqs(self.cert_reqs),
- )
-
- context = self.ssl_context
- context.verify_mode = resolve_cert_reqs(self.cert_reqs)
- self.sock = ssl_wrap_socket(
- sock=conn,
- keyfile=self.key_file,
- certfile=self.cert_file,
- ca_certs=self.ca_certs,
- ca_cert_dir=self.ca_cert_dir,
- server_hostname=hostname,
- ssl_context=context)
-
- if self.assert_fingerprint:
- assert_fingerprint(self.sock.getpeercert(binary_form=True),
- self.assert_fingerprint)
- elif context.verify_mode != ssl.CERT_NONE \
- and not getattr(context, 'check_hostname', False) \
- and self.assert_hostname is not False:
- # While urllib3 attempts to always turn off hostname matching from
- # the TLS library, this cannot always be done. So we check whether
- # the TLS Library still thinks it's matching hostnames.
- cert = self.sock.getpeercert()
- if not cert.get('subjectAltName', ()):
- warnings.warn((
- 'Certificate for {0} has no `subjectAltName`, falling back to check for a '
- '`commonName` for now. This feature is being removed by major browsers and '
- 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
- 'for details.)'.format(hostname)),
- SubjectAltNameWarning
- )
- _match_hostname(cert, self.assert_hostname or hostname)
-
- self.is_verified = (
- context.verify_mode == ssl.CERT_REQUIRED or
- self.assert_fingerprint is not None
- )
-
-
-def _match_hostname(cert, asserted_hostname):
- try:
- match_hostname(cert, asserted_hostname)
- except CertificateError as e:
- log.error(
- 'Certificate did not match expected hostname: %s. '
- 'Certificate: %s', asserted_hostname, cert
- )
- # Add cert to exception and reraise so client code can inspect
- # the cert when catching the exception, if they want to
- e._peer_cert = cert
- raise
-
-
-if ssl:
- # Make a copy for testing.
- UnverifiedHTTPSConnection = HTTPSConnection
- HTTPSConnection = VerifiedHTTPSConnection
-else:
- HTTPSConnection = DummyConnection
diff --git a/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py b/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
deleted file mode 100644
index 90e4c86a5..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
+++ /dev/null
@@ -1,900 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import errno
-import logging
-import sys
-import warnings
-
-from socket import error as SocketError, timeout as SocketTimeout
-import socket
-
-
-from .exceptions import (
- ClosedPoolError,
- ProtocolError,
- EmptyPoolError,
- HeaderParsingError,
- HostChangedError,
- LocationValueError,
- MaxRetryError,
- ProxyError,
- ReadTimeoutError,
- SSLError,
- TimeoutError,
- InsecureRequestWarning,
- NewConnectionError,
-)
-from .packages.ssl_match_hostname import CertificateError
-from .packages import six
-from .packages.six.moves import queue
-from .connection import (
- port_by_scheme,
- DummyConnection,
- HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
- HTTPException, BaseSSLError,
-)
-from .request import RequestMethods
-from .response import HTTPResponse
-
-from .util.connection import is_connection_dropped
-from .util.request import set_file_position
-from .util.response import assert_header_parsing
-from .util.retry import Retry
-from .util.timeout import Timeout
-from .util.url import get_host, Url
-
-
-if six.PY2:
- # Queue is imported for side effects on MS Windows
- import Queue as _unused_module_Queue # noqa: F401
-
-xrange = six.moves.xrange
-
-log = logging.getLogger(__name__)
-
-_Default = object()
-
-
-# Pool objects
-class ConnectionPool(object):
- """
- Base class for all connection pools, such as
- :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
- """
-
- scheme = None
- QueueCls = queue.LifoQueue
-
- def __init__(self, host, port=None):
- if not host:
- raise LocationValueError("No host specified.")
-
- self.host = _ipv6_host(host).lower()
- self.port = port
-
- def __str__(self):
- return '%s(host=%r, port=%r)' % (type(self).__name__,
- self.host, self.port)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.close()
- # Return False to re-raise any potential exceptions
- return False
-
- def close(self):
- """
- Close all pooled connections and disable the pool.
- """
- pass
-
-
-# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
-_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
-
-
-class HTTPConnectionPool(ConnectionPool, RequestMethods):
- """
- Thread-safe connection pool for one host.
-
- :param host:
- Host used for this HTTP Connection (e.g. "localhost"), passed into
- :class:`httplib.HTTPConnection`.
-
- :param port:
- Port used for this HTTP Connection (None is equivalent to 80), passed
- into :class:`httplib.HTTPConnection`.
-
- :param strict:
- Causes BadStatusLine to be raised if the status line can't be parsed
- as a valid HTTP/1.0 or 1.1 status line, passed into
- :class:`httplib.HTTPConnection`.
-
- .. note::
- Only works in Python 2. This parameter is ignored in Python 3.
-
- :param timeout:
- Socket timeout in seconds for each individual connection. This can
- be a float or integer, which sets the timeout for the HTTP request,
- or an instance of :class:`urllib3.util.Timeout` which gives you more
-        fine-grained control over request timeouts. Once the constructor has
-        processed it, this is always a `urllib3.util.Timeout` object.
-
- :param maxsize:
- Number of connections to save that can be reused. More than 1 is useful
- in multithreaded situations. If ``block`` is set to False, more
- connections will be created but they will not be saved once they've
- been used.
-
- :param block:
- If set to True, no more than ``maxsize`` connections will be used at
- a time. When no free connections are available, the call will block
- until a connection has been released. This is a useful side effect for
- particular multithreaded situations where one does not want to use more
- than maxsize connections per host to prevent flooding.
-
- :param headers:
- Headers to include with all requests, unless other headers are given
- explicitly.
-
- :param retries:
- Retry configuration to use by default with requests in this pool.
-
- :param _proxy:
-        Parsed proxy URL; should not be used directly. Instead, see
-        :class:`urllib3.poolmanager.ProxyManager`.
-
- :param _proxy_headers:
-        A dictionary with proxy headers; should not be used directly.
-        Instead, see :class:`urllib3.poolmanager.ProxyManager`.
-
- :param \\**conn_kw:
- Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
- :class:`urllib3.connection.HTTPSConnection` instances.
- """
-
- scheme = 'http'
- ConnectionCls = HTTPConnection
- ResponseCls = HTTPResponse
-
- def __init__(self, host, port=None, strict=False,
- timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
- headers=None, retries=None,
- _proxy=None, _proxy_headers=None,
- **conn_kw):
- ConnectionPool.__init__(self, host, port)
- RequestMethods.__init__(self, headers)
-
- self.strict = strict
-
- if not isinstance(timeout, Timeout):
- timeout = Timeout.from_float(timeout)
-
- if retries is None:
- retries = Retry.DEFAULT
-
- self.timeout = timeout
- self.retries = retries
-
- self.pool = self.QueueCls(maxsize)
- self.block = block
-
- self.proxy = _proxy
- self.proxy_headers = _proxy_headers or {}
-
- # Fill the queue up so that doing get() on it will block properly
- for _ in xrange(maxsize):
- self.pool.put(None)
-
- # These are mostly for testing and debugging purposes.
- self.num_connections = 0
- self.num_requests = 0
- self.conn_kw = conn_kw
-
- if self.proxy:
- # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
- # We cannot know if the user has added default socket options, so we cannot replace the
- # list.
- self.conn_kw.setdefault('socket_options', [])
-
- def _new_conn(self):
- """
- Return a fresh :class:`HTTPConnection`.
- """
- self.num_connections += 1
- log.debug("Starting new HTTP connection (%d): %s",
- self.num_connections, self.host)
-
- conn = self.ConnectionCls(host=self.host, port=self.port,
- timeout=self.timeout.connect_timeout,
- strict=self.strict, **self.conn_kw)
- return conn
-
- def _get_conn(self, timeout=None):
- """
- Get a connection. Will return a pooled connection if one is available.
-
- If no connections are available and :prop:`.block` is ``False``, then a
- fresh connection is returned.
-
- :param timeout:
- Seconds to wait before giving up and raising
- :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
- :prop:`.block` is ``True``.
- """
- conn = None
- try:
- conn = self.pool.get(block=self.block, timeout=timeout)
-
- except AttributeError: # self.pool is None
- raise ClosedPoolError(self, "Pool is closed.")
-
- except queue.Empty:
- if self.block:
- raise EmptyPoolError(self,
- "Pool reached maximum size and no more "
- "connections are allowed.")
- pass # Oh well, we'll create a new connection then
-
- # If this is a persistent connection, check if it got disconnected
- if conn and is_connection_dropped(conn):
- log.debug("Resetting dropped connection: %s", self.host)
- conn.close()
- if getattr(conn, 'auto_open', 1) == 0:
- # This is a proxied connection that has been mutated by
- # httplib._tunnel() and cannot be reused (since it would
- # attempt to bypass the proxy)
- conn = None
-
- return conn or self._new_conn()
-
- def _put_conn(self, conn):
- """
- Put a connection back into the pool.
-
- :param conn:
- Connection object for the current host and port as returned by
- :meth:`._new_conn` or :meth:`._get_conn`.
-
- If the pool is already full, the connection is closed and discarded
- because we exceeded maxsize. If connections are discarded frequently,
- then maxsize should be increased.
-
- If the pool is closed, then the connection will be closed and discarded.
- """
- try:
- self.pool.put(conn, block=False)
- return # Everything is dandy, done.
- except AttributeError:
- # self.pool is None.
- pass
- except queue.Full:
- # This should never happen if self.block == True
- log.warning(
- "Connection pool is full, discarding connection: %s",
- self.host)
-
- # Connection never got put back into the pool, close it.
- if conn:
- conn.close()
-
- def _validate_conn(self, conn):
- """
- Called right before a request is made, after the socket is created.
- """
- pass
-
- def _prepare_proxy(self, conn):
- # Nothing to do for HTTP connections.
- pass
-
- def _get_timeout(self, timeout):
- """ Helper that always returns a :class:`urllib3.util.Timeout` """
- if timeout is _Default:
- return self.timeout.clone()
-
- if isinstance(timeout, Timeout):
- return timeout.clone()
- else:
- # User passed us an int/float. This is for backwards compatibility,
- # can be removed later
- return Timeout.from_float(timeout)
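-
-    # For illustration (editor's note): these two calls are handled
-    # identically, since a bare float is coerced via Timeout.from_float():
-    #
-    #     pool.urlopen('GET', '/', timeout=3.0)
-    #     pool.urlopen('GET', '/', timeout=Timeout(connect=3.0, read=3.0))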
-
- def _raise_timeout(self, err, url, timeout_value):
- """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
-
- if isinstance(err, SocketTimeout):
- raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
-
-        # See the comment in _make_request() about EAGAIN in Python 3. In
-        # Python 2 we have to catch it specifically and raise the timeout
-        # error ourselves.
- if hasattr(err, 'errno') and err.errno in _blocking_errnos:
- raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
-
- # Catch possible read timeouts thrown as SSL errors. If not the
- # case, rethrow the original. We need to do this because of:
- # http://bugs.python.org/issue10272
- if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
- raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
-
- def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
- **httplib_request_kw):
- """
- Perform a request on a given urllib connection object taken from our
- pool.
-
- :param conn:
- a connection from one of our connection pools
-
- :param timeout:
- Socket timeout in seconds for the request. This can be a
- float or integer, which will set the same timeout value for
- the socket connect and the socket read, or an instance of
- :class:`urllib3.util.Timeout`, which gives you more fine-grained
- control over your timeouts.
- """
- self.num_requests += 1
-
- timeout_obj = self._get_timeout(timeout)
- timeout_obj.start_connect()
- conn.timeout = timeout_obj.connect_timeout
-
- # Trigger any extra validation we need to do.
- try:
- self._validate_conn(conn)
- except (SocketTimeout, BaseSSLError) as e:
- # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
- self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
- raise
-
- # conn.request() calls httplib.*.request, not the method in
- # urllib3.request. It also calls makefile (recv) on the socket.
- if chunked:
- conn.request_chunked(method, url, **httplib_request_kw)
- else:
- conn.request(method, url, **httplib_request_kw)
-
- # Reset the timeout for the recv() on the socket
- read_timeout = timeout_obj.read_timeout
-
- # App Engine doesn't have a sock attr
- if getattr(conn, 'sock', None):
- # In Python 3 socket.py will catch EAGAIN and return None when you
- # try and read into the file pointer created by http.client, which
- # instead raises a BadStatusLine exception. Instead of catching
- # the exception and assuming all BadStatusLine exceptions are read
- # timeouts, check for a zero timeout before making the request.
- if read_timeout == 0:
- raise ReadTimeoutError(
- self, url, "Read timed out. (read timeout=%s)" % read_timeout)
- if read_timeout is Timeout.DEFAULT_TIMEOUT:
- conn.sock.settimeout(socket.getdefaulttimeout())
- else: # None or a value
- conn.sock.settimeout(read_timeout)
-
- # Receive the response from the server
- try:
- try: # Python 2.7, use buffering of HTTP responses
- httplib_response = conn.getresponse(buffering=True)
- except TypeError: # Python 2.6 and older, Python 3
- try:
- httplib_response = conn.getresponse()
- except Exception as e:
- # Remove the TypeError from the exception chain in Python 3;
- # otherwise it looks like a programming error was the cause.
- six.raise_from(e, None)
- except (SocketTimeout, BaseSSLError, SocketError) as e:
- self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
- raise
-
- # AppEngine doesn't have a version attr.
- http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
- log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
- method, url, http_version, httplib_response.status,
- httplib_response.length)
-
- try:
- assert_header_parsing(httplib_response.msg)
- except HeaderParsingError as hpe: # Platform-specific: Python 3
- log.warning(
- 'Failed to parse headers (url=%s): %s',
- self._absolute_url(url), hpe, exc_info=True)
-
- return httplib_response
-
- def _absolute_url(self, path):
- return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
-
- def close(self):
- """
- Close all pooled connections and disable the pool.
- """
- # Disable access to the pool
- old_pool, self.pool = self.pool, None
-
- try:
- while True:
- conn = old_pool.get(block=False)
- if conn:
- conn.close()
-
- except queue.Empty:
- pass # Done.
-
- def is_same_host(self, url):
- """
- Check if the given ``url`` is a member of the same host as this
- connection pool.
- """
- if url.startswith('/'):
- return True
-
- # TODO: Add optional support for socket.gethostbyname checking.
- scheme, host, port = get_host(url)
-
- host = _ipv6_host(host).lower()
-
- # Use explicit default port for comparison when none is given
- if self.port and not port:
- port = port_by_scheme.get(scheme)
- elif not self.port and port == port_by_scheme.get(scheme):
- port = None
-
- return (scheme, host, port) == (self.scheme, self.host, self.port)
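-
-    # Example (illustrative): for a pool built for http://example.com:80,
-    # 'http://example.com/x', 'http://example.com:80/x' and '/x' all count as
-    # the same host, while 'https://example.com/x' does not.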
-
- def urlopen(self, method, url, body=None, headers=None, retries=None,
- redirect=True, assert_same_host=True, timeout=_Default,
- pool_timeout=None, release_conn=None, chunked=False,
- body_pos=None, **response_kw):
- """
- Get a connection from the pool and perform an HTTP request. This is the
- lowest level call for making a request, so you'll need to specify all
- the raw details.
-
- .. note::
-
- More commonly, it's appropriate to use a convenience method provided
- by :class:`.RequestMethods`, such as :meth:`request`.
-
- .. note::
-
- `release_conn` will only behave as expected if
- `preload_content=False` because we want to make
- `preload_content=False` the default behaviour someday soon without
- breaking backwards compatibility.
-
- :param method:
- HTTP request method (such as GET, POST, PUT, etc.)
-
- :param body:
- Data to send in the request body (useful for creating
- POST requests, see HTTPConnectionPool.post_url for
- more convenience).
-
- :param headers:
- Dictionary of custom headers to send, such as User-Agent,
- If-None-Match, etc. If None, pool headers are used. If provided,
- these headers completely replace any pool-specific headers.
-
- :param retries:
- Configure the number of retries to allow before raising a
- :class:`~urllib3.exceptions.MaxRetryError` exception.
-
- Pass ``None`` to retry until you receive a response. Pass a
- :class:`~urllib3.util.retry.Retry` object for fine-grained control
- over different types of retries.
- Pass an integer number to retry connection errors that many times,
- but no other types of errors. Pass zero to never retry.
-
- If ``False``, then retries are disabled and any exception is raised
- immediately. Also, instead of raising a MaxRetryError on redirects,
- the redirect response will be returned.
-
- :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
-
- :param redirect:
- If True, automatically handle redirects (status codes 301, 302,
- 303, 307, 308). Each redirect counts as a retry. Disabling retries
- will disable redirect, too.
-
- :param assert_same_host:
- If ``True``, will make sure that the host of the pool requests is
- consistent else will raise HostChangedError. When False, you can
- use the pool on an HTTP proxy and request foreign hosts.
-
- :param timeout:
- If specified, overrides the default timeout for this one
- request. It may be a float (in seconds) or an instance of
- :class:`urllib3.util.Timeout`.
-
- :param pool_timeout:
- If set and the pool is set to block=True, then this method will
- block for ``pool_timeout`` seconds and raise EmptyPoolError if no
- connection is available within the time period.
-
- :param release_conn:
- If False, then the urlopen call will not release the connection
- back into the pool once a response is received (but will release if
- you read the entire contents of the response such as when
- `preload_content=True`). This is useful if you're not preloading
- the response's content immediately. You will need to call
- ``r.release_conn()`` on the response ``r`` to return the connection
- back into the pool. If None, it takes the value of
- ``response_kw.get('preload_content', True)``.
-
- :param chunked:
- If True, urllib3 will send the body using chunked transfer
- encoding. Otherwise, urllib3 will send the body using the standard
- content-length form. Defaults to False.
-
- :param int body_pos:
- Position to seek to in file-like body in the event of a retry or
- redirect. Typically this won't need to be set because urllib3 will
- auto-populate the value when needed.
-
- :param \\**response_kw:
- Additional parameters are passed to
- :meth:`urllib3.response.HTTPResponse.from_httplib`
- """
- if headers is None:
- headers = self.headers
-
- if not isinstance(retries, Retry):
- retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
-
- if release_conn is None:
- release_conn = response_kw.get('preload_content', True)
-
- # Check host
- if assert_same_host and not self.is_same_host(url):
- raise HostChangedError(self, url, retries)
-
- conn = None
-
- # Track whether `conn` needs to be released before
- # returning/raising/recursing. Update this variable if necessary, and
- # leave `release_conn` constant throughout the function. That way, if
- # the function recurses, the original value of `release_conn` will be
- # passed down into the recursive call, and its value will be respected.
- #
- # See issue #651 [1] for details.
- #
- # [1] <https://github.com/shazow/urllib3/issues/651>
- release_this_conn = release_conn
-
- # Merge the proxy headers. Only do this in HTTP. We have to copy the
- # headers dict so we can safely change it without those changes being
- # reflected in anyone else's copy.
- if self.scheme == 'http':
- headers = headers.copy()
- headers.update(self.proxy_headers)
-
- # Must keep the exception bound to a separate variable or else Python 3
- # complains about UnboundLocalError.
- err = None
-
- # Keep track of whether we cleanly exited the except block. This
- # ensures we do proper cleanup in finally.
- clean_exit = False
-
- # Rewind body position, if needed. Record current position
- # for future rewinds in the event of a redirect/retry.
- body_pos = set_file_position(body, body_pos)
-
- try:
- # Request a connection from the queue.
- timeout_obj = self._get_timeout(timeout)
- conn = self._get_conn(timeout=pool_timeout)
-
- conn.timeout = timeout_obj.connect_timeout
-
- is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
- if is_new_proxy_conn:
- self._prepare_proxy(conn)
-
- # Make the request on the httplib connection object.
- httplib_response = self._make_request(conn, method, url,
- timeout=timeout_obj,
- body=body, headers=headers,
- chunked=chunked)
-
- # If we're going to release the connection in ``finally:``, then
- # the response doesn't need to know about the connection. Otherwise
- # it will also try to release it and we'll have a double-release
- # mess.
- response_conn = conn if not release_conn else None
-
- # Pass method to Response for length checking
- response_kw['request_method'] = method
-
- # Import httplib's response into our own wrapper object
- response = self.ResponseCls.from_httplib(httplib_response,
- pool=self,
- connection=response_conn,
- retries=retries,
- **response_kw)
-
- # Everything went great!
- clean_exit = True
-
- except queue.Empty:
- # Timed out by queue.
- raise EmptyPoolError(self, "No pool connections are available.")
-
- except (BaseSSLError, CertificateError) as e:
- # Close the connection. If a connection is reused on which there
- # was a Certificate error, the next request will certainly raise
- # another Certificate error.
- clean_exit = False
- raise SSLError(e)
-
- except SSLError:
- # Treat SSLError separately from BaseSSLError to preserve
- # traceback.
- clean_exit = False
- raise
-
- except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
-            # Discard the connection for these exceptions. It will be
-            # replaced during the next _get_conn() call.
- clean_exit = False
-
- if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
- e = ProxyError('Cannot connect to proxy.', e)
- elif isinstance(e, (SocketError, HTTPException)):
- e = ProtocolError('Connection aborted.', e)
-
- retries = retries.increment(method, url, error=e, _pool=self,
- _stacktrace=sys.exc_info()[2])
- retries.sleep()
-
- # Keep track of the error for the retry warning.
- err = e
-
- finally:
- if not clean_exit:
- # We hit some kind of exception, handled or otherwise. We need
- # to throw the connection away unless explicitly told not to.
- # Close the connection, set the variable to None, and make sure
- # we put the None back in the pool to avoid leaking it.
- conn = conn and conn.close()
- release_this_conn = True
-
- if release_this_conn:
- # Put the connection back to be reused. If the connection is
- # expired then it will be None, which will get replaced with a
- # fresh connection during _get_conn.
- self._put_conn(conn)
-
- if not conn:
- # Try again
- log.warning("Retrying (%r) after connection "
- "broken by '%r': %s", retries, err, url)
- return self.urlopen(method, url, body, headers, retries,
- redirect, assert_same_host,
- timeout=timeout, pool_timeout=pool_timeout,
- release_conn=release_conn, body_pos=body_pos,
- **response_kw)
-
- # Handle redirect?
- redirect_location = redirect and response.get_redirect_location()
- if redirect_location:
- if response.status == 303:
- method = 'GET'
-
- try:
- retries = retries.increment(method, url, response=response, _pool=self)
- except MaxRetryError:
- if retries.raise_on_redirect:
- # Release the connection for this response, since we're not
- # returning it to be released manually.
- response.release_conn()
- raise
- return response
-
- retries.sleep_for_retry(response)
- log.debug("Redirecting %s -> %s", url, redirect_location)
- return self.urlopen(
- method, redirect_location, body, headers,
- retries=retries, redirect=redirect,
- assert_same_host=assert_same_host,
- timeout=timeout, pool_timeout=pool_timeout,
- release_conn=release_conn, body_pos=body_pos,
- **response_kw)
-
- # Check if we should retry the HTTP response.
- has_retry_after = bool(response.getheader('Retry-After'))
- if retries.is_retry(method, response.status, has_retry_after):
- try:
- retries = retries.increment(method, url, response=response, _pool=self)
- except MaxRetryError:
- if retries.raise_on_status:
- # Release the connection for this response, since we're not
- # returning it to be released manually.
- response.release_conn()
- raise
- return response
- retries.sleep(response)
- log.debug("Retry: %s", url)
- return self.urlopen(
- method, url, body, headers,
- retries=retries, redirect=redirect,
- assert_same_host=assert_same_host,
- timeout=timeout, pool_timeout=pool_timeout,
- release_conn=release_conn,
- body_pos=body_pos, **response_kw)
-
- return response
-
-
-class HTTPSConnectionPool(HTTPConnectionPool):
- """
- Same as :class:`.HTTPConnectionPool`, but HTTPS.
-
- When Python is compiled with the :mod:`ssl` module, then
- :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
- instead of :class:`.HTTPSConnection`.
-
- :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
- ``assert_hostname`` and ``host`` in this order to verify connections.
- If ``assert_hostname`` is False, no verification is done.
-
- The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
- ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
- available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
- the connection socket into an SSL socket.
- """
-
- scheme = 'https'
- ConnectionCls = HTTPSConnection
-
- def __init__(self, host, port=None,
- strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
- block=False, headers=None, retries=None,
- _proxy=None, _proxy_headers=None,
- key_file=None, cert_file=None, cert_reqs=None,
- ca_certs=None, ssl_version=None,
- assert_hostname=None, assert_fingerprint=None,
- ca_cert_dir=None, **conn_kw):
-
- HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
- block, headers, retries, _proxy, _proxy_headers,
- **conn_kw)
-
- if ca_certs and cert_reqs is None:
- cert_reqs = 'CERT_REQUIRED'
-
- self.key_file = key_file
- self.cert_file = cert_file
- self.cert_reqs = cert_reqs
- self.ca_certs = ca_certs
- self.ca_cert_dir = ca_cert_dir
- self.ssl_version = ssl_version
- self.assert_hostname = assert_hostname
- self.assert_fingerprint = assert_fingerprint
-
- def _prepare_conn(self, conn):
- """
- Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
- and establish the tunnel if proxy is used.
- """
-
- if isinstance(conn, VerifiedHTTPSConnection):
- conn.set_cert(key_file=self.key_file,
- cert_file=self.cert_file,
- cert_reqs=self.cert_reqs,
- ca_certs=self.ca_certs,
- ca_cert_dir=self.ca_cert_dir,
- assert_hostname=self.assert_hostname,
- assert_fingerprint=self.assert_fingerprint)
- conn.ssl_version = self.ssl_version
- return conn
-
- def _prepare_proxy(self, conn):
- """
-        Establish the tunnel connection early, because otherwise httplib
-        would improperly set the Host: header to the proxy's IP:port.
- """
- # Python 2.7+
- try:
- set_tunnel = conn.set_tunnel
- except AttributeError: # Platform-specific: Python 2.6
- set_tunnel = conn._set_tunnel
-
- if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
- set_tunnel(self.host, self.port)
- else:
- set_tunnel(self.host, self.port, self.proxy_headers)
-
- conn.connect()
-
- def _new_conn(self):
- """
- Return a fresh :class:`httplib.HTTPSConnection`.
- """
- self.num_connections += 1
- log.debug("Starting new HTTPS connection (%d): %s",
- self.num_connections, self.host)
-
- if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
- raise SSLError("Can't connect to HTTPS URL because the SSL "
- "module is not available.")
-
- actual_host = self.host
- actual_port = self.port
- if self.proxy is not None:
- actual_host = self.proxy.host
- actual_port = self.proxy.port
-
- conn = self.ConnectionCls(host=actual_host, port=actual_port,
- timeout=self.timeout.connect_timeout,
- strict=self.strict, **self.conn_kw)
-
- return self._prepare_conn(conn)
-
- def _validate_conn(self, conn):
- """
- Called right before a request is made, after the socket is created.
- """
- super(HTTPSConnectionPool, self)._validate_conn(conn)
-
- # Force connect early to allow us to validate the connection.
- if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
- conn.connect()
-
- if not conn.is_verified:
- warnings.warn((
- 'Unverified HTTPS request is being made. '
- 'Adding certificate verification is strongly advised. See: '
- 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
- '#ssl-warnings'),
- InsecureRequestWarning)
-
-
-def connection_from_url(url, **kw):
- """
- Given a url, return an :class:`.ConnectionPool` instance of its host.
-
- This is a shortcut for not having to parse out the scheme, host, and port
- of the url before creating an :class:`.ConnectionPool` instance.
-
- :param url:
- Absolute URL string that must include the scheme. Port is optional.
-
- :param \\**kw:
- Passes additional parameters to the constructor of the appropriate
- :class:`.ConnectionPool`. Useful for specifying things like
- timeout, maxsize, headers, etc.
-
- Example::
-
- >>> conn = connection_from_url('http://google.com/')
- >>> r = conn.request('GET', '/')
- """
- scheme, host, port = get_host(url)
- port = port or port_by_scheme.get(scheme, 80)
- if scheme == 'https':
- return HTTPSConnectionPool(host, port=port, **kw)
- else:
- return HTTPConnectionPool(host, port=port, **kw)
-
-
-def _ipv6_host(host):
- """
- Process IPv6 address literals
- """
-
- # httplib doesn't like it when we include brackets in IPv6 addresses
- # Specifically, if we include brackets but also pass the port then
- # httplib crazily doubles up the square brackets on the Host header.
- # Instead, we need to make sure we never pass ``None`` as the port.
- # However, for backward compatibility reasons we can't actually
- # *assert* that. See http://bugs.python.org/issue28539
- #
-    # Also, if an IPv6 address literal has a zone identifier, the
-    # percent sign might be URI-encoded (``%25``); convert it back
-    # into a plain percent sign.
- if host.startswith('[') and host.endswith(']'):
- host = host.replace('%25', '%').strip('[]')
- return host
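-
-    # Examples (illustrative): '[2001:db8::1]' becomes '2001:db8::1', and a
-    # percent-encoded zone identifier such as '[fe80::1%25eth0]' becomes
-    # 'fe80::1%eth0'; non-bracketed hosts pass through unchanged.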
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
+++ /dev/null
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
+++ /dev/null
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
deleted file mode 100644
index bb826673f..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
+++ /dev/null
@@ -1,591 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-This module uses ctypes to bind a whole bunch of functions and constants from
-SecureTransport. The goal here is to provide the low-level API to
-SecureTransport. These are essentially the C-level functions and constants, and
-they're pretty gross to work with.
-
-This code is a bastardised version of the code found in Will Bond's oscrypto
-library. An enormous debt is owed to him for blazing this trail for us. For
-that reason, this code should be considered to be covered both by urllib3's
-license and by oscrypto's:
-
- Copyright (c) 2015-2016 Will Bond <will@wbond.net>
-
- Permission is hereby granted, free of charge, to any person obtaining a
- copy of this software and associated documentation files (the "Software"),
- to deal in the Software without restriction, including without limitation
- the rights to use, copy, modify, merge, publish, distribute, sublicense,
- and/or sell copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- DEALINGS IN THE SOFTWARE.
-"""
-from __future__ import absolute_import
-
-import platform
-from ctypes.util import find_library
-from ctypes import (
- c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long,
- c_bool
-)
-from ctypes import CDLL, POINTER, CFUNCTYPE
-
-
-security_path = find_library('Security')
-if not security_path:
- raise ImportError('The library Security could not be found')
-
-
-core_foundation_path = find_library('CoreFoundation')
-if not core_foundation_path:
- raise ImportError('The library CoreFoundation could not be found')
-
-
-version = platform.mac_ver()[0]
-version_info = tuple(map(int, version.split('.')))
-if version_info < (10, 8):
- raise OSError(
- 'Only OS X 10.8 and newer are supported, not %s.%s' % (
- version_info[0], version_info[1]
- )
- )
-
-Security = CDLL(security_path, use_errno=True)
-CoreFoundation = CDLL(core_foundation_path, use_errno=True)
-
-Boolean = c_bool
-CFIndex = c_long
-CFStringEncoding = c_uint32
-CFData = c_void_p
-CFString = c_void_p
-CFArray = c_void_p
-CFMutableArray = c_void_p
-CFDictionary = c_void_p
-CFError = c_void_p
-CFType = c_void_p
-CFTypeID = c_ulong
-
-CFTypeRef = POINTER(CFType)
-CFAllocatorRef = c_void_p
-
-OSStatus = c_int32
-
-CFDataRef = POINTER(CFData)
-CFStringRef = POINTER(CFString)
-CFArrayRef = POINTER(CFArray)
-CFMutableArrayRef = POINTER(CFMutableArray)
-CFDictionaryRef = POINTER(CFDictionary)
-CFArrayCallBacks = c_void_p
-CFDictionaryKeyCallBacks = c_void_p
-CFDictionaryValueCallBacks = c_void_p
-
-SecCertificateRef = POINTER(c_void_p)
-SecExternalFormat = c_uint32
-SecExternalItemType = c_uint32
-SecIdentityRef = POINTER(c_void_p)
-SecItemImportExportFlags = c_uint32
-SecItemImportExportKeyParameters = c_void_p
-SecKeychainRef = POINTER(c_void_p)
-SSLProtocol = c_uint32
-SSLCipherSuite = c_uint32
-SSLContextRef = POINTER(c_void_p)
-SecTrustRef = POINTER(c_void_p)
-SSLConnectionRef = c_uint32
-SecTrustResultType = c_uint32
-SecTrustOptionFlags = c_uint32
-SSLProtocolSide = c_uint32
-SSLConnectionType = c_uint32
-SSLSessionOption = c_uint32
-
-
-try:
- Security.SecItemImport.argtypes = [
- CFDataRef,
- CFStringRef,
- POINTER(SecExternalFormat),
- POINTER(SecExternalItemType),
- SecItemImportExportFlags,
- POINTER(SecItemImportExportKeyParameters),
- SecKeychainRef,
- POINTER(CFArrayRef),
- ]
- Security.SecItemImport.restype = OSStatus
-
- Security.SecCertificateGetTypeID.argtypes = []
- Security.SecCertificateGetTypeID.restype = CFTypeID
-
- Security.SecIdentityGetTypeID.argtypes = []
- Security.SecIdentityGetTypeID.restype = CFTypeID
-
- Security.SecKeyGetTypeID.argtypes = []
- Security.SecKeyGetTypeID.restype = CFTypeID
-
- Security.SecCertificateCreateWithData.argtypes = [
- CFAllocatorRef,
- CFDataRef
- ]
- Security.SecCertificateCreateWithData.restype = SecCertificateRef
-
- Security.SecCertificateCopyData.argtypes = [
- SecCertificateRef
- ]
- Security.SecCertificateCopyData.restype = CFDataRef
-
- Security.SecCopyErrorMessageString.argtypes = [
- OSStatus,
- c_void_p
- ]
- Security.SecCopyErrorMessageString.restype = CFStringRef
-
- Security.SecIdentityCreateWithCertificate.argtypes = [
- CFTypeRef,
- SecCertificateRef,
- POINTER(SecIdentityRef)
- ]
- Security.SecIdentityCreateWithCertificate.restype = OSStatus
-
- Security.SecKeychainCreate.argtypes = [
- c_char_p,
- c_uint32,
- c_void_p,
- Boolean,
- c_void_p,
- POINTER(SecKeychainRef)
- ]
- Security.SecKeychainCreate.restype = OSStatus
-
- Security.SecKeychainDelete.argtypes = [
- SecKeychainRef
- ]
- Security.SecKeychainDelete.restype = OSStatus
-
- Security.SecPKCS12Import.argtypes = [
- CFDataRef,
- CFDictionaryRef,
- POINTER(CFArrayRef)
- ]
- Security.SecPKCS12Import.restype = OSStatus
-
- SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
- SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t))
-
- Security.SSLSetIOFuncs.argtypes = [
- SSLContextRef,
- SSLReadFunc,
- SSLWriteFunc
- ]
- Security.SSLSetIOFuncs.restype = OSStatus
-
- Security.SSLSetPeerID.argtypes = [
- SSLContextRef,
- c_char_p,
- c_size_t
- ]
- Security.SSLSetPeerID.restype = OSStatus
-
- Security.SSLSetCertificate.argtypes = [
- SSLContextRef,
- CFArrayRef
- ]
- Security.SSLSetCertificate.restype = OSStatus
-
- Security.SSLSetCertificateAuthorities.argtypes = [
- SSLContextRef,
- CFTypeRef,
- Boolean
- ]
- Security.SSLSetCertificateAuthorities.restype = OSStatus
-
- Security.SSLSetConnection.argtypes = [
- SSLContextRef,
- SSLConnectionRef
- ]
- Security.SSLSetConnection.restype = OSStatus
-
- Security.SSLSetPeerDomainName.argtypes = [
- SSLContextRef,
- c_char_p,
- c_size_t
- ]
- Security.SSLSetPeerDomainName.restype = OSStatus
-
- Security.SSLHandshake.argtypes = [
- SSLContextRef
- ]
- Security.SSLHandshake.restype = OSStatus
-
- Security.SSLRead.argtypes = [
- SSLContextRef,
- c_char_p,
- c_size_t,
- POINTER(c_size_t)
- ]
- Security.SSLRead.restype = OSStatus
-
- Security.SSLWrite.argtypes = [
- SSLContextRef,
- c_char_p,
- c_size_t,
- POINTER(c_size_t)
- ]
- Security.SSLWrite.restype = OSStatus
-
- Security.SSLClose.argtypes = [
- SSLContextRef
- ]
- Security.SSLClose.restype = OSStatus
-
- Security.SSLGetNumberSupportedCiphers.argtypes = [
- SSLContextRef,
- POINTER(c_size_t)
- ]
- Security.SSLGetNumberSupportedCiphers.restype = OSStatus
-
- Security.SSLGetSupportedCiphers.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite),
- POINTER(c_size_t)
- ]
- Security.SSLGetSupportedCiphers.restype = OSStatus
-
- Security.SSLSetEnabledCiphers.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite),
- c_size_t
- ]
- Security.SSLSetEnabledCiphers.restype = OSStatus
-
-    Security.SSLGetNumberEnabledCiphers.argtypes = [
- SSLContextRef,
- POINTER(c_size_t)
- ]
- Security.SSLGetNumberEnabledCiphers.restype = OSStatus
-
- Security.SSLGetEnabledCiphers.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite),
- POINTER(c_size_t)
- ]
- Security.SSLGetEnabledCiphers.restype = OSStatus
-
- Security.SSLGetNegotiatedCipher.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite)
- ]
- Security.SSLGetNegotiatedCipher.restype = OSStatus
-
- Security.SSLGetNegotiatedProtocolVersion.argtypes = [
- SSLContextRef,
- POINTER(SSLProtocol)
- ]
- Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
-
- Security.SSLCopyPeerTrust.argtypes = [
- SSLContextRef,
- POINTER(SecTrustRef)
- ]
- Security.SSLCopyPeerTrust.restype = OSStatus
-
- Security.SecTrustSetAnchorCertificates.argtypes = [
- SecTrustRef,
- CFArrayRef
- ]
- Security.SecTrustSetAnchorCertificates.restype = OSStatus
-
-    Security.SecTrustSetAnchorCertificatesOnly.argtypes = [
- SecTrustRef,
- Boolean
- ]
- Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
-
- Security.SecTrustEvaluate.argtypes = [
- SecTrustRef,
- POINTER(SecTrustResultType)
- ]
- Security.SecTrustEvaluate.restype = OSStatus
-
- Security.SecTrustGetCertificateCount.argtypes = [
- SecTrustRef
- ]
- Security.SecTrustGetCertificateCount.restype = CFIndex
-
- Security.SecTrustGetCertificateAtIndex.argtypes = [
- SecTrustRef,
- CFIndex
- ]
- Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
-
- Security.SSLCreateContext.argtypes = [
- CFAllocatorRef,
- SSLProtocolSide,
- SSLConnectionType
- ]
- Security.SSLCreateContext.restype = SSLContextRef
-
- Security.SSLSetSessionOption.argtypes = [
- SSLContextRef,
- SSLSessionOption,
- Boolean
- ]
- Security.SSLSetSessionOption.restype = OSStatus
-
- Security.SSLSetProtocolVersionMin.argtypes = [
- SSLContextRef,
- SSLProtocol
- ]
- Security.SSLSetProtocolVersionMin.restype = OSStatus
-
- Security.SSLSetProtocolVersionMax.argtypes = [
- SSLContextRef,
- SSLProtocol
- ]
- Security.SSLSetProtocolVersionMax.restype = OSStatus
-
- Security.SecCopyErrorMessageString.argtypes = [
- OSStatus,
- c_void_p
- ]
- Security.SecCopyErrorMessageString.restype = CFStringRef
-
- Security.SSLReadFunc = SSLReadFunc
- Security.SSLWriteFunc = SSLWriteFunc
- Security.SSLContextRef = SSLContextRef
- Security.SSLProtocol = SSLProtocol
- Security.SSLCipherSuite = SSLCipherSuite
- Security.SecIdentityRef = SecIdentityRef
- Security.SecKeychainRef = SecKeychainRef
- Security.SecTrustRef = SecTrustRef
- Security.SecTrustResultType = SecTrustResultType
- Security.SecExternalFormat = SecExternalFormat
- Security.OSStatus = OSStatus
-
- Security.kSecImportExportPassphrase = CFStringRef.in_dll(
- Security, 'kSecImportExportPassphrase'
- )
- Security.kSecImportItemIdentity = CFStringRef.in_dll(
- Security, 'kSecImportItemIdentity'
- )
-
- # CoreFoundation time!
- CoreFoundation.CFRetain.argtypes = [
- CFTypeRef
- ]
- CoreFoundation.CFRetain.restype = CFTypeRef
-
- CoreFoundation.CFRelease.argtypes = [
- CFTypeRef
- ]
- CoreFoundation.CFRelease.restype = None
-
- CoreFoundation.CFGetTypeID.argtypes = [
- CFTypeRef
- ]
- CoreFoundation.CFGetTypeID.restype = CFTypeID
-
- CoreFoundation.CFStringCreateWithCString.argtypes = [
- CFAllocatorRef,
- c_char_p,
- CFStringEncoding
- ]
- CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
-
- CoreFoundation.CFStringGetCStringPtr.argtypes = [
- CFStringRef,
- CFStringEncoding
- ]
- CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
-
- CoreFoundation.CFStringGetCString.argtypes = [
- CFStringRef,
- c_char_p,
- CFIndex,
- CFStringEncoding
- ]
- CoreFoundation.CFStringGetCString.restype = c_bool
-
- CoreFoundation.CFDataCreate.argtypes = [
- CFAllocatorRef,
- c_char_p,
- CFIndex
- ]
- CoreFoundation.CFDataCreate.restype = CFDataRef
-
- CoreFoundation.CFDataGetLength.argtypes = [
- CFDataRef
- ]
- CoreFoundation.CFDataGetLength.restype = CFIndex
-
- CoreFoundation.CFDataGetBytePtr.argtypes = [
- CFDataRef
- ]
- CoreFoundation.CFDataGetBytePtr.restype = c_void_p
-
- CoreFoundation.CFDictionaryCreate.argtypes = [
- CFAllocatorRef,
- POINTER(CFTypeRef),
- POINTER(CFTypeRef),
- CFIndex,
- CFDictionaryKeyCallBacks,
- CFDictionaryValueCallBacks
- ]
- CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
-
- CoreFoundation.CFDictionaryGetValue.argtypes = [
- CFDictionaryRef,
- CFTypeRef
- ]
- CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
-
- CoreFoundation.CFArrayCreate.argtypes = [
- CFAllocatorRef,
- POINTER(CFTypeRef),
- CFIndex,
- CFArrayCallBacks,
- ]
- CoreFoundation.CFArrayCreate.restype = CFArrayRef
-
- CoreFoundation.CFArrayCreateMutable.argtypes = [
- CFAllocatorRef,
- CFIndex,
- CFArrayCallBacks
- ]
- CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
-
- CoreFoundation.CFArrayAppendValue.argtypes = [
- CFMutableArrayRef,
- c_void_p
- ]
- CoreFoundation.CFArrayAppendValue.restype = None
-
- CoreFoundation.CFArrayGetCount.argtypes = [
- CFArrayRef
- ]
- CoreFoundation.CFArrayGetCount.restype = CFIndex
-
- CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
- CFArrayRef,
- CFIndex
- ]
- CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
-
- CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
- CoreFoundation, 'kCFAllocatorDefault'
- )
- CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
- CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
- CoreFoundation, 'kCFTypeDictionaryKeyCallBacks'
- )
- CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
- CoreFoundation, 'kCFTypeDictionaryValueCallBacks'
- )
-
- CoreFoundation.CFTypeRef = CFTypeRef
- CoreFoundation.CFArrayRef = CFArrayRef
- CoreFoundation.CFStringRef = CFStringRef
- CoreFoundation.CFDictionaryRef = CFDictionaryRef
-
-except AttributeError:
- raise ImportError('Error initializing ctypes')
-
-
-class CFConst(object):
- """
- A class object that acts as essentially a namespace for CoreFoundation
- constants.
- """
- kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
-
-
-class SecurityConst(object):
- """
- A class object that acts as essentially a namespace for Security constants.
- """
- kSSLSessionOptionBreakOnServerAuth = 0
-
- kSSLProtocol2 = 1
- kSSLProtocol3 = 2
- kTLSProtocol1 = 4
- kTLSProtocol11 = 7
- kTLSProtocol12 = 8
-
- kSSLClientSide = 1
- kSSLStreamType = 0
-
- kSecFormatPEMSequence = 10
-
- kSecTrustResultInvalid = 0
- kSecTrustResultProceed = 1
- # This gap is present on purpose: this was kSecTrustResultConfirm, which
- # is deprecated.
- kSecTrustResultDeny = 3
- kSecTrustResultUnspecified = 4
- kSecTrustResultRecoverableTrustFailure = 5
- kSecTrustResultFatalTrustFailure = 6
- kSecTrustResultOtherError = 7
-
- errSSLProtocol = -9800
- errSSLWouldBlock = -9803
- errSSLClosedGraceful = -9805
- errSSLClosedNoNotify = -9816
- errSSLClosedAbort = -9806
-
- errSSLXCertChainInvalid = -9807
- errSSLCrypto = -9809
- errSSLInternal = -9810
- errSSLCertExpired = -9814
- errSSLCertNotYetValid = -9815
- errSSLUnknownRootCert = -9812
- errSSLNoRootCert = -9813
- errSSLHostNameMismatch = -9843
- errSSLPeerHandshakeFail = -9824
- errSSLPeerUserCancelled = -9839
- errSSLWeakPeerEphemeralDHKey = -9850
- errSSLServerAuthCompleted = -9841
- errSSLRecordOverflow = -9847
-
- errSecVerifyFailed = -67808
- errSecNoTrustSettings = -25263
- errSecItemNotFound = -25300
- errSecInvalidTrustSettings = -25262
-
- # Cipher suites. We only pick the ones our default cipher string allows.
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
- TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
- TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
- TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
- TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
- TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
- TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
- TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
- TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
- TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
- TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
- TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
- TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
- TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
- TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
deleted file mode 100644
index 0f79a1372..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
+++ /dev/null
@@ -1,344 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-Low-level helpers for the SecureTransport bindings.
-
-These are Python functions that are not directly related to the high-level APIs
-but are necessary to get them to work. They include a whole bunch of low-level
-CoreFoundation messing about and memory management. The concerns in this module
-are almost entirely about trying to avoid memory leaks and providing
-appropriate and useful assistance to the higher-level code.
-"""
-import base64
-import ctypes
-import itertools
-import re
-import os
-import ssl
-import tempfile
-
-from .bindings import Security, CoreFoundation, CFConst
-
-
-# This regular expression is used to grab PEM data out of a PEM bundle.
-_PEM_CERTS_RE = re.compile(
- b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
-)
-
-
-def _cf_data_from_bytes(bytestring):
- """
- Given a bytestring, create a CFData object from it. This CFData object must
- be CFReleased by the caller.
- """
- return CoreFoundation.CFDataCreate(
- CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
- )
-
-
-def _cf_dictionary_from_tuples(tuples):
- """
- Given a list of Python tuples, create an associated CFDictionary.
- """
- dictionary_size = len(tuples)
-
- # We need to get the dictionary keys and values out in the same order.
- keys = (t[0] for t in tuples)
- values = (t[1] for t in tuples)
- cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
- cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
-
- return CoreFoundation.CFDictionaryCreate(
- CoreFoundation.kCFAllocatorDefault,
- cf_keys,
- cf_values,
- dictionary_size,
- CoreFoundation.kCFTypeDictionaryKeyCallBacks,
- CoreFoundation.kCFTypeDictionaryValueCallBacks,
- )
-
-
-def _cf_string_to_unicode(value):
- """
- Creates a Unicode string from a CFString object. Used entirely for error
- reporting.
-
- Yes, it annoys me quite a lot that this function is this complex.
- """
- value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
-
- string = CoreFoundation.CFStringGetCStringPtr(
- value_as_void_p,
- CFConst.kCFStringEncodingUTF8
- )
- if string is None:
- buffer = ctypes.create_string_buffer(1024)
- result = CoreFoundation.CFStringGetCString(
- value_as_void_p,
- buffer,
- 1024,
- CFConst.kCFStringEncodingUTF8
- )
- if not result:
- raise OSError('Error copying C string from CFStringRef')
- string = buffer.value
- if string is not None:
- string = string.decode('utf-8')
- return string
-
-
-def _assert_no_error(error, exception_class=None):
- """
-    Checks the return code and raises an exception if there is an error to
-    report.
- """
- if error == 0:
- return
-
- cf_error_string = Security.SecCopyErrorMessageString(error, None)
- output = _cf_string_to_unicode(cf_error_string)
- CoreFoundation.CFRelease(cf_error_string)
-
- if output is None or output == u'':
- output = u'OSStatus %s' % error
-
- if exception_class is None:
- exception_class = ssl.SSLError
-
- raise exception_class(output)
-
-
-def _cert_array_from_pem(pem_bundle):
- """
- Given a bundle of certs in PEM format, turns them into a CFArray of certs
- that can be used to validate a cert chain.
- """
- der_certs = [
- base64.b64decode(match.group(1))
- for match in _PEM_CERTS_RE.finditer(pem_bundle)
- ]
- if not der_certs:
- raise ssl.SSLError("No root certificates specified")
-
- cert_array = CoreFoundation.CFArrayCreateMutable(
- CoreFoundation.kCFAllocatorDefault,
- 0,
- ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
- )
- if not cert_array:
- raise ssl.SSLError("Unable to allocate memory!")
-
- try:
- for der_bytes in der_certs:
- certdata = _cf_data_from_bytes(der_bytes)
- if not certdata:
- raise ssl.SSLError("Unable to allocate memory!")
- cert = Security.SecCertificateCreateWithData(
- CoreFoundation.kCFAllocatorDefault, certdata
- )
- CoreFoundation.CFRelease(certdata)
- if not cert:
- raise ssl.SSLError("Unable to build cert object!")
-
- CoreFoundation.CFArrayAppendValue(cert_array, cert)
- CoreFoundation.CFRelease(cert)
-    except Exception:
-        # We need to free the array before the exception bubbles further.
-        # We only want to do that if an error occurs: otherwise, the caller
-        # should free.
-        CoreFoundation.CFRelease(cert_array)
-        raise
-
- return cert_array
-
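-# Illustrative sketch, not part of the original module: the intended calling
-# convention for _cert_array_from_pem. The caller owns the returned CFArray
-# and must CFRelease it exactly once when finished with it. The helper name
-# and PEM path are hypothetical.
-def _example_cert_array_usage(pem_path):
-    with open(pem_path, 'rb') as f:
-        cert_array = _cert_array_from_pem(f.read())
-    try:
-        # e.g. hand it to Security.SecTrustSetAnchorCertificates(trust, ...)
-        return CoreFoundation.CFArrayGetCount(cert_array)
-    finally:
-        CoreFoundation.CFRelease(cert_array)
-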
-
-def _is_cert(item):
- """
- Returns True if a given CFTypeRef is a certificate.
- """
- expected = Security.SecCertificateGetTypeID()
- return CoreFoundation.CFGetTypeID(item) == expected
-
-
-def _is_identity(item):
- """
- Returns True if a given CFTypeRef is an identity.
- """
- expected = Security.SecIdentityGetTypeID()
- return CoreFoundation.CFGetTypeID(item) == expected
-
-
-def _temporary_keychain():
- """
- This function creates a temporary Mac keychain that we can use to work with
- credentials. This keychain uses a one-time password and a temporary file to
- store the data. We expect to have one keychain per socket. The returned
- SecKeychainRef must be freed by the caller, including calling
- SecKeychainDelete.
-
- Returns a tuple of the SecKeychainRef and the path to the temporary
- directory that contains it.
- """
- # Unfortunately, SecKeychainCreate requires a path to a keychain. This
- # means we cannot use mkstemp to use a generic temporary file. Instead,
- # we're going to create a temporary directory and a filename to use there.
- # This filename will be 8 random bytes expanded into base64. We also need
- # some random bytes to password-protect the keychain we're creating, so we
- # ask for 40 random bytes.
- random_bytes = os.urandom(40)
- filename = base64.b64encode(random_bytes[:8]).decode('utf-8')
- password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8
- tempdirectory = tempfile.mkdtemp()
-
- keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
-
- # We now want to create the keychain itself.
- keychain = Security.SecKeychainRef()
- status = Security.SecKeychainCreate(
- keychain_path,
- len(password),
- password,
- False,
- None,
- ctypes.byref(keychain)
- )
- _assert_no_error(status)
-
- # Having created the keychain, we want to pass it off to the caller.
- return keychain, tempdirectory
-
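-# Illustrative sketch, not part of the original module: the cleanup that a
-# caller of _temporary_keychain is expected to perform, mirroring what the
-# securetransport WrappedSocket does in close(). The function name is
-# hypothetical; shutil is imported locally because this module does not use it.
-def _example_keychain_lifecycle():
-    import shutil
-    keychain, keychain_dir = _temporary_keychain()
-    try:
-        pass  # e.g. Security.SecItemImport(...) targeting this keychain
-    finally:
-        Security.SecKeychainDelete(keychain)
-        CoreFoundation.CFRelease(keychain)
-        shutil.rmtree(keychain_dir)
-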
-
-def _load_items_from_file(keychain, path):
- """
- Given a single file, loads all the trust objects from it into arrays and
- the keychain.
- Returns a tuple of lists: the first list is a list of identities, the
- second a list of certs.
- """
- certificates = []
- identities = []
- result_array = None
-
- with open(path, 'rb') as f:
- raw_filedata = f.read()
-
- try:
- filedata = CoreFoundation.CFDataCreate(
- CoreFoundation.kCFAllocatorDefault,
- raw_filedata,
- len(raw_filedata)
- )
- result_array = CoreFoundation.CFArrayRef()
- result = Security.SecItemImport(
- filedata, # cert data
- None, # Filename, leaving it out for now
- None, # What the type of the file is, we don't care
- None, # what's in the file, we don't care
- 0, # import flags
- None, # key params, can include passphrase in the future
- keychain, # The keychain to insert into
- ctypes.byref(result_array) # Results
- )
- _assert_no_error(result)
-
- # A CFArray is not very useful to us as an intermediary
- # representation, so we are going to extract the objects we want
- # and then free the array. We don't need to keep hold of keys: the
- # keychain already has them!
- result_count = CoreFoundation.CFArrayGetCount(result_array)
- for index in range(result_count):
- item = CoreFoundation.CFArrayGetValueAtIndex(
- result_array, index
- )
- item = ctypes.cast(item, CoreFoundation.CFTypeRef)
-
- if _is_cert(item):
- CoreFoundation.CFRetain(item)
- certificates.append(item)
- elif _is_identity(item):
- CoreFoundation.CFRetain(item)
- identities.append(item)
- finally:
- if result_array:
- CoreFoundation.CFRelease(result_array)
-
- CoreFoundation.CFRelease(filedata)
-
- return (identities, certificates)
-
-
-def _load_client_cert_chain(keychain, *paths):
- """
- Load certificates and maybe keys from a number of files. Has the end goal
- of returning a CFArray containing one SecIdentityRef, and then zero or more
- SecCertificateRef objects, suitable for use as a client certificate trust
- chain.
- """
- # Ok, the strategy.
- #
- # This relies on knowing that macOS will not give you a SecIdentityRef
- # unless you have imported a key into a keychain. This is a somewhat
- # artificial limitation of macOS (for example, it doesn't necessarily
- # affect iOS), but there is nothing inside Security.framework that lets you
- # get a SecIdentityRef without having a key in a keychain.
- #
- # So the policy here is we take all the files and iterate them in order.
- # Each one will use SecItemImport to have one or more objects loaded from
- # it. We will also point at a keychain that macOS can use to work with the
- # private key.
- #
- # Once we have all the objects, we'll check what we actually have. If we
- # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
- # we'll take the first certificate (which we assume to be our leaf) and
- # ask the keychain to give us a SecIdentityRef with that cert's associated
- # key.
- #
- # We'll then return a CFArray containing the trust chain: one
- # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
- # responsibility for freeing this CFArray will be with the caller. This
- # CFArray must remain alive for the entire connection, so in practice it
- # will be stored with a single SSLSocket, along with the reference to the
- # keychain.
- certificates = []
- identities = []
-
- # Filter out bad paths.
- paths = (path for path in paths if path)
-
- try:
- for file_path in paths:
- new_identities, new_certs = _load_items_from_file(
- keychain, file_path
- )
- identities.extend(new_identities)
- certificates.extend(new_certs)
-
- # Ok, we have everything. The question is: do we have an identity? If
- # not, we want to grab one from the first cert we have.
- if not identities:
- new_identity = Security.SecIdentityRef()
- status = Security.SecIdentityCreateWithCertificate(
- keychain,
- certificates[0],
- ctypes.byref(new_identity)
- )
- _assert_no_error(status)
- identities.append(new_identity)
-
- # We now want to release the original certificate, as we no longer
- # need it.
- CoreFoundation.CFRelease(certificates.pop(0))
-
- # We now need to build a new CFArray that holds the trust chain.
- trust_chain = CoreFoundation.CFArrayCreateMutable(
- CoreFoundation.kCFAllocatorDefault,
- 0,
- ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
- )
- for item in itertools.chain(identities, certificates):
- # ArrayAppendValue does a CFRetain on the item. That's fine,
- # because the finally block will release our other refs to them.
- CoreFoundation.CFArrayAppendValue(trust_chain, item)
-
- return trust_chain
- finally:
- for obj in itertools.chain(identities, certificates):
- CoreFoundation.CFRelease(obj)
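-
-
-# Illustrative sketch, not part of the original module: how the
-# securetransport contrib module combines the helpers above when a client
-# certificate is configured. The name and paths are hypothetical; the caller
-# must keep the returned trust chain alive for the whole connection and
-# CFRelease it (and clean up the keychain) afterwards.
-def _example_client_chain(client_cert_path, client_key_path=None):
-    keychain, keychain_dir = _temporary_keychain()
-    trust_chain = _load_client_cert_chain(
-        keychain, client_cert_path, client_key_path
-    )
-    # ...typically passed to Security.SSLSetCertificate(context, trust_chain)
-    return keychain, keychain_dir, trust_chain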
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
deleted file mode 100644
index e74589fa8..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
+++ /dev/null
@@ -1,297 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-This module provides a pool manager that uses Google App Engine's
-`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
-
-Example usage::
-
- from urllib3 import PoolManager
- from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
-
- if is_appengine_sandbox():
- # AppEngineManager uses AppEngine's URLFetch API behind the scenes
- http = AppEngineManager()
- else:
- # PoolManager uses a socket-level API behind the scenes
- http = PoolManager()
-
- r = http.request('GET', 'https://google.com/')
-
-There are `limitations <https://cloud.google.com/appengine/docs/python/\
-urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
-the best choice for your application. There are three options for using
-urllib3 on Google App Engine:
-
-1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
- cost-effective in many circumstances as long as your usage is within the
- limitations.
-2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
- Sockets also have `limitations and restrictions
- <https://cloud.google.com/appengine/docs/python/sockets/\
- #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
- To use sockets, be sure to specify the following in your ``app.yaml``::
-
- env_variables:
- GAE_USE_SOCKETS_HTTPLIB : 'true'
-
-3. If you are using `App Engine Flexible
-<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
-:class:`PoolManager` without any configuration or special environment variables.
-"""
-
-from __future__ import absolute_import
-import logging
-import os
-import warnings
-from ..packages.six.moves.urllib.parse import urljoin
-
-from ..exceptions import (
- HTTPError,
- HTTPWarning,
- MaxRetryError,
- ProtocolError,
- TimeoutError,
- SSLError
-)
-
-from ..packages.six import BytesIO
-from ..request import RequestMethods
-from ..response import HTTPResponse
-from ..util.timeout import Timeout
-from ..util.retry import Retry
-
-try:
- from google.appengine.api import urlfetch
-except ImportError:
- urlfetch = None
-
-
-log = logging.getLogger(__name__)
-
-
-class AppEnginePlatformWarning(HTTPWarning):
- pass
-
-
-class AppEnginePlatformError(HTTPError):
- pass
-
-
-class AppEngineManager(RequestMethods):
- """
- Connection manager for Google App Engine sandbox applications.
-
- This manager uses the URLFetch service directly instead of using the
- emulated httplib, and is subject to URLFetch limitations as described in
- the App Engine documentation `here
- <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
-
-    Notably, it will raise an :class:`AppEnginePlatformError` if:
-        * URLFetch is not available.
-        * You attempt to use this on App Engine Flexible, where full socket
-          support is available.
-        * A request size is more than 10 megabytes.
-        * A response size is more than 32 megabytes.
-        * You use an unsupported request method such as OPTIONS.
-
- Beyond those cases, it will raise normal urllib3 errors.
- """
-
- def __init__(self, headers=None, retries=None, validate_certificate=True,
- urlfetch_retries=True):
- if not urlfetch:
- raise AppEnginePlatformError(
- "URLFetch is not available in this environment.")
-
- if is_prod_appengine_mvms():
- raise AppEnginePlatformError(
-                "Use normal urllib3.PoolManager instead of AppEngineManager "
- "on Managed VMs, as using URLFetch is not necessary in "
- "this environment.")
-
- warnings.warn(
- "urllib3 is using URLFetch on Google App Engine sandbox instead "
- "of sockets. To use sockets directly instead of URLFetch see "
- "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
- AppEnginePlatformWarning)
-
- RequestMethods.__init__(self, headers)
- self.validate_certificate = validate_certificate
- self.urlfetch_retries = urlfetch_retries
-
- self.retries = retries or Retry.DEFAULT
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- # Return False to re-raise any potential exceptions
- return False
-
- def urlopen(self, method, url, body=None, headers=None,
- retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
- **response_kw):
-
- retries = self._get_retries(retries, redirect)
-
- try:
- follow_redirects = (
- redirect and
- retries.redirect != 0 and
- retries.total)
- response = urlfetch.fetch(
- url,
- payload=body,
- method=method,
- headers=headers or {},
- allow_truncated=False,
- follow_redirects=self.urlfetch_retries and follow_redirects,
- deadline=self._get_absolute_timeout(timeout),
- validate_certificate=self.validate_certificate,
- )
- except urlfetch.DeadlineExceededError as e:
- raise TimeoutError(self, e)
-
- except urlfetch.InvalidURLError as e:
- if 'too large' in str(e):
- raise AppEnginePlatformError(
- "URLFetch request too large, URLFetch only "
- "supports requests up to 10mb in size.", e)
- raise ProtocolError(e)
-
- except urlfetch.DownloadError as e:
- if 'Too many redirects' in str(e):
- raise MaxRetryError(self, url, reason=e)
- raise ProtocolError(e)
-
- except urlfetch.ResponseTooLargeError as e:
- raise AppEnginePlatformError(
-                "URLFetch response too large, URLFetch only supports "
- "responses up to 32mb in size.", e)
-
- except urlfetch.SSLCertificateError as e:
- raise SSLError(e)
-
- except urlfetch.InvalidMethodError as e:
- raise AppEnginePlatformError(
- "URLFetch does not support method: %s" % method, e)
-
- http_response = self._urlfetch_response_to_http_response(
- response, retries=retries, **response_kw)
-
- # Handle redirect?
- redirect_location = redirect and http_response.get_redirect_location()
- if redirect_location:
- # Check for redirect response
- if (self.urlfetch_retries and retries.raise_on_redirect):
- raise MaxRetryError(self, url, "too many redirects")
- else:
- if http_response.status == 303:
- method = 'GET'
-
- try:
- retries = retries.increment(method, url, response=http_response, _pool=self)
- except MaxRetryError:
- if retries.raise_on_redirect:
- raise MaxRetryError(self, url, "too many redirects")
- return http_response
-
- retries.sleep_for_retry(http_response)
- log.debug("Redirecting %s -> %s", url, redirect_location)
- redirect_url = urljoin(url, redirect_location)
- return self.urlopen(
- method, redirect_url, body, headers,
- retries=retries, redirect=redirect,
- timeout=timeout, **response_kw)
-
- # Check if we should retry the HTTP response.
- has_retry_after = bool(http_response.getheader('Retry-After'))
- if retries.is_retry(method, http_response.status, has_retry_after):
- retries = retries.increment(
- method, url, response=http_response, _pool=self)
- log.debug("Retry: %s", url)
- retries.sleep(http_response)
- return self.urlopen(
- method, url,
- body=body, headers=headers,
- retries=retries, redirect=redirect,
- timeout=timeout, **response_kw)
-
- return http_response
-
- def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
-
- if is_prod_appengine():
- # Production GAE handles deflate encoding automatically, but does
- # not remove the encoding header.
- content_encoding = urlfetch_resp.headers.get('content-encoding')
-
- if content_encoding == 'deflate':
- del urlfetch_resp.headers['content-encoding']
-
- transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
- # We have a full response's content,
- # so let's make sure we don't report ourselves as chunked data.
- if transfer_encoding == 'chunked':
- encodings = transfer_encoding.split(",")
- encodings.remove('chunked')
- urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
-
- return HTTPResponse(
- # In order for decoding to work, we must present the content as
- # a file-like object.
- body=BytesIO(urlfetch_resp.content),
- headers=urlfetch_resp.headers,
- status=urlfetch_resp.status_code,
- **response_kw
- )
-
- def _get_absolute_timeout(self, timeout):
- if timeout is Timeout.DEFAULT_TIMEOUT:
- return None # Defer to URLFetch's default.
- if isinstance(timeout, Timeout):
- if timeout._read is not None or timeout._connect is not None:
- warnings.warn(
- "URLFetch does not support granular timeout settings, "
- "reverting to total or default URLFetch timeout.",
- AppEnginePlatformWarning)
- return timeout.total
- return timeout
-
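-    # Illustrative conversions, not part of the original module, assuming
-    # urllib3's Timeout semantics:
-    #
-    #     self._get_absolute_timeout(Timeout.DEFAULT_TIMEOUT)    -> None (URLFetch default)
-    #     self._get_absolute_timeout(Timeout(total=10))          -> 10
-    #     self._get_absolute_timeout(Timeout(connect=5, read=5)) -> None, after
-    #         warning that URLFetch has no granular timeouts
-    #     self._get_absolute_timeout(25)                         -> 25
-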
- def _get_retries(self, retries, redirect):
- if not isinstance(retries, Retry):
- retries = Retry.from_int(
- retries, redirect=redirect, default=self.retries)
-
- if retries.connect or retries.read or retries.redirect:
- warnings.warn(
- "URLFetch only supports total retries and does not "
- "recognize connect, read, or redirect retry parameters.",
- AppEnginePlatformWarning)
-
- return retries
-
-
-def is_appengine():
- return (is_local_appengine() or
- is_prod_appengine() or
- is_prod_appengine_mvms())
-
-
-def is_appengine_sandbox():
- return is_appengine() and not is_prod_appengine_mvms()
-
-
-def is_local_appengine():
- return ('APPENGINE_RUNTIME' in os.environ and
- 'Development/' in os.environ['SERVER_SOFTWARE'])
-
-
-def is_prod_appengine():
- return ('APPENGINE_RUNTIME' in os.environ and
- 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
- not is_prod_appengine_mvms())
-
-
-def is_prod_appengine_mvms():
- return os.environ.get('GAE_VM', False) == 'true'
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
deleted file mode 100644
index 3f8c9ebf5..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-NTLM authenticating pool, contributed by erikcederstran
-
-Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
-"""
-from __future__ import absolute_import
-
-from logging import getLogger
-from ntlm import ntlm
-
-from .. import HTTPSConnectionPool
-from ..packages.six.moves.http_client import HTTPSConnection
-
-
-log = getLogger(__name__)
-
-
-class NTLMConnectionPool(HTTPSConnectionPool):
- """
- Implements an NTLM authentication version of an urllib3 connection pool
- """
-
- scheme = 'https'
-
- def __init__(self, user, pw, authurl, *args, **kwargs):
- """
- authurl is a random URL on the server that is protected by NTLM.
- user is the Windows user, probably in the DOMAIN\\username format.
- pw is the password for the user.
- """
- super(NTLMConnectionPool, self).__init__(*args, **kwargs)
- self.authurl = authurl
- self.rawuser = user
- user_parts = user.split('\\', 1)
- self.domain = user_parts[0].upper()
- self.user = user_parts[1]
- self.pw = pw
-
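-    # Illustrative construction, not part of the original module; the host and
-    # credentials are hypothetical. rawuser keeps the original string, while
-    # self.domain and self.user hold the parsed halves:
-    #
-    #     pool = NTLMConnectionPool('EXAMPLE\\alice', 'secret',
-    #                               authurl='/ntlm-protected',
-    #                               host='intranet.example.com', port=443)
-    #     response = pool.urlopen('GET', pool.authurl)
-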
- def _new_conn(self):
- # Performs the NTLM handshake that secures the connection. The socket
- # must be kept open while requests are performed.
- self.num_connections += 1
- log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
- self.num_connections, self.host, self.authurl)
-
- headers = {}
- headers['Connection'] = 'Keep-Alive'
- req_header = 'Authorization'
- resp_header = 'www-authenticate'
-
- conn = HTTPSConnection(host=self.host, port=self.port)
-
- # Send negotiation message
- headers[req_header] = (
- 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
- log.debug('Request headers: %s', headers)
- conn.request('GET', self.authurl, None, headers)
- res = conn.getresponse()
- reshdr = dict(res.getheaders())
- log.debug('Response status: %s %s', res.status, res.reason)
- log.debug('Response headers: %s', reshdr)
- log.debug('Response data: %s [...]', res.read(100))
-
- # Remove the reference to the socket, so that it can not be closed by
- # the response object (we want to keep the socket open)
- res.fp = None
-
- # Server should respond with a challenge message
- auth_header_values = reshdr[resp_header].split(', ')
- auth_header_value = None
- for s in auth_header_values:
- if s[:5] == 'NTLM ':
- auth_header_value = s[5:]
- if auth_header_value is None:
- raise Exception('Unexpected %s response header: %s' %
- (resp_header, reshdr[resp_header]))
-
- # Send authentication message
- ServerChallenge, NegotiateFlags = \
- ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
- auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
- self.user,
- self.domain,
- self.pw,
- NegotiateFlags)
- headers[req_header] = 'NTLM %s' % auth_msg
- log.debug('Request headers: %s', headers)
- conn.request('GET', self.authurl, None, headers)
- res = conn.getresponse()
- log.debug('Response status: %s %s', res.status, res.reason)
- log.debug('Response headers: %s', dict(res.getheaders()))
- log.debug('Response data: %s [...]', res.read()[:100])
- if res.status != 200:
- if res.status == 401:
- raise Exception('Server rejected request: wrong '
- 'username or password')
- raise Exception('Wrong server response: %s %s' %
- (res.status, res.reason))
-
- res.fp = None
- log.debug('Connection established')
- return conn
-
- def urlopen(self, method, url, body=None, headers=None, retries=3,
- redirect=True, assert_same_host=True):
- if headers is None:
- headers = {}
- headers['Connection'] = 'Keep-Alive'
- return super(NTLMConnectionPool, self).urlopen(method, url, body,
- headers, retries,
- redirect,
- assert_same_host)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
deleted file mode 100644
index 8d373507d..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
+++ /dev/null
@@ -1,458 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-SSL with SNI_-support for Python 2. Follow these instructions if you would
-like to verify SSL certificates in Python 2. Note, the default libraries do
-*not* do certificate checking; you need to do additional work to validate
-certificates yourself.
-
-This needs the following packages installed:
-
-* pyOpenSSL (tested with 16.0.0)
-* cryptography (minimum 1.3.4, from pyopenssl)
-* idna (minimum 2.0, from cryptography)
-
-However, pyopenssl depends on cryptography, which depends on idna, so while we
-use all three directly here, we end up requiring relatively few extra packages.
-
-You can install them with the following command:
-
- pip install pyopenssl cryptography idna
-
-To activate certificate checking, call
-:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
-before you begin making HTTP requests. This can be done in a ``sitecustomize``
-module, or at any other time before your application begins using ``urllib3``,
-like this::
-
- try:
- import urllib3.contrib.pyopenssl
- urllib3.contrib.pyopenssl.inject_into_urllib3()
- except ImportError:
- pass
-
-Now you can use :mod:`urllib3` as you normally would, and it will support SNI
-when the required modules are installed.
-
-Activating this module also has the positive side effect of disabling SSL/TLS
-compression in Python 2 (see `CRIME attack`_).
-
-If you want to configure the default list of supported cipher suites, you can
-set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
-
-.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
-.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
-"""
-from __future__ import absolute_import
-
-import OpenSSL.SSL
-from cryptography import x509
-from cryptography.hazmat.backends.openssl import backend as openssl_backend
-from cryptography.hazmat.backends.openssl.x509 import _Certificate
-
-from socket import timeout, error as SocketError
-from io import BytesIO
-
-try: # Platform-specific: Python 2
- from socket import _fileobject
-except ImportError: # Platform-specific: Python 3
- _fileobject = None
- from ..packages.backports.makefile import backport_makefile
-
-import logging
-import ssl
-
-try:
- import six
-except ImportError:
- from ..packages import six
-
-import sys
-
-from .. import util
-
-__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
-
-# SNI always works.
-HAS_SNI = True
-
-# Map from urllib3 to PyOpenSSL compatible parameter-values.
-_openssl_versions = {
- ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
- ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
-}
-
-if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
- _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
-
-if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
- _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
-
-try:
- _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
-except AttributeError:
- pass
-
-_stdlib_to_openssl_verify = {
- ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
- ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
- ssl.CERT_REQUIRED:
- OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
-}
-_openssl_to_stdlib_verify = dict(
- (v, k) for k, v in _stdlib_to_openssl_verify.items()
-)
-
-# OpenSSL will only write 16K at a time
-SSL_WRITE_BLOCKSIZE = 16384
-
-orig_util_HAS_SNI = util.HAS_SNI
-orig_util_SSLContext = util.ssl_.SSLContext
-
-
-log = logging.getLogger(__name__)
-
-
-def inject_into_urllib3():
- 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
-
- _validate_dependencies_met()
-
- util.ssl_.SSLContext = PyOpenSSLContext
- util.HAS_SNI = HAS_SNI
- util.ssl_.HAS_SNI = HAS_SNI
- util.IS_PYOPENSSL = True
- util.ssl_.IS_PYOPENSSL = True
-
-
-def extract_from_urllib3():
- 'Undo monkey-patching by :func:`inject_into_urllib3`.'
-
- util.ssl_.SSLContext = orig_util_SSLContext
- util.HAS_SNI = orig_util_HAS_SNI
- util.ssl_.HAS_SNI = orig_util_HAS_SNI
- util.IS_PYOPENSSL = False
- util.ssl_.IS_PYOPENSSL = False
-
-
-def _validate_dependencies_met():
- """
- Verifies that PyOpenSSL's package-level dependencies have been met.
- Throws `ImportError` if they are not met.
- """
- # Method added in `cryptography==1.1`; not available in older versions
- from cryptography.x509.extensions import Extensions
- if getattr(Extensions, "get_extension_for_class", None) is None:
- raise ImportError("'cryptography' module missing required functionality. "
- "Try upgrading to v1.3.4 or newer.")
-
- # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
- # attribute is only present on those versions.
- from OpenSSL.crypto import X509
- x509 = X509()
- if getattr(x509, "_x509", None) is None:
- raise ImportError("'pyOpenSSL' module missing required functionality. "
- "Try upgrading to v0.14 or newer.")
-
-
-def _dnsname_to_stdlib(name):
- """
- Converts a dNSName SubjectAlternativeName field to the form used by the
- standard library on the given Python version.
-
- Cryptography produces a dNSName as a unicode string that was idna-decoded
- from ASCII bytes. We need to idna-encode that string to get it back, and
- then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
- uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
- """
- def idna_encode(name):
- """
- Borrowed wholesale from the Python Cryptography Project. It turns out
- that we can't just safely call `idna.encode`: it can explode for
- wildcard names. This avoids that problem.
- """
- import idna
-
- for prefix in [u'*.', u'.']:
- if name.startswith(prefix):
- name = name[len(prefix):]
- return prefix.encode('ascii') + idna.encode(name)
- return idna.encode(name)
-
- name = idna_encode(name)
- if sys.version_info >= (3, 0):
- name = name.decode('utf-8')
- return name
-
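-# Illustrative behaviour, not part of the original module, assuming the idna
-# package is installed. The wildcard prefix is stripped before encoding and
-# re-attached as raw ASCII, so it never reaches idna.encode:
-#
-#     _dnsname_to_stdlib(u'example.com')      -> 'example.com'
-#     _dnsname_to_stdlib(u'*.example.com')    -> '*.example.com'
-#     _dnsname_to_stdlib(u'bücher.example')   -> 'xn--bcher-kva.example'
-#
-# (Byte strings on Python 2, str on Python 3.)
-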
-
-def get_subj_alt_name(peer_cert):
- """
-    Given a PyOpenSSL certificate, provides all the subject alternative names.
- """
- # Pass the cert to cryptography, which has much better APIs for this.
- # This is technically using private APIs, but should work across all
- # relevant versions until PyOpenSSL gets something proper for this.
- cert = _Certificate(openssl_backend, peer_cert._x509)
-
- # We want to find the SAN extension. Ask Cryptography to locate it (it's
- # faster than looping in Python)
- try:
- ext = cert.extensions.get_extension_for_class(
- x509.SubjectAlternativeName
- ).value
- except x509.ExtensionNotFound:
- # No such extension, return the empty list.
- return []
- except (x509.DuplicateExtension, x509.UnsupportedExtension,
- x509.UnsupportedGeneralNameType, UnicodeError) as e:
- # A problem has been found with the quality of the certificate. Assume
- # no SAN field is present.
- log.warning(
- "A problem was encountered with the certificate that prevented "
- "urllib3 from finding the SubjectAlternativeName field. This can "
- "affect certificate validation. The error was %s",
- e,
- )
- return []
-
- # We want to return dNSName and iPAddress fields. We need to cast the IPs
- # back to strings because the match_hostname function wants them as
- # strings.
- # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
- # decoded. This is pretty frustrating, but that's what the standard library
- # does with certificates, and so we need to attempt to do the same.
- names = [
- ('DNS', _dnsname_to_stdlib(name))
- for name in ext.get_values_for_type(x509.DNSName)
- ]
- names.extend(
- ('IP Address', str(name))
- for name in ext.get_values_for_type(x509.IPAddress)
- )
-
- return names
-
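-# Illustrative output shape, not part of the original module: the pairs are
-# the same ('DNS', name) / ('IP Address', ip) tuples the stdlib
-# match_hostname logic consumes, e.g.
-#
-#     [('DNS', 'www.example.com'), ('IP Address', '192.0.2.10')]
-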
-
-class WrappedSocket(object):
- '''API-compatibility wrapper for Python OpenSSL's Connection-class.
-
- Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
- collector of pypy.
- '''
-
- def __init__(self, connection, socket, suppress_ragged_eofs=True):
- self.connection = connection
- self.socket = socket
- self.suppress_ragged_eofs = suppress_ragged_eofs
- self._makefile_refs = 0
- self._closed = False
-
- def fileno(self):
- return self.socket.fileno()
-
- # Copy-pasted from Python 3.5 source code
- def _decref_socketios(self):
- if self._makefile_refs > 0:
- self._makefile_refs -= 1
- if self._closed:
- self.close()
-
- def recv(self, *args, **kwargs):
- try:
- data = self.connection.recv(*args, **kwargs)
- except OpenSSL.SSL.SysCallError as e:
- if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
- return b''
- else:
- raise SocketError(str(e))
- except OpenSSL.SSL.ZeroReturnError as e:
- if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
- return b''
- else:
- raise
- except OpenSSL.SSL.WantReadError:
- rd = util.wait_for_read(self.socket, self.socket.gettimeout())
- if not rd:
- raise timeout('The read operation timed out')
- else:
- return self.recv(*args, **kwargs)
- else:
- return data
-
- def recv_into(self, *args, **kwargs):
- try:
- return self.connection.recv_into(*args, **kwargs)
- except OpenSSL.SSL.SysCallError as e:
- if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
- return 0
- else:
- raise SocketError(str(e))
- except OpenSSL.SSL.ZeroReturnError as e:
- if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
- return 0
- else:
- raise
- except OpenSSL.SSL.WantReadError:
- rd = util.wait_for_read(self.socket, self.socket.gettimeout())
- if not rd:
- raise timeout('The read operation timed out')
- else:
- return self.recv_into(*args, **kwargs)
-
- def settimeout(self, timeout):
- return self.socket.settimeout(timeout)
-
- def _send_until_done(self, data):
- while True:
- try:
- return self.connection.send(data)
- except OpenSSL.SSL.WantWriteError:
- wr = util.wait_for_write(self.socket, self.socket.gettimeout())
- if not wr:
- raise timeout()
- continue
- except OpenSSL.SSL.SysCallError as e:
- raise SocketError(str(e))
-
- def sendall(self, data):
- total_sent = 0
- while total_sent < len(data):
- sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
- total_sent += sent
-
- def shutdown(self):
- # FIXME rethrow compatible exceptions should we ever use this
- self.connection.shutdown()
-
- def close(self):
- if self._makefile_refs < 1:
- try:
- self._closed = True
- return self.connection.close()
- except OpenSSL.SSL.Error:
- return
- else:
- self._makefile_refs -= 1
-
- def getpeercert(self, binary_form=False):
- x509 = self.connection.get_peer_certificate()
-
- if not x509:
- return x509
-
- if binary_form:
- return OpenSSL.crypto.dump_certificate(
- OpenSSL.crypto.FILETYPE_ASN1,
- x509)
-
- return {
- 'subject': (
- (('commonName', x509.get_subject().CN),),
- ),
- 'subjectAltName': get_subj_alt_name(x509)
- }
-
- def _reuse(self):
- self._makefile_refs += 1
-
- def _drop(self):
- if self._makefile_refs < 1:
- self.close()
- else:
- self._makefile_refs -= 1
-
-
-if _fileobject: # Platform-specific: Python 2
- def makefile(self, mode, bufsize=-1):
- self._makefile_refs += 1
- return _fileobject(self, mode, bufsize, close=True)
-else: # Platform-specific: Python 3
- makefile = backport_makefile
-
-WrappedSocket.makefile = makefile
-
-
-class PyOpenSSLContext(object):
- """
- I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
- for translating the interface of the standard library ``SSLContext`` object
- to calls into PyOpenSSL.
- """
- def __init__(self, protocol):
- self.protocol = _openssl_versions[protocol]
- self._ctx = OpenSSL.SSL.Context(self.protocol)
- self._options = 0
- self.check_hostname = False
-
- @property
- def options(self):
- return self._options
-
- @options.setter
- def options(self, value):
- self._options = value
- self._ctx.set_options(value)
-
- @property
- def verify_mode(self):
- return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
-
- @verify_mode.setter
- def verify_mode(self, value):
- self._ctx.set_verify(
- _stdlib_to_openssl_verify[value],
- _verify_callback
- )
-
- def set_default_verify_paths(self):
- self._ctx.set_default_verify_paths()
-
- def set_ciphers(self, ciphers):
- if isinstance(ciphers, six.text_type):
- ciphers = ciphers.encode('utf-8')
- self._ctx.set_cipher_list(ciphers)
-
- def load_verify_locations(self, cafile=None, capath=None, cadata=None):
- if cafile is not None:
- cafile = cafile.encode('utf-8')
- if capath is not None:
- capath = capath.encode('utf-8')
- self._ctx.load_verify_locations(cafile, capath)
- if cadata is not None:
- self._ctx.load_verify_locations(BytesIO(cadata))
-
- def load_cert_chain(self, certfile, keyfile=None, password=None):
- self._ctx.use_certificate_file(certfile)
- if password is not None:
- self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
- self._ctx.use_privatekey_file(keyfile or certfile)
-
- def wrap_socket(self, sock, server_side=False,
- do_handshake_on_connect=True, suppress_ragged_eofs=True,
- server_hostname=None):
- cnx = OpenSSL.SSL.Connection(self._ctx, sock)
-
- if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
- server_hostname = server_hostname.encode('utf-8')
-
- if server_hostname is not None:
- cnx.set_tlsext_host_name(server_hostname)
-
- cnx.set_connect_state()
-
- while True:
- try:
- cnx.do_handshake()
- except OpenSSL.SSL.WantReadError:
- rd = util.wait_for_read(sock, sock.gettimeout())
- if not rd:
- raise timeout('select timed out')
- continue
- except OpenSSL.SSL.Error as e:
- raise ssl.SSLError('bad handshake: %r' % e)
- break
-
- return WrappedSocket(cnx, sock)
-
-
-def _verify_callback(cnx, x509, err_no, err_depth, return_code):
- return err_no == 0
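-
-
-# Illustrative sketch, not part of the original module: driving
-# PyOpenSSLContext directly, much as urllib3's ssl_ utilities do once
-# inject_into_urllib3() has swapped it in. The hostname is hypothetical and
-# the socket is assumed to be already connected.
-def _example_wrap(sock):
-    ctx = PyOpenSSLContext(ssl.PROTOCOL_SSLv23)
-    ctx.verify_mode = ssl.CERT_REQUIRED
-    ctx.set_default_verify_paths()
-    return ctx.wrap_socket(sock, server_hostname='www.example.com')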
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
deleted file mode 100644
index fcc30118c..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
+++ /dev/null
@@ -1,808 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-SecureTransport support for urllib3 via ctypes.
-
-This makes platform-native TLS available to urllib3 users on macOS without the
-use of a compiler. This is an important feature because the Python Package
-Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
-that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
-this is to give macOS users an alternative solution to the problem, and that
-solution is to use SecureTransport.
-
-We use ctypes here because this solution must not require a compiler. That's
-because pip is not allowed to require a compiler either.
-
-This is not intended to be a seriously long-term solution to this problem.
-The hope is that PEP 543 will eventually solve this issue for us, at which
-point we can retire this contrib module. But in the short term, we need to
-solve the impending tire fire that is Python on Mac without this kind of
-contrib module. So...here we are.
-
-To use this module, simply import and inject it::
-
- import urllib3.contrib.securetransport
- urllib3.contrib.securetransport.inject_into_urllib3()
-
-Happy TLSing!
-"""
-from __future__ import absolute_import
-
-import contextlib
-import ctypes
-import errno
-import os.path
-import shutil
-import socket
-import ssl
-import threading
-import weakref
-
-from .. import util
-from ._securetransport.bindings import (
- Security, SecurityConst, CoreFoundation
-)
-from ._securetransport.low_level import (
- _assert_no_error, _cert_array_from_pem, _temporary_keychain,
- _load_client_cert_chain
-)
-
-try: # Platform-specific: Python 2
- from socket import _fileobject
-except ImportError: # Platform-specific: Python 3
- _fileobject = None
- from ..packages.backports.makefile import backport_makefile
-
-try:
- memoryview(b'')
-except NameError:
- raise ImportError("SecureTransport only works on Pythons with memoryview")
-
-__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
-
-# SNI always works
-HAS_SNI = True
-
-orig_util_HAS_SNI = util.HAS_SNI
-orig_util_SSLContext = util.ssl_.SSLContext
-
-# This dictionary is used by the read callback to obtain a handle to the
-# calling wrapped socket. This is a pretty silly approach, but for now it'll
-# do. I feel like I should be able to smuggle a handle to the wrapped socket
-# directly in the SSLConnectionRef, but for now this approach will work I
-# guess.
-#
-# We need to lock around this structure for inserts, but we don't do it for
-# reads/writes in the callbacks. The reasoning here goes as follows:
-#
-# 1. It is not possible to call into the callbacks before the dictionary is
-# populated, so once in the callback the id must be in the dictionary.
-# 2. The callbacks don't mutate the dictionary, they only read from it, and
-# so cannot conflict with any of the insertions.
-#
-# This is good: if we had to lock in the callbacks we'd drastically slow down
-# the performance of this code.
-_connection_refs = weakref.WeakValueDictionary()
-_connection_ref_lock = threading.Lock()
-
-# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
-# for no better reason than we need *a* limit, and this one is right there.
-SSL_WRITE_BLOCKSIZE = 16384
-
-# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
-# individual cipher suites. We need to do this because this is how
-# SecureTransport wants them.
-CIPHER_SUITES = [
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
- SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
- SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
- SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
- SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
- SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
-]
-
-# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
-# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
-_protocol_to_min_max = {
- ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
-}
-
-if hasattr(ssl, "PROTOCOL_SSLv2"):
- _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
- SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2
- )
-if hasattr(ssl, "PROTOCOL_SSLv3"):
- _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
- SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3
- )
-if hasattr(ssl, "PROTOCOL_TLSv1"):
- _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
- SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1
- )
-if hasattr(ssl, "PROTOCOL_TLSv1_1"):
- _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
- SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11
- )
-if hasattr(ssl, "PROTOCOL_TLSv1_2"):
- _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
- SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12
- )
-if hasattr(ssl, "PROTOCOL_TLS"):
- _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
-
-
-def inject_into_urllib3():
- """
- Monkey-patch urllib3 with SecureTransport-backed SSL-support.
- """
- util.ssl_.SSLContext = SecureTransportContext
- util.HAS_SNI = HAS_SNI
- util.ssl_.HAS_SNI = HAS_SNI
- util.IS_SECURETRANSPORT = True
- util.ssl_.IS_SECURETRANSPORT = True
-
-
-def extract_from_urllib3():
- """
- Undo monkey-patching by :func:`inject_into_urllib3`.
- """
- util.ssl_.SSLContext = orig_util_SSLContext
- util.HAS_SNI = orig_util_HAS_SNI
- util.ssl_.HAS_SNI = orig_util_HAS_SNI
- util.IS_SECURETRANSPORT = False
- util.ssl_.IS_SECURETRANSPORT = False
-
-
-def _read_callback(connection_id, data_buffer, data_length_pointer):
- """
- SecureTransport read callback. This is called by ST to request that data
- be returned from the socket.
- """
- wrapped_socket = None
- try:
- wrapped_socket = _connection_refs.get(connection_id)
- if wrapped_socket is None:
- return SecurityConst.errSSLInternal
- base_socket = wrapped_socket.socket
-
- requested_length = data_length_pointer[0]
-
- timeout = wrapped_socket.gettimeout()
- error = None
- read_count = 0
- buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
- buffer_view = memoryview(buffer)
-
- try:
- while read_count < requested_length:
- if timeout is None or timeout >= 0:
- readables = util.wait_for_read([base_socket], timeout)
- if not readables:
- raise socket.error(errno.EAGAIN, 'timed out')
-
- # We need to tell ctypes that we have a buffer that can be
- # written to. Upsettingly, we do that like this:
- chunk_size = base_socket.recv_into(
- buffer_view[read_count:requested_length]
- )
- read_count += chunk_size
- if not chunk_size:
- if not read_count:
- return SecurityConst.errSSLClosedGraceful
- break
- except (socket.error) as e:
- error = e.errno
-
- if error is not None and error != errno.EAGAIN:
- if error == errno.ECONNRESET:
- return SecurityConst.errSSLClosedAbort
- raise
-
- data_length_pointer[0] = read_count
-
- if read_count != requested_length:
- return SecurityConst.errSSLWouldBlock
-
- return 0
- except Exception as e:
- if wrapped_socket is not None:
- wrapped_socket._exception = e
- return SecurityConst.errSSLInternal
-
-
-def _write_callback(connection_id, data_buffer, data_length_pointer):
- """
- SecureTransport write callback. This is called by ST to request that data
- actually be sent on the network.
- """
- wrapped_socket = None
- try:
- wrapped_socket = _connection_refs.get(connection_id)
- if wrapped_socket is None:
- return SecurityConst.errSSLInternal
- base_socket = wrapped_socket.socket
-
- bytes_to_write = data_length_pointer[0]
- data = ctypes.string_at(data_buffer, bytes_to_write)
-
- timeout = wrapped_socket.gettimeout()
- error = None
- sent = 0
-
- try:
- while sent < bytes_to_write:
- if timeout is None or timeout >= 0:
- writables = util.wait_for_write([base_socket], timeout)
- if not writables:
- raise socket.error(errno.EAGAIN, 'timed out')
- chunk_sent = base_socket.send(data)
- sent += chunk_sent
-
- # This has some needless copying here, but I'm not sure there's
- # much value in optimising this data path.
- data = data[chunk_sent:]
- except (socket.error) as e:
- error = e.errno
-
- if error is not None and error != errno.EAGAIN:
- if error == errno.ECONNRESET:
- return SecurityConst.errSSLClosedAbort
- raise
-
- data_length_pointer[0] = sent
- if sent != bytes_to_write:
- return SecurityConst.errSSLWouldBlock
-
- return 0
- except Exception as e:
- if wrapped_socket is not None:
- wrapped_socket._exception = e
- return SecurityConst.errSSLInternal
-
-
-# We need to keep these two object references alive: if they get GC'd while
-# in use then SecureTransport could attempt to call a function that is in freed
-# memory. That would be...uh...bad. Yeah, that's the word. Bad.
-_read_callback_pointer = Security.SSLReadFunc(_read_callback)
-_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
-
-
-class WrappedSocket(object):
- """
- API-compatibility wrapper for Python's OpenSSL wrapped socket object.
-
- Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
- collector of PyPy.
- """
- def __init__(self, socket):
- self.socket = socket
- self.context = None
- self._makefile_refs = 0
- self._closed = False
- self._exception = None
- self._keychain = None
- self._keychain_dir = None
- self._client_cert_chain = None
-
- # We save off the previously-configured timeout and then set it to
- # zero. This is done because we use select and friends to handle the
- # timeouts, but if we leave the timeout set on the lower socket then
- # Python will "kindly" call select on that socket again for us. Avoid
- # that by forcing the timeout to zero.
- self._timeout = self.socket.gettimeout()
- self.socket.settimeout(0)
-
- @contextlib.contextmanager
- def _raise_on_error(self):
- """
- A context manager that can be used to wrap calls that do I/O from
- SecureTransport. If any of the I/O callbacks hit an exception, this
- context manager will correctly propagate the exception after the fact.
- This avoids silently swallowing those exceptions.
-
- It also correctly forces the socket closed.
- """
- self._exception = None
-
- # We explicitly don't catch around this yield because in the unlikely
- # event that an exception was hit in the block we don't want to swallow
- # it.
- yield
- if self._exception is not None:
- exception, self._exception = self._exception, None
- self.close()
- raise exception
-
- def _set_ciphers(self):
- """
-        Sets up the allowed ciphers. By default this matches the set in
-        util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. The list is
-        hard-coded and cannot be changed at this time, mostly because parsing
-        OpenSSL cipher strings is going to be a freaking nightmare.
- """
- ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
- result = Security.SSLSetEnabledCiphers(
- self.context, ciphers, len(CIPHER_SUITES)
- )
- _assert_no_error(result)
-
- def _custom_validate(self, verify, trust_bundle):
- """
- Called when we have set custom validation. We do this in two cases:
- first, when cert validation is entirely disabled; and second, when
- using a custom trust DB.
- """
- # If we disabled cert validation, just say: cool.
- if not verify:
- return
-
- # We want data in memory, so load it up.
- if os.path.isfile(trust_bundle):
- with open(trust_bundle, 'rb') as f:
- trust_bundle = f.read()
-
- cert_array = None
- trust = Security.SecTrustRef()
-
- try:
- # Get a CFArray that contains the certs we want.
- cert_array = _cert_array_from_pem(trust_bundle)
-
- # Ok, now the hard part. We want to get the SecTrustRef that ST has
- # created for this connection, shove our CAs into it, tell ST to
- # ignore everything else it knows, and then ask if it can build a
- # chain. This is a buuuunch of code.
- result = Security.SSLCopyPeerTrust(
- self.context, ctypes.byref(trust)
- )
- _assert_no_error(result)
- if not trust:
- raise ssl.SSLError("Failed to copy trust reference")
-
- result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
- _assert_no_error(result)
-
- result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
- _assert_no_error(result)
-
- trust_result = Security.SecTrustResultType()
- result = Security.SecTrustEvaluate(
- trust, ctypes.byref(trust_result)
- )
- _assert_no_error(result)
- finally:
- if trust:
- CoreFoundation.CFRelease(trust)
-
-            if cert_array is not None:
- CoreFoundation.CFRelease(cert_array)
-
- # Ok, now we can look at what the result was.
- successes = (
- SecurityConst.kSecTrustResultUnspecified,
- SecurityConst.kSecTrustResultProceed
- )
- if trust_result.value not in successes:
- raise ssl.SSLError(
- "certificate verify failed, error code: %d" %
- trust_result.value
- )
-
- def handshake(self,
- server_hostname,
- verify,
- trust_bundle,
- min_version,
- max_version,
- client_cert,
- client_key,
- client_key_passphrase):
- """
-        Actually performs the TLS handshake. This is run automatically by the
-        wrapped socket, and shouldn't be needed in user code.
- """
- # First, we do the initial bits of connection setup. We need to create
- # a context, set its I/O funcs, and set the connection reference.
- self.context = Security.SSLCreateContext(
- None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
- )
- result = Security.SSLSetIOFuncs(
- self.context, _read_callback_pointer, _write_callback_pointer
- )
- _assert_no_error(result)
-
- # Here we need to compute the handle to use. We do this by taking the
- # id of self modulo 2**31 - 1. If this is already in the dictionary, we
- # just keep incrementing by one until we find a free space.
- with _connection_ref_lock:
- handle = id(self) % 2147483647
- while handle in _connection_refs:
- handle = (handle + 1) % 2147483647
- _connection_refs[handle] = self
-
- result = Security.SSLSetConnection(self.context, handle)
- _assert_no_error(result)
-
- # If we have a server hostname, we should set that too.
- if server_hostname:
- if not isinstance(server_hostname, bytes):
- server_hostname = server_hostname.encode('utf-8')
-
- result = Security.SSLSetPeerDomainName(
- self.context, server_hostname, len(server_hostname)
- )
- _assert_no_error(result)
-
- # Setup the ciphers.
- self._set_ciphers()
-
- # Set the minimum and maximum TLS versions.
- result = Security.SSLSetProtocolVersionMin(self.context, min_version)
- _assert_no_error(result)
- result = Security.SSLSetProtocolVersionMax(self.context, max_version)
- _assert_no_error(result)
-
- # If there's a trust DB, we need to use it. We do that by telling
- # SecureTransport to break on server auth. We also do that if we don't
- # want to validate the certs at all: we just won't actually do any
- # authing in that case.
- if not verify or trust_bundle is not None:
- result = Security.SSLSetSessionOption(
- self.context,
- SecurityConst.kSSLSessionOptionBreakOnServerAuth,
- True
- )
- _assert_no_error(result)
-
- # If there's a client cert, we need to use it.
- if client_cert:
- self._keychain, self._keychain_dir = _temporary_keychain()
- self._client_cert_chain = _load_client_cert_chain(
- self._keychain, client_cert, client_key
- )
- result = Security.SSLSetCertificate(
- self.context, self._client_cert_chain
- )
- _assert_no_error(result)
-
- while True:
- with self._raise_on_error():
- result = Security.SSLHandshake(self.context)
-
- if result == SecurityConst.errSSLWouldBlock:
- raise socket.timeout("handshake timed out")
- elif result == SecurityConst.errSSLServerAuthCompleted:
- self._custom_validate(verify, trust_bundle)
- continue
- else:
- _assert_no_error(result)
- break
-
- def fileno(self):
- return self.socket.fileno()
-
- # Copy-pasted from Python 3.5 source code
- def _decref_socketios(self):
- if self._makefile_refs > 0:
- self._makefile_refs -= 1
- if self._closed:
- self.close()
-
- def recv(self, bufsiz):
- buffer = ctypes.create_string_buffer(bufsiz)
- bytes_read = self.recv_into(buffer, bufsiz)
- data = buffer[:bytes_read]
- return data
-
- def recv_into(self, buffer, nbytes=None):
- # Read short on EOF.
- if self._closed:
- return 0
-
- if nbytes is None:
- nbytes = len(buffer)
-
- buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
- processed_bytes = ctypes.c_size_t(0)
-
- with self._raise_on_error():
- result = Security.SSLRead(
- self.context, buffer, nbytes, ctypes.byref(processed_bytes)
- )
-
- # There are some result codes that we want to treat as "not always
- # errors". Specifically, those are errSSLWouldBlock,
- # errSSLClosedGraceful, and errSSLClosedNoNotify.
- if (result == SecurityConst.errSSLWouldBlock):
- # If we didn't process any bytes, then this was just a time out.
- # However, we can get errSSLWouldBlock in situations when we *did*
- # read some data, and in those cases we should just read "short"
- # and return.
- if processed_bytes.value == 0:
- # Timed out, no data read.
- raise socket.timeout("recv timed out")
- elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify):
- # The remote peer has closed this connection. We should do so as
- # well. Note that we don't actually return here because in
- # principle this could actually be fired along with return data.
- # It's unlikely though.
- self.close()
- else:
- _assert_no_error(result)
-
- # Ok, we read and probably succeeded. We should return whatever data
- # was actually read.
- return processed_bytes.value
-
- def settimeout(self, timeout):
- self._timeout = timeout
-
- def gettimeout(self):
- return self._timeout
-
- def send(self, data):
- processed_bytes = ctypes.c_size_t(0)
-
- with self._raise_on_error():
- result = Security.SSLWrite(
- self.context, data, len(data), ctypes.byref(processed_bytes)
- )
-
- if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
- # Timed out
- raise socket.timeout("send timed out")
- else:
- _assert_no_error(result)
-
- # We sent, and probably succeeded. Tell them how much we sent.
- return processed_bytes.value
-
- def sendall(self, data):
- total_sent = 0
- while total_sent < len(data):
- sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
- total_sent += sent
-
- def shutdown(self):
- with self._raise_on_error():
- Security.SSLClose(self.context)
-
- def close(self):
- # TODO: should I do clean shutdown here? Do I have to?
- if self._makefile_refs < 1:
- self._closed = True
- if self.context:
- CoreFoundation.CFRelease(self.context)
- self.context = None
- if self._client_cert_chain:
- CoreFoundation.CFRelease(self._client_cert_chain)
- self._client_cert_chain = None
- if self._keychain:
- Security.SecKeychainDelete(self._keychain)
- CoreFoundation.CFRelease(self._keychain)
- shutil.rmtree(self._keychain_dir)
- self._keychain = self._keychain_dir = None
- return self.socket.close()
- else:
- self._makefile_refs -= 1
-
- def getpeercert(self, binary_form=False):
- # Urgh, annoying.
- #
- # Here's how we do this:
- #
- # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
- # connection.
- # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
- # 3. To get the CN, call SecCertificateCopyCommonName and process that
- # string so that it's of the appropriate type.
- # 4. To get the SAN, we need to do something a bit more complex:
- # a. Call SecCertificateCopyValues to get the data, requesting
- # kSecOIDSubjectAltName.
- # b. Mess about with this dictionary to try to get the SANs out.
- #
- # This is gross. Really gross. It's going to be a few hundred LoC extra
- # just to repeat something that SecureTransport can *already do*. So my
- # operating assumption at this time is that what we want to do is
- # instead to just flag to urllib3 that it shouldn't do its own hostname
- # validation when using SecureTransport.
- if not binary_form:
- raise ValueError(
- "SecureTransport only supports dumping binary certs"
- )
- trust = Security.SecTrustRef()
- certdata = None
- der_bytes = None
-
- try:
- # Grab the trust store.
- result = Security.SSLCopyPeerTrust(
- self.context, ctypes.byref(trust)
- )
- _assert_no_error(result)
- if not trust:
- # Probably we haven't done the handshake yet. No biggie.
- return None
-
- cert_count = Security.SecTrustGetCertificateCount(trust)
- if not cert_count:
- # Also a case that might happen if we haven't handshaked.
- # Handshook? Handshaken?
- return None
-
- leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
- assert leaf
-
- # Ok, now we want the DER bytes.
- certdata = Security.SecCertificateCopyData(leaf)
- assert certdata
-
- data_length = CoreFoundation.CFDataGetLength(certdata)
- data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
- der_bytes = ctypes.string_at(data_buffer, data_length)
- finally:
- if certdata:
- CoreFoundation.CFRelease(certdata)
- if trust:
- CoreFoundation.CFRelease(trust)
-
- return der_bytes
-
- def _reuse(self):
- self._makefile_refs += 1
-
- def _drop(self):
- if self._makefile_refs < 1:
- self.close()
- else:
- self._makefile_refs -= 1
-
-
-if _fileobject: # Platform-specific: Python 2
- def makefile(self, mode, bufsize=-1):
- self._makefile_refs += 1
- return _fileobject(self, mode, bufsize, close=True)
-else: # Platform-specific: Python 3
- def makefile(self, mode="r", buffering=None, *args, **kwargs):
- # We disable buffering with SecureTransport because it conflicts with
- # the buffering that ST does internally (see issue #1153 for more).
- buffering = 0
- return backport_makefile(self, mode, buffering, *args, **kwargs)
-
-WrappedSocket.makefile = makefile
-
-
-class SecureTransportContext(object):
- """
- I am a wrapper class for the SecureTransport library, to translate the
- interface of the standard library ``SSLContext`` object to calls into
- SecureTransport.
- """
- def __init__(self, protocol):
- self._min_version, self._max_version = _protocol_to_min_max[protocol]
- self._options = 0
- self._verify = False
- self._trust_bundle = None
- self._client_cert = None
- self._client_key = None
- self._client_key_passphrase = None
-
- @property
- def check_hostname(self):
- """
- SecureTransport cannot have its hostname checking disabled. For more,
- see the comment on getpeercert() in this file.
- """
- return True
-
- @check_hostname.setter
- def check_hostname(self, value):
- """
- SecureTransport cannot have its hostname checking disabled. For more,
- see the comment on getpeercert() in this file.
- """
- pass
-
- @property
- def options(self):
- # TODO: Well, crap.
- #
- # So this is the bit of the code that is the most likely to cause us
- # trouble. Essentially we need to enumerate all of the SSL options that
- # users might want to use and try to see if we can sensibly translate
- # them, or whether we should just ignore them.
- return self._options
-
- @options.setter
- def options(self, value):
- # TODO: Update in line with above.
- self._options = value
-
- @property
- def verify_mode(self):
- return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
-
- @verify_mode.setter
- def verify_mode(self, value):
- self._verify = True if value == ssl.CERT_REQUIRED else False
-
- def set_default_verify_paths(self):
- # So, this has to do something a bit weird. Specifically, what it does
- # is nothing.
- #
- # This means that, if we had previously had load_verify_locations
- # called, this does not undo that. We need to do that because it turns
- # out that the rest of the urllib3 code will attempt to load the
- # default verify paths if it hasn't been told about any paths, even if
-        # the context itself was configured sometime earlier. We resolve that by just
- # ignoring it.
- pass
-
- def load_default_certs(self):
- return self.set_default_verify_paths()
-
- def set_ciphers(self, ciphers):
- # For now, we just require the default cipher string.
- if ciphers != util.ssl_.DEFAULT_CIPHERS:
- raise ValueError(
- "SecureTransport doesn't support custom cipher strings"
- )
-
- def load_verify_locations(self, cafile=None, capath=None, cadata=None):
- # OK, we only really support cadata and cafile.
- if capath is not None:
- raise ValueError(
- "SecureTransport does not support cert directories"
- )
-
- self._trust_bundle = cafile or cadata
-
- def load_cert_chain(self, certfile, keyfile=None, password=None):
- self._client_cert = certfile
- self._client_key = keyfile
- self._client_cert_passphrase = password
-
- def wrap_socket(self, sock, server_side=False,
- do_handshake_on_connect=True, suppress_ragged_eofs=True,
- server_hostname=None):
- # So, what do we do here? Firstly, we assert some properties. This is a
- # stripped down shim, so there is some functionality we don't support.
- # See PEP 543 for the real deal.
- assert not server_side
- assert do_handshake_on_connect
- assert suppress_ragged_eofs
-
- # Ok, we're good to go. Now we want to create the wrapped socket object
- # and store it in the appropriate place.
- wrapped_socket = WrappedSocket(sock)
-
- # Now we can handshake
- wrapped_socket.handshake(
- server_hostname, self._verify, self._trust_bundle,
- self._min_version, self._max_version, self._client_cert,
- self._client_key, self._client_key_passphrase
- )
- return wrapped_socket
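
For context on how the module deleted above was consumed: the SecureTransport shim replaces urllib3's TLS layer wholesale rather than being selected per request. A minimal sketch, assuming macOS and the inject_into_urllib3()/extract_from_urllib3() helpers defined earlier in this same deleted file:

# Hedged sketch: enabling the SecureTransport backend, then restoring
# the stock ssl-based machinery. Assumes macOS and the inject/extract
# helpers from earlier in the deleted module.
import urllib3
from urllib3.contrib import securetransport

securetransport.inject_into_urllib3()   # swap urllib3's TLS machinery for ST
try:
    http = urllib3.PoolManager()
    resp = http.request("GET", "https://example.com/")
    print(resp.status)
finally:
    securetransport.extract_from_urllib3()  # restore the default ssl backend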
diff --git a/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py b/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
deleted file mode 100644
index 1cb79285b..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# -*- coding: utf-8 -*-
-# SPDX-License-Identifier: MIT
-"""
-This module contains provisional support for SOCKS proxies from within
-urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
-SOCKS5. To enable its functionality, either install PySocks or install this
-module with the ``socks`` extra.
-
-The SOCKS implementation supports the full range of urllib3 features. It also
-supports the following SOCKS features:
-
-- SOCKS4
-- SOCKS4a
-- SOCKS5
-- Usernames and passwords for the SOCKS proxy
-
-Known Limitations:
-
-- Currently PySocks does not support contacting remote websites via literal
- IPv6 addresses. Any such connection attempt will fail. You must use a domain
- name.
-- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
- such connection attempt will fail.
-"""
-from __future__ import absolute_import
-
-try:
- import socks
-except ImportError:
- import warnings
- from ..exceptions import DependencyWarning
-
- warnings.warn((
- 'SOCKS support in urllib3 requires the installation of optional '
- 'dependencies: specifically, PySocks. For more information, see '
- 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
- ),
- DependencyWarning
- )
- raise
-
-from socket import error as SocketError, timeout as SocketTimeout
-
-from ..connection import (
- HTTPConnection, HTTPSConnection
-)
-from ..connectionpool import (
- HTTPConnectionPool, HTTPSConnectionPool
-)
-from ..exceptions import ConnectTimeoutError, NewConnectionError
-from ..poolmanager import PoolManager
-from ..util.url import parse_url
-
-try:
- import ssl
-except ImportError:
- ssl = None
-
-
-class SOCKSConnection(HTTPConnection):
- """
- A plain-text HTTP connection that connects via a SOCKS proxy.
- """
- def __init__(self, *args, **kwargs):
- self._socks_options = kwargs.pop('_socks_options')
- super(SOCKSConnection, self).__init__(*args, **kwargs)
-
- def _new_conn(self):
- """
- Establish a new connection via the SOCKS proxy.
- """
- extra_kw = {}
- if self.source_address:
- extra_kw['source_address'] = self.source_address
-
- if self.socket_options:
- extra_kw['socket_options'] = self.socket_options
-
- try:
- conn = socks.create_connection(
- (self.host, self.port),
- proxy_type=self._socks_options['socks_version'],
- proxy_addr=self._socks_options['proxy_host'],
- proxy_port=self._socks_options['proxy_port'],
- proxy_username=self._socks_options['username'],
- proxy_password=self._socks_options['password'],
- proxy_rdns=self._socks_options['rdns'],
- timeout=self.timeout,
- **extra_kw
- )
-
- except SocketTimeout as e:
- raise ConnectTimeoutError(
- self, "Connection to %s timed out. (connect timeout=%s)" %
- (self.host, self.timeout))
-
- except socks.ProxyError as e:
- # This is fragile as hell, but it seems to be the only way to raise
- # useful errors here.
- if e.socket_err:
- error = e.socket_err
- if isinstance(error, SocketTimeout):
- raise ConnectTimeoutError(
- self,
- "Connection to %s timed out. (connect timeout=%s)" %
- (self.host, self.timeout)
- )
- else:
- raise NewConnectionError(
- self,
- "Failed to establish a new connection: %s" % error
- )
- else:
- raise NewConnectionError(
- self,
- "Failed to establish a new connection: %s" % e
- )
-
- except SocketError as e: # Defensive: PySocks should catch all these.
- raise NewConnectionError(
- self, "Failed to establish a new connection: %s" % e)
-
- return conn
-
-
-# We don't need to duplicate the Verified/Unverified distinction from
-# urllib3/connection.py here because the HTTPSConnection will already have been
-# correctly set to either the Verified or Unverified form by that module. This
-# means the SOCKSHTTPSConnection will automatically be the correct type.
-class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
- pass
-
-
-class SOCKSHTTPConnectionPool(HTTPConnectionPool):
- ConnectionCls = SOCKSConnection
-
-
-class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
- ConnectionCls = SOCKSHTTPSConnection
-
-
-class SOCKSProxyManager(PoolManager):
- """
- A version of the urllib3 ProxyManager that routes connections via the
- defined SOCKS proxy.
- """
- pool_classes_by_scheme = {
- 'http': SOCKSHTTPConnectionPool,
- 'https': SOCKSHTTPSConnectionPool,
- }
-
- def __init__(self, proxy_url, username=None, password=None,
- num_pools=10, headers=None, **connection_pool_kw):
- parsed = parse_url(proxy_url)
-
- if parsed.scheme == 'socks5':
- socks_version = socks.PROXY_TYPE_SOCKS5
- rdns = False
- elif parsed.scheme == 'socks5h':
- socks_version = socks.PROXY_TYPE_SOCKS5
- rdns = True
- elif parsed.scheme == 'socks4':
- socks_version = socks.PROXY_TYPE_SOCKS4
- rdns = False
- elif parsed.scheme == 'socks4a':
- socks_version = socks.PROXY_TYPE_SOCKS4
- rdns = True
- else:
- raise ValueError(
- "Unable to determine SOCKS version from %s" % proxy_url
- )
-
- self.proxy_url = proxy_url
-
- socks_options = {
- 'socks_version': socks_version,
- 'proxy_host': parsed.host,
- 'proxy_port': parsed.port,
- 'username': username,
- 'password': password,
- 'rdns': rdns
- }
- connection_pool_kw['_socks_options'] = socks_options
-
- super(SOCKSProxyManager, self).__init__(
- num_pools, headers, **connection_pool_kw
- )
-
- self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
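
To illustrate the classes above: SOCKSProxyManager is a drop-in PoolManager that routes every connection through the configured proxy. A short sketch, assuming PySocks is installed and a SOCKS5 proxy listens on localhost:1080 (a hypothetical endpoint):

# Sketch: routing requests through a SOCKS5 proxy with the manager above.
# The 'socks5h://' scheme sets rdns=True, so hostnames resolve proxy-side.
from urllib3.contrib.socks import SOCKSProxyManager

proxy = SOCKSProxyManager(
    "socks5h://localhost:1080/",  # hypothetical proxy endpoint
    username="user",              # optional SOCKS authentication
    password="secret",
)
resp = proxy.request("GET", "https://example.com/")
print(resp.status)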
diff --git a/collectors/python.d.plugin/python_modules/urllib3/exceptions.py b/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
deleted file mode 100644
index a71cabe06..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-from .packages.six.moves.http_client import (
- IncompleteRead as httplib_IncompleteRead
-)
-# Base Exceptions
-
-
-class HTTPError(Exception):
- "Base exception used by this module."
- pass
-
-
-class HTTPWarning(Warning):
- "Base warning used by this module."
- pass
-
-
-class PoolError(HTTPError):
- "Base exception for errors caused within a pool."
- def __init__(self, pool, message):
- self.pool = pool
- HTTPError.__init__(self, "%s: %s" % (pool, message))
-
- def __reduce__(self):
- # For pickling purposes.
- return self.__class__, (None, None)
-
-
-class RequestError(PoolError):
- "Base exception for PoolErrors that have associated URLs."
- def __init__(self, pool, url, message):
- self.url = url
- PoolError.__init__(self, pool, message)
-
- def __reduce__(self):
- # For pickling purposes.
- return self.__class__, (None, self.url, None)
-
-
-class SSLError(HTTPError):
- "Raised when SSL certificate fails in an HTTPS connection."
- pass
-
-
-class ProxyError(HTTPError):
- "Raised when the connection to a proxy fails."
- pass
-
-
-class DecodeError(HTTPError):
- "Raised when automatic decoding based on Content-Type fails."
- pass
-
-
-class ProtocolError(HTTPError):
- "Raised when something unexpected happens mid-request/response."
- pass
-
-
-#: Renamed to ProtocolError but aliased for backwards compatibility.
-ConnectionError = ProtocolError
-
-
-# Leaf Exceptions
-
-class MaxRetryError(RequestError):
- """Raised when the maximum number of retries is exceeded.
-
- :param pool: The connection pool
- :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
-    :param string url: The requested URL
- :param exceptions.Exception reason: The underlying error
-
- """
-
- def __init__(self, pool, url, reason=None):
- self.reason = reason
-
- message = "Max retries exceeded with url: %s (Caused by %r)" % (
- url, reason)
-
- RequestError.__init__(self, pool, url, message)
-
-
-class HostChangedError(RequestError):
- "Raised when an existing pool gets a request for a foreign host."
-
- def __init__(self, pool, url, retries=3):
- message = "Tried to open a foreign host with url: %s" % url
- RequestError.__init__(self, pool, url, message)
- self.retries = retries
-
-
-class TimeoutStateError(HTTPError):
- """ Raised when passing an invalid state to a timeout """
- pass
-
-
-class TimeoutError(HTTPError):
- """ Raised when a socket timeout error occurs.
-
- Catching this error will catch both :exc:`ReadTimeoutErrors
- <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
- """
- pass
-
-
-class ReadTimeoutError(TimeoutError, RequestError):
- "Raised when a socket timeout occurs while receiving data from a server"
- pass
-
-
-# This timeout error does not have a URL attached and needs to inherit from the
-# base HTTPError
-class ConnectTimeoutError(TimeoutError):
- "Raised when a socket timeout occurs while connecting to a server"
- pass
-
-
-class NewConnectionError(ConnectTimeoutError, PoolError):
- "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
- pass
-
-
-class EmptyPoolError(PoolError):
- "Raised when a pool runs out of connections and no more are allowed."
- pass
-
-
-class ClosedPoolError(PoolError):
- "Raised when a request enters a pool after the pool has been closed."
- pass
-
-
-class LocationValueError(ValueError, HTTPError):
- "Raised when there is something wrong with a given URL input."
- pass
-
-
-class LocationParseError(LocationValueError):
- "Raised when get_host or similar fails to parse the URL input."
-
- def __init__(self, location):
- message = "Failed to parse: %s" % location
- HTTPError.__init__(self, message)
-
- self.location = location
-
-
-class ResponseError(HTTPError):
- "Used as a container for an error reason supplied in a MaxRetryError."
- GENERIC_ERROR = 'too many error responses'
- SPECIFIC_ERROR = 'too many {status_code} error responses'
-
-
-class SecurityWarning(HTTPWarning):
-    "Warned when performing security-reducing actions"
- pass
-
-
-class SubjectAltNameWarning(SecurityWarning):
- "Warned when connecting to a host with a certificate missing a SAN."
- pass
-
-
-class InsecureRequestWarning(SecurityWarning):
- "Warned when making an unverified HTTPS request."
- pass
-
-
-class SystemTimeWarning(SecurityWarning):
- "Warned when system time is suspected to be wrong"
- pass
-
-
-class InsecurePlatformWarning(SecurityWarning):
- "Warned when certain SSL configuration is not available on a platform."
- pass
-
-
-class SNIMissingWarning(HTTPWarning):
-    "Warned when making an HTTPS request without SNI available."
- pass
-
-
-class DependencyWarning(HTTPWarning):
- """
- Warned when an attempt is made to import a module with missing optional
- dependencies.
- """
- pass
-
-
-class ResponseNotChunked(ProtocolError, ValueError):
- "Response needs to be chunked in order to read it as chunks."
- pass
-
-
-class BodyNotHttplibCompatible(HTTPError):
- """
-    Body should be httplib.HTTPResponse-like (have an ``fp`` attribute which
-    returns raw chunks) for read_chunked().
- """
- pass
-
-
-class IncompleteRead(HTTPError, httplib_IncompleteRead):
- """
- Response length doesn't match expected Content-Length
-
- Subclass of http_client.IncompleteRead to allow int value
- for `partial` to avoid creating large objects on streamed
- reads.
- """
- def __init__(self, partial, expected):
- super(IncompleteRead, self).__init__(partial, expected)
-
- def __repr__(self):
- return ('IncompleteRead(%i bytes read, '
- '%i more expected)' % (self.partial, self.expected))
-
-
-class InvalidHeader(HTTPError):
- "The header provided was somehow invalid."
- pass
-
-
-class ProxySchemeUnknown(AssertionError, ValueError):
- "ProxyManager does not support the supplied scheme"
- # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
-
- def __init__(self, scheme):
- message = "Not supported proxy scheme %s" % scheme
- super(ProxySchemeUnknown, self).__init__(message)
-
-
-class HeaderParsingError(HTTPError):
- "Raised by assert_header_parsing, but we convert it to a log.warning statement."
- def __init__(self, defects, unparsed_data):
- message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
- super(HeaderParsingError, self).__init__(message)
-
-
-class UnrewindableBodyError(HTTPError):
- "urllib3 encountered an error when trying to rewind a body"
- pass
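
A small sketch of how this hierarchy composes in practice: retry exhaustion surfaces as MaxRetryError, with the underlying failure preserved in .reason (port 1 below is simply an address expected to refuse connections):

# Sketch: MaxRetryError wraps the root cause in .reason.
import urllib3
from urllib3.exceptions import MaxRetryError, NewConnectionError

http = urllib3.PoolManager()
try:
    http.request("GET", "http://localhost:1/", retries=2)
except MaxRetryError as e:
    print(e.url)                                     # URL that exhausted retries
    print(isinstance(e.reason, NewConnectionError))  # usually True here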
diff --git a/collectors/python.d.plugin/python_modules/urllib3/fields.py b/collectors/python.d.plugin/python_modules/urllib3/fields.py
deleted file mode 100644
index de7577b74..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/fields.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import email.utils
-import mimetypes
-
-from .packages import six
-
-
-def guess_content_type(filename, default='application/octet-stream'):
- """
- Guess the "Content-Type" of a file.
-
- :param filename:
- The filename to guess the "Content-Type" of using :mod:`mimetypes`.
- :param default:
- If no "Content-Type" can be guessed, default to `default`.
- """
- if filename:
- return mimetypes.guess_type(filename)[0] or default
- return default
-
-
-def format_header_param(name, value):
- """
- Helper function to format and quote a single header parameter.
-
- Particularly useful for header parameters which might contain
- non-ASCII values, like file names. This follows RFC 2231, as
- suggested by RFC 2388 Section 4.4.
-
- :param name:
- The name of the parameter, a string expected to be ASCII only.
- :param value:
- The value of the parameter, provided as a unicode string.
- """
- if not any(ch in value for ch in '"\\\r\n'):
- result = '%s="%s"' % (name, value)
- try:
- result.encode('ascii')
- except (UnicodeEncodeError, UnicodeDecodeError):
- pass
- else:
- return result
- if not six.PY3 and isinstance(value, six.text_type): # Python 2:
- value = value.encode('utf-8')
- value = email.utils.encode_rfc2231(value, 'utf-8')
- value = '%s*=%s' % (name, value)
- return value
-
-
-class RequestField(object):
- """
- A data container for request body parameters.
-
- :param name:
- The name of this request field.
- :param data:
- The data/value body.
- :param filename:
- An optional filename of the request field.
- :param headers:
- An optional dict-like object of headers to initially use for the field.
- """
- def __init__(self, name, data, filename=None, headers=None):
- self._name = name
- self._filename = filename
- self.data = data
- self.headers = {}
- if headers:
- self.headers = dict(headers)
-
- @classmethod
- def from_tuples(cls, fieldname, value):
- """
- A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
-
- Supports constructing :class:`~urllib3.fields.RequestField` from
-        parameters of key/value strings AND key/filetuples. A filetuple is a
- (filename, data, MIME type) tuple where the MIME type is optional.
- For example::
-
- 'foo': 'bar',
- 'fakefile': ('foofile.txt', 'contents of foofile'),
- 'realfile': ('barfile.txt', open('realfile').read()),
- 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
- 'nonamefile': 'contents of nonamefile field',
-
- Field names and filenames must be unicode.
- """
- if isinstance(value, tuple):
- if len(value) == 3:
- filename, data, content_type = value
- else:
- filename, data = value
- content_type = guess_content_type(filename)
- else:
- filename = None
- content_type = None
- data = value
-
- request_param = cls(fieldname, data, filename=filename)
- request_param.make_multipart(content_type=content_type)
-
- return request_param
-
- def _render_part(self, name, value):
- """
- Overridable helper function to format a single header parameter.
-
- :param name:
- The name of the parameter, a string expected to be ASCII only.
- :param value:
- The value of the parameter, provided as a unicode string.
- """
- return format_header_param(name, value)
-
- def _render_parts(self, header_parts):
- """
- Helper function to format and quote a single header.
-
- Useful for single headers that are composed of multiple items. E.g.,
- 'Content-Disposition' fields.
-
- :param header_parts:
-            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
- as `k1="v1"; k2="v2"; ...`.
- """
- parts = []
- iterable = header_parts
- if isinstance(header_parts, dict):
- iterable = header_parts.items()
-
- for name, value in iterable:
- if value is not None:
- parts.append(self._render_part(name, value))
-
- return '; '.join(parts)
-
- def render_headers(self):
- """
- Renders the headers for this request field.
- """
- lines = []
-
- sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
- for sort_key in sort_keys:
- if self.headers.get(sort_key, False):
- lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
-
- for header_name, header_value in self.headers.items():
- if header_name not in sort_keys:
- if header_value:
- lines.append('%s: %s' % (header_name, header_value))
-
- lines.append('\r\n')
- return '\r\n'.join(lines)
-
- def make_multipart(self, content_disposition=None, content_type=None,
- content_location=None):
- """
- Makes this request field into a multipart request field.
-
-        This method overrides the "Content-Disposition", "Content-Type" and
-        "Content-Location" headers of the request parameter.
-
- :param content_type:
- The 'Content-Type' of the request body.
- :param content_location:
- The 'Content-Location' of the request body.
-
- """
- self.headers['Content-Disposition'] = content_disposition or 'form-data'
- self.headers['Content-Disposition'] += '; '.join([
- '', self._render_parts(
- (('name', self._name), ('filename', self._filename))
- )
- ])
- self.headers['Content-Type'] = content_type
- self.headers['Content-Location'] = content_location
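
For reference, the two-tuple form of from_tuples() above triggers the Content-Type guessing path; a quick sketch:

# Sketch: building a multipart field with the helpers above.
from urllib3.fields import RequestField, guess_content_type

field = RequestField.from_tuples("upload", ("report.txt", "hello world"))
print(guess_content_type("report.txt"))  # 'text/plain'
print(field.render_headers())
# -> Content-Disposition: form-data; name="upload"; filename="report.txt"
#    Content-Type: text/plain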
diff --git a/collectors/python.d.plugin/python_modules/urllib3/filepost.py b/collectors/python.d.plugin/python_modules/urllib3/filepost.py
deleted file mode 100644
index 3febc9cfe..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/filepost.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import codecs
-
-from uuid import uuid4
-from io import BytesIO
-
-from .packages import six
-from .packages.six import b
-from .fields import RequestField
-
-writer = codecs.lookup('utf-8')[3]
-
-
-def choose_boundary():
- """
- Our embarrassingly-simple replacement for mimetools.choose_boundary.
- """
- return uuid4().hex
-
-
-def iter_field_objects(fields):
- """
- Iterate over fields.
-
- Supports list of (k, v) tuples and dicts, and lists of
- :class:`~urllib3.fields.RequestField`.
-
- """
- if isinstance(fields, dict):
- i = six.iteritems(fields)
- else:
- i = iter(fields)
-
- for field in i:
- if isinstance(field, RequestField):
- yield field
- else:
- yield RequestField.from_tuples(*field)
-
-
-def iter_fields(fields):
- """
- .. deprecated:: 1.6
-
- Iterate over fields.
-
- The addition of :class:`~urllib3.fields.RequestField` makes this function
- obsolete. Instead, use :func:`iter_field_objects`, which returns
- :class:`~urllib3.fields.RequestField` objects.
-
- Supports list of (k, v) tuples and dicts.
- """
- if isinstance(fields, dict):
- return ((k, v) for k, v in six.iteritems(fields))
-
- return ((k, v) for k, v in fields)
-
-
-def encode_multipart_formdata(fields, boundary=None):
- """
- Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
-
- :param fields:
- Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
-
- :param boundary:
- If not specified, then a random boundary will be generated using
-        :func:`choose_boundary`.
- """
- body = BytesIO()
- if boundary is None:
- boundary = choose_boundary()
-
- for field in iter_field_objects(fields):
- body.write(b('--%s\r\n' % (boundary)))
-
- writer(body).write(field.render_headers())
- data = field.data
-
- if isinstance(data, int):
- data = str(data) # Backwards compatibility
-
- if isinstance(data, six.text_type):
- writer(body).write(data)
- else:
- body.write(data)
-
- body.write(b'\r\n')
-
- body.write(b('--%s--\r\n' % (boundary)))
-
- content_type = str('multipart/form-data; boundary=%s' % boundary)
-
- return body.getvalue(), content_type
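
A sketch of the encoder above; a fixed boundary is passed only to make the output reproducible (normally it is omitted and a uuid4 hex string is generated):

# Sketch: encoding a dict of fields as multipart/form-data.
from urllib3.filepost import encode_multipart_formdata

body, content_type = encode_multipart_formdata(
    {
        "name": "netdata",
        "logo": ("logo.png", b"\x89PNG...", "image/png"),  # (filename, data, MIME)
    },
    boundary="boundary123",
)
print(content_type)  # 'multipart/form-data; boundary=boundary123'
print(body[:15])     # b'--boundary123\r\n'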
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
deleted file mode 100644
index 170e974c1..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from __future__ import absolute_import
-
-from . import ssl_match_hostname
-
-__all__ = ('ssl_match_hostname', )
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
+++ /dev/null
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py b/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
deleted file mode 100644
index 8ab122f8b..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# -*- coding: utf-8 -*-
-# SPDX-License-Identifier: MIT
-"""
-backports.makefile
-~~~~~~~~~~~~~~~~~~
-
-Backports the Python 3 ``socket.makefile`` method for use with anything that
-wants to create a "fake" socket object.
-"""
-import io
-
-from socket import SocketIO
-
-
-def backport_makefile(self, mode="r", buffering=None, encoding=None,
- errors=None, newline=None):
- """
- Backport of ``socket.makefile`` from Python 3.5.
- """
- if not set(mode) <= set(["r", "w", "b"]):
- raise ValueError(
- "invalid mode %r (only r, w, b allowed)" % (mode,)
- )
- writing = "w" in mode
- reading = "r" in mode or not writing
- assert reading or writing
- binary = "b" in mode
- rawmode = ""
- if reading:
- rawmode += "r"
- if writing:
- rawmode += "w"
- raw = SocketIO(self, rawmode)
- self._makefile_refs += 1
- if buffering is None:
- buffering = -1
- if buffering < 0:
- buffering = io.DEFAULT_BUFFER_SIZE
- if buffering == 0:
- if not binary:
- raise ValueError("unbuffered streams must be binary")
- return raw
- if reading and writing:
- buffer = io.BufferedRWPair(raw, raw, buffering)
- elif reading:
- buffer = io.BufferedReader(raw, buffering)
- else:
- assert writing
- buffer = io.BufferedWriter(raw, buffering)
- if binary:
- return buffer
- text = io.TextIOWrapper(buffer, encoding, errors, newline)
- text.mode = mode
- return text
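
To show what the backport above provides, here is an illustrative sketch. CountingSocket and the fileno re-wrap are assumptions made for the demo (the _makefile_refs counter is normally supplied by WrappedSocket), and fileno auto-detection needs Python 3.7+:

# Sketch: using backport_makefile() on a socket-like object that carries
# the _makefile_refs counter it expects.
import socket
from urllib3.packages.backports.makefile import backport_makefile

class CountingSocket(socket.socket):
    _makefile_refs = 0  # the one extra attribute the backport relies on

a, b = socket.socketpair()
cs = CountingSocket(fileno=a.detach())        # re-wrap one end (Python 3.7+)
wfile = backport_makefile(cs, mode="wb", buffering=0)  # unbuffered -> raw SocketIO
wfile.write(b"ping")
print(b.recv(4))                              # b'ping'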
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
deleted file mode 100644
index 9f7c0e6b8..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
-# Passes Python2.7's test suite and incorporates all the latest updates.
-# Copyright 2009 Raymond Hettinger, released under the MIT License.
-# http://code.activestate.com/recipes/576693/
-# SPDX-License-Identifier: MIT
-try:
- from thread import get_ident as _get_ident
-except ImportError:
- from dummy_thread import get_ident as _get_ident
-
-try:
- from _abcoll import KeysView, ValuesView, ItemsView
-except ImportError:
- pass
-
-
-class OrderedDict(dict):
- 'Dictionary that remembers insertion order'
- # An inherited dict maps keys to values.
- # The inherited dict provides __getitem__, __len__, __contains__, and get.
- # The remaining methods are order-aware.
- # Big-O running times for all methods are the same as for regular dictionaries.
-
- # The internal self.__map dictionary maps keys to links in a doubly linked list.
- # The circular doubly linked list starts and ends with a sentinel element.
- # The sentinel element never gets deleted (this simplifies the algorithm).
- # Each link is stored as a list of length three: [PREV, NEXT, KEY].
-
- def __init__(self, *args, **kwds):
- '''Initialize an ordered dictionary. Signature is the same as for
- regular dictionaries, but keyword arguments are not recommended
- because their insertion order is arbitrary.
-
- '''
- if len(args) > 1:
- raise TypeError('expected at most 1 arguments, got %d' % len(args))
- try:
- self.__root
- except AttributeError:
- self.__root = root = [] # sentinel node
- root[:] = [root, root, None]
- self.__map = {}
- self.__update(*args, **kwds)
-
- def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
- 'od.__setitem__(i, y) <==> od[i]=y'
- # Setting a new item creates a new link which goes at the end of the linked
- # list, and the inherited dictionary is updated with the new key/value pair.
- if key not in self:
- root = self.__root
- last = root[0]
- last[1] = root[0] = self.__map[key] = [last, root, key]
- dict_setitem(self, key, value)
-
- def __delitem__(self, key, dict_delitem=dict.__delitem__):
- 'od.__delitem__(y) <==> del od[y]'
- # Deleting an existing item uses self.__map to find the link which is
- # then removed by updating the links in the predecessor and successor nodes.
- dict_delitem(self, key)
- link_prev, link_next, key = self.__map.pop(key)
- link_prev[1] = link_next
- link_next[0] = link_prev
-
- def __iter__(self):
- 'od.__iter__() <==> iter(od)'
- root = self.__root
- curr = root[1]
- while curr is not root:
- yield curr[2]
- curr = curr[1]
-
- def __reversed__(self):
- 'od.__reversed__() <==> reversed(od)'
- root = self.__root
- curr = root[0]
- while curr is not root:
- yield curr[2]
- curr = curr[0]
-
- def clear(self):
- 'od.clear() -> None. Remove all items from od.'
- try:
- for node in self.__map.itervalues():
- del node[:]
- root = self.__root
- root[:] = [root, root, None]
- self.__map.clear()
- except AttributeError:
- pass
- dict.clear(self)
-
- def popitem(self, last=True):
- '''od.popitem() -> (k, v), return and remove a (key, value) pair.
- Pairs are returned in LIFO order if last is true or FIFO order if false.
-
- '''
- if not self:
- raise KeyError('dictionary is empty')
- root = self.__root
- if last:
- link = root[0]
- link_prev = link[0]
- link_prev[1] = root
- root[0] = link_prev
- else:
- link = root[1]
- link_next = link[1]
- root[1] = link_next
- link_next[0] = root
- key = link[2]
- del self.__map[key]
- value = dict.pop(self, key)
- return key, value
-
- # -- the following methods do not depend on the internal structure --
-
- def keys(self):
- 'od.keys() -> list of keys in od'
- return list(self)
-
- def values(self):
- 'od.values() -> list of values in od'
- return [self[key] for key in self]
-
- def items(self):
- 'od.items() -> list of (key, value) pairs in od'
- return [(key, self[key]) for key in self]
-
- def iterkeys(self):
- 'od.iterkeys() -> an iterator over the keys in od'
- return iter(self)
-
- def itervalues(self):
-        'od.itervalues() -> an iterator over the values in od'
- for k in self:
- yield self[k]
-
- def iteritems(self):
-        'od.iteritems() -> an iterator over the (key, value) items in od'
- for k in self:
- yield (k, self[k])
-
- def update(*args, **kwds):
- '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
-
- If E is a dict instance, does: for k in E: od[k] = E[k]
- If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
- Or if E is an iterable of items, does: for k, v in E: od[k] = v
- In either case, this is followed by: for k, v in F.items(): od[k] = v
-
- '''
- if len(args) > 2:
- raise TypeError('update() takes at most 2 positional '
- 'arguments (%d given)' % (len(args),))
- elif not args:
- raise TypeError('update() takes at least 1 argument (0 given)')
- self = args[0]
- # Make progressively weaker assumptions about "other"
- other = ()
- if len(args) == 2:
- other = args[1]
- if isinstance(other, dict):
- for key in other:
- self[key] = other[key]
- elif hasattr(other, 'keys'):
- for key in other.keys():
- self[key] = other[key]
- else:
- for key, value in other:
- self[key] = value
- for key, value in kwds.items():
- self[key] = value
-
- __update = update # let subclasses override update without breaking __init__
-
- __marker = object()
-
- def pop(self, key, default=__marker):
- '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
- If key is not found, d is returned if given, otherwise KeyError is raised.
-
- '''
- if key in self:
- result = self[key]
- del self[key]
- return result
- if default is self.__marker:
- raise KeyError(key)
- return default
-
- def setdefault(self, key, default=None):
- 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
- if key in self:
- return self[key]
- self[key] = default
- return default
-
- def __repr__(self, _repr_running={}):
- 'od.__repr__() <==> repr(od)'
- call_key = id(self), _get_ident()
- if call_key in _repr_running:
- return '...'
- _repr_running[call_key] = 1
- try:
- if not self:
- return '%s()' % (self.__class__.__name__,)
- return '%s(%r)' % (self.__class__.__name__, self.items())
- finally:
- del _repr_running[call_key]
-
- def __reduce__(self):
- 'Return state information for pickling'
- items = [[k, self[k]] for k in self]
- inst_dict = vars(self).copy()
- for k in vars(OrderedDict()):
- inst_dict.pop(k, None)
- if inst_dict:
- return (self.__class__, (items,), inst_dict)
- return self.__class__, (items,)
-
- def copy(self):
- 'od.copy() -> a shallow copy of od'
- return self.__class__(self)
-
- @classmethod
- def fromkeys(cls, iterable, value=None):
- '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
- and values equal to v (which defaults to None).
-
- '''
- d = cls()
- for key in iterable:
- d[key] = value
- return d
-
- def __eq__(self, other):
- '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
- while comparison to a regular mapping is order-insensitive.
-
- '''
- if isinstance(other, OrderedDict):
- return len(self)==len(other) and self.items() == other.items()
- return dict.__eq__(self, other)
-
- def __ne__(self, other):
- return not self == other
-
- # -- the following methods are only used in Python 2.7 --
-
- def viewkeys(self):
- "od.viewkeys() -> a set-like object providing a view on od's keys"
- return KeysView(self)
-
- def viewvalues(self):
- "od.viewvalues() -> an object providing a view on od's values"
- return ValuesView(self)
-
- def viewitems(self):
- "od.viewitems() -> a set-like object providing a view on od's items"
- return ItemsView(self)
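
Behaviour of the backport matches collections.OrderedDict; a brief sketch:

# Sketch: insertion order, FIFO/LIFO popitem, and order-sensitive equality.
from urllib3.packages.ordered_dict import OrderedDict

od = OrderedDict([("b", 2), ("a", 1)])
od["c"] = 3
print(list(od))                  # ['b', 'a', 'c'] -- insertion order kept
print(od.popitem(last=False))    # ('b', 2)        -- FIFO pop from the front
print(od == OrderedDict([("a", 1), ("c", 3)]))  # True: same order, same items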
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/six.py b/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
deleted file mode 100644
index 31df5012b..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
+++ /dev/null
@@ -1,852 +0,0 @@
-"""Utilities for writing code that runs on Python 2 and 3"""
-
-# Copyright (c) 2010-2015 Benjamin Peterson
-#
-# SPDX-License-Identifier: MIT
-
-from __future__ import absolute_import
-
-import functools
-import itertools
-import operator
-import sys
-import types
-
-__author__ = "Benjamin Peterson <benjamin@python.org>"
-__version__ = "1.10.0"
-
-
-# Useful for very coarse version differentiation.
-PY2 = sys.version_info[0] == 2
-PY3 = sys.version_info[0] == 3
-PY34 = sys.version_info[0:2] >= (3, 4)
-
-if PY3:
- string_types = str,
- integer_types = int,
- class_types = type,
- text_type = str
- binary_type = bytes
-
- MAXSIZE = sys.maxsize
-else:
- string_types = basestring,
- integer_types = (int, long)
- class_types = (type, types.ClassType)
- text_type = unicode
- binary_type = str
-
- if sys.platform.startswith("java"):
- # Jython always uses 32 bits.
- MAXSIZE = int((1 << 31) - 1)
- else:
- # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
- class X(object):
-
- def __len__(self):
- return 1 << 31
- try:
- len(X())
- except OverflowError:
- # 32-bit
- MAXSIZE = int((1 << 31) - 1)
- else:
- # 64-bit
- MAXSIZE = int((1 << 63) - 1)
- del X
-
-
-def _add_doc(func, doc):
- """Add documentation to a function."""
- func.__doc__ = doc
-
-
-def _import_module(name):
- """Import module, returning the module after the last dot."""
- __import__(name)
- return sys.modules[name]
-
-
-class _LazyDescr(object):
-
- def __init__(self, name):
- self.name = name
-
- def __get__(self, obj, tp):
- result = self._resolve()
- setattr(obj, self.name, result) # Invokes __set__.
- try:
- # This is a bit ugly, but it avoids running this again by
- # removing this descriptor.
- delattr(obj.__class__, self.name)
- except AttributeError:
- pass
- return result
-
-
-class MovedModule(_LazyDescr):
-
- def __init__(self, name, old, new=None):
- super(MovedModule, self).__init__(name)
- if PY3:
- if new is None:
- new = name
- self.mod = new
- else:
- self.mod = old
-
- def _resolve(self):
- return _import_module(self.mod)
-
- def __getattr__(self, attr):
- _module = self._resolve()
- value = getattr(_module, attr)
- setattr(self, attr, value)
- return value
-
-
-class _LazyModule(types.ModuleType):
-
- def __init__(self, name):
- super(_LazyModule, self).__init__(name)
- self.__doc__ = self.__class__.__doc__
-
- def __dir__(self):
- attrs = ["__doc__", "__name__"]
- attrs += [attr.name for attr in self._moved_attributes]
- return attrs
-
- # Subclasses should override this
- _moved_attributes = []
-
-
-class MovedAttribute(_LazyDescr):
-
- def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
- super(MovedAttribute, self).__init__(name)
- if PY3:
- if new_mod is None:
- new_mod = name
- self.mod = new_mod
- if new_attr is None:
- if old_attr is None:
- new_attr = name
- else:
- new_attr = old_attr
- self.attr = new_attr
- else:
- self.mod = old_mod
- if old_attr is None:
- old_attr = name
- self.attr = old_attr
-
- def _resolve(self):
- module = _import_module(self.mod)
- return getattr(module, self.attr)
-
-
-class _SixMetaPathImporter(object):
-
- """
- A meta path importer to import six.moves and its submodules.
-
- This class implements a PEP302 finder and loader. It should be compatible
-    with Python 2.5 and all existing versions of Python 3.
- """
-
- def __init__(self, six_module_name):
- self.name = six_module_name
- self.known_modules = {}
-
- def _add_module(self, mod, *fullnames):
- for fullname in fullnames:
- self.known_modules[self.name + "." + fullname] = mod
-
- def _get_module(self, fullname):
- return self.known_modules[self.name + "." + fullname]
-
- def find_module(self, fullname, path=None):
- if fullname in self.known_modules:
- return self
- return None
-
- def __get_module(self, fullname):
- try:
- return self.known_modules[fullname]
- except KeyError:
- raise ImportError("This loader does not know module " + fullname)
-
- def load_module(self, fullname):
- try:
- # in case of a reload
- return sys.modules[fullname]
- except KeyError:
- pass
- mod = self.__get_module(fullname)
- if isinstance(mod, MovedModule):
- mod = mod._resolve()
- else:
- mod.__loader__ = self
- sys.modules[fullname] = mod
- return mod
-
- def is_package(self, fullname):
- """
- Return true, if the named module is a package.
-
- We need this method to get correct spec objects with
- Python 3.4 (see PEP451)
- """
- return hasattr(self.__get_module(fullname), "__path__")
-
- def get_code(self, fullname):
- """Return None
-
- Required, if is_package is implemented"""
- self.__get_module(fullname) # eventually raises ImportError
- return None
- get_source = get_code # same as get_code
-
-_importer = _SixMetaPathImporter(__name__)
-
-
-class _MovedItems(_LazyModule):
-
- """Lazy loading of moved objects"""
- __path__ = [] # mark as package
-
-
-_moved_attributes = [
- MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
- MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
- MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
- MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
- MovedAttribute("intern", "__builtin__", "sys"),
- MovedAttribute("map", "itertools", "builtins", "imap", "map"),
- MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
- MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
- MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
- MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
- MovedAttribute("reduce", "__builtin__", "functools"),
- MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
- MovedAttribute("StringIO", "StringIO", "io"),
- MovedAttribute("UserDict", "UserDict", "collections"),
- MovedAttribute("UserList", "UserList", "collections"),
- MovedAttribute("UserString", "UserString", "collections"),
- MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
- MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
- MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
- MovedModule("builtins", "__builtin__"),
- MovedModule("configparser", "ConfigParser"),
- MovedModule("copyreg", "copy_reg"),
- MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
- MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
- MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
- MovedModule("http_cookies", "Cookie", "http.cookies"),
- MovedModule("html_entities", "htmlentitydefs", "html.entities"),
- MovedModule("html_parser", "HTMLParser", "html.parser"),
- MovedModule("http_client", "httplib", "http.client"),
- MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
- MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
- MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
- MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
- MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
- MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
- MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
- MovedModule("cPickle", "cPickle", "pickle"),
- MovedModule("queue", "Queue"),
- MovedModule("reprlib", "repr"),
- MovedModule("socketserver", "SocketServer"),
- MovedModule("_thread", "thread", "_thread"),
- MovedModule("tkinter", "Tkinter"),
- MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
- MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
- MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
- MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
- MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
- MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
- MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
- MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
- MovedModule("tkinter_colorchooser", "tkColorChooser",
- "tkinter.colorchooser"),
- MovedModule("tkinter_commondialog", "tkCommonDialog",
- "tkinter.commondialog"),
- MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
- MovedModule("tkinter_font", "tkFont", "tkinter.font"),
- MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
- MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
- "tkinter.simpledialog"),
- MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
- MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
- MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
- MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
- MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
- MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
-]
-# Add windows specific modules.
-if sys.platform == "win32":
- _moved_attributes += [
- MovedModule("winreg", "_winreg"),
- ]
-
-for attr in _moved_attributes:
- setattr(_MovedItems, attr.name, attr)
- if isinstance(attr, MovedModule):
- _importer._add_module(attr, "moves." + attr.name)
-del attr
-
-_MovedItems._moved_attributes = _moved_attributes
-
-moves = _MovedItems(__name__ + ".moves")
-_importer._add_module(moves, "moves")
-
-
-class Module_six_moves_urllib_parse(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_parse"""
-
-
-_urllib_parse_moved_attributes = [
- MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
- MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
- MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
- MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
- MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
- MovedAttribute("urljoin", "urlparse", "urllib.parse"),
- MovedAttribute("urlparse", "urlparse", "urllib.parse"),
- MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
- MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
- MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
- MovedAttribute("quote", "urllib", "urllib.parse"),
- MovedAttribute("quote_plus", "urllib", "urllib.parse"),
- MovedAttribute("unquote", "urllib", "urllib.parse"),
- MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
- MovedAttribute("urlencode", "urllib", "urllib.parse"),
- MovedAttribute("splitquery", "urllib", "urllib.parse"),
- MovedAttribute("splittag", "urllib", "urllib.parse"),
- MovedAttribute("splituser", "urllib", "urllib.parse"),
- MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
- MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
- MovedAttribute("uses_params", "urlparse", "urllib.parse"),
- MovedAttribute("uses_query", "urlparse", "urllib.parse"),
- MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
-]
-for attr in _urllib_parse_moved_attributes:
- setattr(Module_six_moves_urllib_parse, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
- "moves.urllib_parse", "moves.urllib.parse")
-
-
-class Module_six_moves_urllib_error(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_error"""
-
-
-_urllib_error_moved_attributes = [
- MovedAttribute("URLError", "urllib2", "urllib.error"),
- MovedAttribute("HTTPError", "urllib2", "urllib.error"),
- MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
-]
-for attr in _urllib_error_moved_attributes:
- setattr(Module_six_moves_urllib_error, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
- "moves.urllib_error", "moves.urllib.error")
-
-
-class Module_six_moves_urllib_request(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_request"""
-
-
-_urllib_request_moved_attributes = [
- MovedAttribute("urlopen", "urllib2", "urllib.request"),
- MovedAttribute("install_opener", "urllib2", "urllib.request"),
- MovedAttribute("build_opener", "urllib2", "urllib.request"),
- MovedAttribute("pathname2url", "urllib", "urllib.request"),
- MovedAttribute("url2pathname", "urllib", "urllib.request"),
- MovedAttribute("getproxies", "urllib", "urllib.request"),
- MovedAttribute("Request", "urllib2", "urllib.request"),
- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
- MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
- MovedAttribute("FileHandler", "urllib2", "urllib.request"),
- MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
- MovedAttribute("urlretrieve", "urllib", "urllib.request"),
- MovedAttribute("urlcleanup", "urllib", "urllib.request"),
- MovedAttribute("URLopener", "urllib", "urllib.request"),
- MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
- MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
-]
-for attr in _urllib_request_moved_attributes:
- setattr(Module_six_moves_urllib_request, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
- "moves.urllib_request", "moves.urllib.request")
-
-
-class Module_six_moves_urllib_response(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_response"""
-
-
-_urllib_response_moved_attributes = [
- MovedAttribute("addbase", "urllib", "urllib.response"),
- MovedAttribute("addclosehook", "urllib", "urllib.response"),
- MovedAttribute("addinfo", "urllib", "urllib.response"),
- MovedAttribute("addinfourl", "urllib", "urllib.response"),
-]
-for attr in _urllib_response_moved_attributes:
- setattr(Module_six_moves_urllib_response, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
- "moves.urllib_response", "moves.urllib.response")
-
-
-class Module_six_moves_urllib_robotparser(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_robotparser"""
-
-
-_urllib_robotparser_moved_attributes = [
- MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
-]
-for attr in _urllib_robotparser_moved_attributes:
- setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
- "moves.urllib_robotparser", "moves.urllib.robotparser")
-
-
-class Module_six_moves_urllib(types.ModuleType):
-
- """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
- __path__ = [] # mark as package
- parse = _importer._get_module("moves.urllib_parse")
- error = _importer._get_module("moves.urllib_error")
- request = _importer._get_module("moves.urllib_request")
- response = _importer._get_module("moves.urllib_response")
- robotparser = _importer._get_module("moves.urllib_robotparser")
-
- def __dir__(self):
- return ['parse', 'error', 'request', 'response', 'robotparser']
-
-_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
- "moves.urllib")
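
The registrations above are what make the following imports resolve on both Python 2 and 3; note that "moves.urllib_parse" and "moves.urllib.parse" are registered as the same module object, so either spelling works. A sketch against the vendored path used in this tree:

# Sketch: one import path, backed by httplib/urlparse on Py2 and
# http.client/urllib.parse on Py3 via the lazy importer above.
from urllib3.packages.six.moves import http_client
from urllib3.packages.six.moves.urllib.parse import urlencode

print(http_client.OK)               # 200
print(urlencode({"q": "netdata"}))  # q=netdata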
-
-
-def add_move(move):
- """Add an item to six.moves."""
- setattr(_MovedItems, move.name, move)
-
-
-def remove_move(name):
- """Remove item from six.moves."""
- try:
- delattr(_MovedItems, name)
- except AttributeError:
- try:
- del moves.__dict__[name]
- except KeyError:
- raise AttributeError("no such move, %r" % (name,))
-
-
-if PY3:
- _meth_func = "__func__"
- _meth_self = "__self__"
-
- _func_closure = "__closure__"
- _func_code = "__code__"
- _func_defaults = "__defaults__"
- _func_globals = "__globals__"
-else:
- _meth_func = "im_func"
- _meth_self = "im_self"
-
- _func_closure = "func_closure"
- _func_code = "func_code"
- _func_defaults = "func_defaults"
- _func_globals = "func_globals"
-
-
-try:
- advance_iterator = next
-except NameError:
- def advance_iterator(it):
- return it.next()
-next = advance_iterator
-
-
-try:
- callable = callable
-except NameError:
- def callable(obj):
- return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
-
-
-if PY3:
- def get_unbound_function(unbound):
- return unbound
-
- create_bound_method = types.MethodType
-
- def create_unbound_method(func, cls):
- return func
-
- Iterator = object
-else:
- def get_unbound_function(unbound):
- return unbound.im_func
-
- def create_bound_method(func, obj):
- return types.MethodType(func, obj, obj.__class__)
-
- def create_unbound_method(func, cls):
- return types.MethodType(func, None, cls)
-
- class Iterator(object):
-
- def next(self):
- return type(self).__next__(self)
-
- callable = callable
-_add_doc(get_unbound_function,
- """Get the function out of a possibly unbound function""")
-
-
-get_method_function = operator.attrgetter(_meth_func)
-get_method_self = operator.attrgetter(_meth_self)
-get_function_closure = operator.attrgetter(_func_closure)
-get_function_code = operator.attrgetter(_func_code)
-get_function_defaults = operator.attrgetter(_func_defaults)
-get_function_globals = operator.attrgetter(_func_globals)
-
-
-if PY3:
- def iterkeys(d, **kw):
- return iter(d.keys(**kw))
-
- def itervalues(d, **kw):
- return iter(d.values(**kw))
-
- def iteritems(d, **kw):
- return iter(d.items(**kw))
-
- def iterlists(d, **kw):
- return iter(d.lists(**kw))
-
- viewkeys = operator.methodcaller("keys")
-
- viewvalues = operator.methodcaller("values")
-
- viewitems = operator.methodcaller("items")
-else:
- def iterkeys(d, **kw):
- return d.iterkeys(**kw)
-
- def itervalues(d, **kw):
- return d.itervalues(**kw)
-
- def iteritems(d, **kw):
- return d.iteritems(**kw)
-
- def iterlists(d, **kw):
- return d.iterlists(**kw)
-
- viewkeys = operator.methodcaller("viewkeys")
-
- viewvalues = operator.methodcaller("viewvalues")
-
- viewitems = operator.methodcaller("viewitems")
-
-_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
-_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
-_add_doc(iteritems,
- "Return an iterator over the (key, value) pairs of a dictionary.")
-_add_doc(iterlists,
- "Return an iterator over the (key, [values]) pairs of a dictionary.")
-
-
-if PY3:
- def b(s):
- return s.encode("latin-1")
-
- def u(s):
- return s
- unichr = chr
- import struct
- int2byte = struct.Struct(">B").pack
- del struct
- byte2int = operator.itemgetter(0)
- indexbytes = operator.getitem
- iterbytes = iter
- import io
- StringIO = io.StringIO
- BytesIO = io.BytesIO
- _assertCountEqual = "assertCountEqual"
- if sys.version_info[1] <= 1:
- _assertRaisesRegex = "assertRaisesRegexp"
- _assertRegex = "assertRegexpMatches"
- else:
- _assertRaisesRegex = "assertRaisesRegex"
- _assertRegex = "assertRegex"
-else:
- def b(s):
- return s
- # Workaround for standalone backslash
-
- def u(s):
- return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
- unichr = unichr
- int2byte = chr
-
- def byte2int(bs):
- return ord(bs[0])
-
- def indexbytes(buf, i):
- return ord(buf[i])
- iterbytes = functools.partial(itertools.imap, ord)
- import StringIO
- StringIO = BytesIO = StringIO.StringIO
- _assertCountEqual = "assertItemsEqual"
- _assertRaisesRegex = "assertRaisesRegexp"
- _assertRegex = "assertRegexpMatches"
-_add_doc(b, """Byte literal""")
-_add_doc(u, """Text literal""")
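# Sketch: byte and text literals from a single source file via b()/u().
raw = b("\x00\x01")     # bytes on both major versions
text = u("caf\u00e9")   # u'café' on PY2 (via unicode_escape), 'café' on PY3
assert isinstance(raw, bytes)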
-
-
-def assertCountEqual(self, *args, **kwargs):
- return getattr(self, _assertCountEqual)(*args, **kwargs)
-
-
-def assertRaisesRegex(self, *args, **kwargs):
- return getattr(self, _assertRaisesRegex)(*args, **kwargs)
-
-
-def assertRegex(self, *args, **kwargs):
- return getattr(self, _assertRegex)(*args, **kwargs)
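# Sketch: the wrappers above let a single test body use the Python 3 method
# names while dispatching to the Python 2 spellings at runtime.
import unittest

class _ExampleTest(unittest.TestCase):
    def test_counts(self):
        assertCountEqual(self, [1, 2, 2], [2, 1, 2])
        assertRegex(self, "urllib3", r"^urllib\d$")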
-
-
-if PY3:
- exec_ = getattr(moves.builtins, "exec")
-
- def reraise(tp, value, tb=None):
- if value is None:
- value = tp()
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
-
-else:
- def exec_(_code_, _globs_=None, _locs_=None):
- """Execute code in a namespace."""
- if _globs_ is None:
- frame = sys._getframe(1)
- _globs_ = frame.f_globals
- if _locs_ is None:
- _locs_ = frame.f_locals
- del frame
- elif _locs_ is None:
- _locs_ = _globs_
- exec("""exec _code_ in _globs_, _locs_""")
-
- exec_("""def reraise(tp, value, tb=None):
- raise tp, value, tb
-""")
-
-
-if sys.version_info[:2] == (3, 2):
- exec_("""def raise_from(value, from_value):
- if from_value is None:
- raise value
- raise value from from_value
-""")
-elif sys.version_info[:2] > (3, 2):
- exec_("""def raise_from(value, from_value):
- raise value from from_value
-""")
-else:
- def raise_from(value, from_value):
- raise value
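# Illustration of reraise/raise_from: preserve the traceback (or chain the
# cause) when translating exceptions; the helper names are hypothetical.
import sys

def _parse_port(text):
    try:
        return int(text)
    except ValueError:
        # Like a bare `raise`, but usable from a helper; the traceback from
        # sys.exc_info() keeps the original failure location.
        reraise(*sys.exc_info())

def _parse_port_chained(text):
    try:
        return int(text)
    except ValueError as e:
        raise_from(RuntimeError("bad port: %r" % text), e)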
-
-
-print_ = getattr(moves.builtins, "print", None)
-if print_ is None:
- def print_(*args, **kwargs):
- """The new-style print function for Python 2.4 and 2.5."""
- fp = kwargs.pop("file", sys.stdout)
- if fp is None:
- return
-
- def write(data):
- if not isinstance(data, basestring):
- data = str(data)
- # If the file has an encoding, encode unicode with it.
- if (isinstance(fp, file) and
- isinstance(data, unicode) and
- fp.encoding is not None):
- errors = getattr(fp, "errors", None)
- if errors is None:
- errors = "strict"
- data = data.encode(fp.encoding, errors)
- fp.write(data)
- want_unicode = False
- sep = kwargs.pop("sep", None)
- if sep is not None:
- if isinstance(sep, unicode):
- want_unicode = True
- elif not isinstance(sep, str):
- raise TypeError("sep must be None or a string")
- end = kwargs.pop("end", None)
- if end is not None:
- if isinstance(end, unicode):
- want_unicode = True
- elif not isinstance(end, str):
- raise TypeError("end must be None or a string")
- if kwargs:
- raise TypeError("invalid keyword arguments to print()")
- if not want_unicode:
- for arg in args:
- if isinstance(arg, unicode):
- want_unicode = True
- break
- if want_unicode:
- newline = unicode("\n")
- space = unicode(" ")
- else:
- newline = "\n"
- space = " "
- if sep is None:
- sep = space
- if end is None:
- end = newline
- for i, arg in enumerate(args):
- if i:
- write(sep)
- write(arg)
- write(end)
-if sys.version_info[:2] < (3, 3):
- _print = print_
-
- def print_(*args, **kwargs):
- fp = kwargs.get("file", sys.stdout)
- flush = kwargs.pop("flush", False)
- _print(*args, **kwargs)
- if flush and fp is not None:
- fp.flush()
-
-_add_doc(reraise, """Reraise an exception.""")
-
-if sys.version_info[0:2] < (3, 4):
- def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
- updated=functools.WRAPPER_UPDATES):
- def wrapper(f):
- f = functools.wraps(wrapped, assigned, updated)(f)
- f.__wrapped__ = wrapped
- return f
- return wrapper
-else:
- wraps = functools.wraps
-
-
-def with_metaclass(meta, *bases):
- """Create a base class with a metaclass."""
- # This requires a bit of explanation: the basic idea is to make a dummy
- # metaclass for one level of class instantiation that replaces itself with
- # the actual metaclass.
- class metaclass(meta):
-
- def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
- return type.__new__(metaclass, 'temporary_class', (), {})
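# Call-pattern sketch for with_metaclass: the metaclass ends up applied to the
# real class, and the temporary class does not appear in the MRO.
class _Meta(type):
    pass

class _Base(object):
    pass

class _WithMeta(with_metaclass(_Meta, _Base)):
    pass

assert type(_WithMeta) is _Meta
assert _WithMeta.__mro__ == (_WithMeta, _Base, object)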
-
-
-def add_metaclass(metaclass):
- """Class decorator for creating a class with a metaclass."""
- def wrapper(cls):
- orig_vars = cls.__dict__.copy()
- slots = orig_vars.get('__slots__')
- if slots is not None:
- if isinstance(slots, str):
- slots = [slots]
- for slots_var in slots:
- orig_vars.pop(slots_var)
- orig_vars.pop('__dict__', None)
- orig_vars.pop('__weakref__', None)
- return metaclass(cls.__name__, cls.__bases__, orig_vars)
- return wrapper
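# Decorator-form sketch; note the class is re-created, which is why the
# __slots__ handling above matters.
class _Meta2(type):
    pass

@add_metaclass(_Meta2)
class _Decorated(object):
    __slots__ = ('x',)

assert type(_Decorated) is _Meta2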
-
-
-def python_2_unicode_compatible(klass):
- """
- A decorator that defines __unicode__ and __str__ methods under Python 2.
- Under Python 3 it does nothing.
-
- To support Python 2 and 3 with a single code base, define a __str__ method
- returning text and apply this decorator to the class.
- """
- if PY2:
- if '__str__' not in klass.__dict__:
- raise ValueError("@python_2_unicode_compatible cannot be applied "
- "to %s because it doesn't define __str__()." %
- klass.__name__)
- klass.__unicode__ = klass.__str__
- klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
- return klass
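# Usage sketch: define __str__ returning text and decorate; on PY2 both str()
# and unicode() then behave sensibly, on PY3 the decorator is a no-op.
@python_2_unicode_compatible
class _Greeting(object):
    def __str__(self):
        return u"hello"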
-
-
-# Complete the moves implementation.
-# This code is at the end of this module to speed up module loading.
-# Turn this module into a package.
-__path__ = [] # required for PEP 302 and PEP 451
-__package__ = __name__ # see PEP 366 @ReservedAssignment
-if globals().get("__spec__") is not None:
- __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
-# Remove other six meta path importers, since they cause problems. This can
-# happen if six is removed from sys.modules and then reloaded. (Setuptools does
-# this for some reason.)
-if sys.meta_path:
- for i, importer in enumerate(sys.meta_path):
- # Here's some real nastiness: Another "instance" of the six module might
- # be floating around. Therefore, we can't use isinstance() to check for
- # the six meta path importer, since the other six instance will have
- # inserted an importer with different class.
- if (type(importer).__name__ == "_SixMetaPathImporter" and
- importer.name == __name__):
- del sys.meta_path[i]
- break
- del i, importer
-# Finally, add the importer to the meta path import hook.
-sys.meta_path.append(_importer)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
deleted file mode 100644
index 2aeeeff91..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: MIT
-import sys
-
-try:
- # Our match_hostname function is the same as 3.5's, so we only want to
- # import the match_hostname function if it's at least that good.
- if sys.version_info < (3, 5):
- raise ImportError("Fallback to vendored code")
-
- from ssl import CertificateError, match_hostname
-except ImportError:
- try:
- # Backport of the function from a pypi module
- from backports.ssl_match_hostname import CertificateError, match_hostname
- except ImportError:
- # Our vendored copy
- from ._implementation import CertificateError, match_hostname
-
-# Not needed, but documenting what we provide.
-__all__ = ('CertificateError', 'match_hostname')
diff --git a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
deleted file mode 100644
index 647e081da..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
+++ /dev/null
@@ -1,156 +0,0 @@
-"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
-
-# SPDX-License-Identifier: Python-2.0
-
-import re
-import sys
-
-# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
-# system, use it to handle IPAddress ServerAltnames (this was added in
-# python-3.5) otherwise only do DNS matching. This allows
-# backports.ssl_match_hostname to continue to be used all the way back to
-# python-2.4.
-try:
- import ipaddress
-except ImportError:
- ipaddress = None
-
-__version__ = '3.5.0.1'
-
-
-class CertificateError(ValueError):
- pass
-
-
-def _dnsname_match(dn, hostname, max_wildcards=1):
- """Matching according to RFC 6125, section 6.4.3
-
- http://tools.ietf.org/html/rfc6125#section-6.4.3
- """
- pats = []
- if not dn:
- return False
-
- # Ported from python3-syntax:
- # leftmost, *remainder = dn.split(r'.')
- parts = dn.split(r'.')
- leftmost = parts[0]
- remainder = parts[1:]
-
- wildcards = leftmost.count('*')
- if wildcards > max_wildcards:
- # Issue #17980: avoid denials of service by refusing more
- # than one wildcard per fragment. A survey of established
- # policy among SSL implementations showed it to be a
- # reasonable choice.
- raise CertificateError(
- "too many wildcards in certificate DNS name: " + repr(dn))
-
- # speed up common case w/o wildcards
- if not wildcards:
- return dn.lower() == hostname.lower()
-
- # RFC 6125, section 6.4.3, subitem 1.
- # The client SHOULD NOT attempt to match a presented identifier in which
- # the wildcard character comprises a label other than the left-most label.
- if leftmost == '*':
- # When '*' is a fragment by itself, it matches a non-empty dotless
- # fragment.
- pats.append('[^.]+')
- elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
- # RFC 6125, section 6.4.3, subitem 3.
- # The client SHOULD NOT attempt to match a presented identifier
- # where the wildcard character is embedded within an A-label or
- # U-label of an internationalized domain name.
- pats.append(re.escape(leftmost))
- else:
- # Otherwise, '*' matches any dotless string, e.g. www*
- pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
-
- # add the remaining fragments, ignore any wildcards
- for frag in remainder:
- pats.append(re.escape(frag))
-
- pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
- return pat.match(hostname)
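# Behavior sketch for the matcher above (calling the private helper directly,
# purely for illustration):
assert _dnsname_match('*.example.com', 'www.example.com')
assert not _dnsname_match('*.example.com', 'a.b.example.com')  # one label only
assert not _dnsname_match('www*.example.com', 'wwwx.example.org')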
-
-
-def _to_unicode(obj):
- if isinstance(obj, str) and sys.version_info < (3,):
- obj = unicode(obj, encoding='ascii', errors='strict')
- return obj
-
-def _ipaddress_match(ipname, host_ip):
- """Exact matching of IP addresses.
-
- RFC 6125 explicitly doesn't define an algorithm for this
- (section 1.7.2 - "Out of Scope").
- """
- # OpenSSL may add a trailing newline to a subjectAltName's IP address
- # Divergence from upstream: ipaddress can't handle byte str
- ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
- return ip == host_ip
-
-
-def match_hostname(cert, hostname):
- """Verify that *cert* (in decoded format as returned by
- SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
- rules are followed. Unlike the upstream CPython version, this vendored
- copy also accepts IP addresses for *hostname* when the ipaddress
- module is available.
-
- CertificateError is raised on failure. On success, the function
- returns nothing.
- """
- if not cert:
- raise ValueError("empty or no certificate, match_hostname needs a "
- "SSL socket or SSL context with either "
- "CERT_OPTIONAL or CERT_REQUIRED")
- try:
- # Divergence from upstream: ipaddress can't handle byte str
- host_ip = ipaddress.ip_address(_to_unicode(hostname))
- except ValueError:
- # Not an IP address (common case)
- host_ip = None
- except UnicodeError:
- # Divergence from upstream: Have to deal with ipaddress not taking
- # byte strings. addresses should be all ascii, so we consider it not
- # an ipaddress in this case
- host_ip = None
- except AttributeError:
- # Divergence from upstream: Make ipaddress library optional
- if ipaddress is None:
- host_ip = None
- else:
- raise
- dnsnames = []
- san = cert.get('subjectAltName', ())
- for key, value in san:
- if key == 'DNS':
- if host_ip is None and _dnsname_match(value, hostname):
- return
- dnsnames.append(value)
- elif key == 'IP Address':
- if host_ip is not None and _ipaddress_match(value, host_ip):
- return
- dnsnames.append(value)
- if not dnsnames:
- # The subject is only checked when there is no dNSName entry
- # in subjectAltName
- for sub in cert.get('subject', ()):
- for key, value in sub:
- # XXX according to RFC 2818, the most specific Common Name
- # must be used.
- if key == 'commonName':
- if _dnsname_match(value, hostname):
- return
- dnsnames.append(value)
- if len(dnsnames) > 1:
- raise CertificateError("hostname %r "
- "doesn't match either of %s"
- % (hostname, ', '.join(map(repr, dnsnames))))
- elif len(dnsnames) == 1:
- raise CertificateError("hostname %r "
- "doesn't match %r"
- % (hostname, dnsnames[0]))
- else:
- raise CertificateError("no appropriate commonName or "
- "subjectAltName fields were found")
diff --git a/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py b/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
deleted file mode 100644
index adea9bc01..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
+++ /dev/null
@@ -1,441 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import collections
-import functools
-import logging
-
-from ._collections import RecentlyUsedContainer
-from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
-from .connectionpool import port_by_scheme
-from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
-from .packages.six.moves.urllib.parse import urljoin
-from .request import RequestMethods
-from .util.url import parse_url
-from .util.retry import Retry
-
-
-__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
-
-
-log = logging.getLogger(__name__)
-
-SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
- 'ssl_version', 'ca_cert_dir', 'ssl_context')
-
-# All known keyword arguments that could be provided to the pool manager, its
-# pools, or the underlying connections. This is used to construct a pool key.
-_key_fields = (
- 'key_scheme', # str
- 'key_host', # str
- 'key_port', # int
- 'key_timeout', # int or float or Timeout
- 'key_retries', # int or Retry
- 'key_strict', # bool
- 'key_block', # bool
- 'key_source_address', # str
- 'key_key_file', # str
- 'key_cert_file', # str
- 'key_cert_reqs', # str
- 'key_ca_certs', # str
- 'key_ssl_version', # str
- 'key_ca_cert_dir', # str
- 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
- 'key_maxsize', # int
- 'key_headers', # dict
- 'key__proxy', # parsed proxy url
- 'key__proxy_headers', # dict
- 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples
- 'key__socks_options', # dict
- 'key_assert_hostname', # bool or string
- 'key_assert_fingerprint', # str
-)
-
-#: The namedtuple class used to construct keys for the connection pool.
-#: All custom key schemes should include the fields in this key at a minimum.
-PoolKey = collections.namedtuple('PoolKey', _key_fields)
-
-
-def _default_key_normalizer(key_class, request_context):
- """
- Create a pool key out of a request context dictionary.
-
- According to RFC 3986, both the scheme and host are case-insensitive.
- Therefore, this function normalizes both before constructing the pool
- key for a request. If you wish to change this behaviour, provide
- alternate callables to ``key_fn_by_scheme``.
-
- :param key_class:
- The class to use when constructing the key. This should be a namedtuple
- with the ``scheme`` and ``host`` keys at a minimum.
- :type key_class: namedtuple
- :param request_context:
- A dictionary-like object that contain the context for a request.
- :type request_context: dict
-
- :return: A namedtuple that can be used as a connection pool key.
- :rtype: PoolKey
- """
- # Since we mutate the dictionary, make a copy first
- context = request_context.copy()
- context['scheme'] = context['scheme'].lower()
- context['host'] = context['host'].lower()
-
- # These are both dictionaries and need to be transformed into frozensets
- for key in ('headers', '_proxy_headers', '_socks_options'):
- if key in context and context[key] is not None:
- context[key] = frozenset(context[key].items())
-
- # The socket_options key may be a list and needs to be transformed into a
- # tuple.
- socket_opts = context.get('socket_options')
- if socket_opts is not None:
- context['socket_options'] = tuple(socket_opts)
-
- # Map the kwargs to the names in the namedtuple - this is necessary since
- # namedtuples can't have fields starting with '_'.
- for key in list(context.keys()):
- context['key_' + key] = context.pop(key)
-
- # Default to ``None`` for keys missing from the context
- for field in key_class._fields:
- if field not in context:
- context[field] = None
-
- return key_class(**context)
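# Sketch of the normalization above: scheme/host are lowercased, kwargs are
# renamed with a 'key_' prefix, and missing PoolKey fields default to None.
_ctx = {'scheme': 'HTTPS', 'host': 'Example.COM', 'port': 443}
_key = _default_key_normalizer(PoolKey, _ctx)
assert _key.key_scheme == 'https' and _key.key_host == 'example.com'
assert _key.key_timeout is None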
-
-
-#: A dictionary that maps a scheme to a callable that creates a pool key.
-#: This can be used to alter the way pool keys are constructed, if desired.
-#: Each PoolManager makes a copy of this dictionary so they can be configured
-#: globally here, or individually on the instance.
-key_fn_by_scheme = {
- 'http': functools.partial(_default_key_normalizer, PoolKey),
- 'https': functools.partial(_default_key_normalizer, PoolKey),
-}
-
-pool_classes_by_scheme = {
- 'http': HTTPConnectionPool,
- 'https': HTTPSConnectionPool,
-}
-
-
-class PoolManager(RequestMethods):
- """
- Allows for arbitrary requests while transparently keeping track of
- necessary connection pools for you.
-
- :param num_pools:
- Number of connection pools to cache before discarding the least
- recently used pool.
-
- :param headers:
- Headers to include with all requests, unless other headers are given
- explicitly.
-
- :param \\**connection_pool_kw:
- Additional parameters are used to create fresh
- :class:`urllib3.connectionpool.ConnectionPool` instances.
-
- Example::
-
- >>> manager = PoolManager(num_pools=2)
- >>> r = manager.request('GET', 'http://google.com/')
- >>> r = manager.request('GET', 'http://google.com/mail')
- >>> r = manager.request('GET', 'http://yahoo.com/')
- >>> len(manager.pools)
- 2
-
- """
-
- proxy = None
-
- def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
- RequestMethods.__init__(self, headers)
- self.connection_pool_kw = connection_pool_kw
- self.pools = RecentlyUsedContainer(num_pools,
- dispose_func=lambda p: p.close())
-
- # Locally set the pool classes and keys so other PoolManagers can
- # override them.
- self.pool_classes_by_scheme = pool_classes_by_scheme
- self.key_fn_by_scheme = key_fn_by_scheme.copy()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.clear()
- # Return False to re-raise any potential exceptions
- return False
-
- def _new_pool(self, scheme, host, port, request_context=None):
- """
- Create a new :class:`ConnectionPool` based on host, port, scheme, and
- any additional pool keyword arguments.
-
- If ``request_context`` is provided, it is provided as keyword arguments
- to the pool class used. This method is used to actually create the
- connection pools handed out by :meth:`connection_from_url` and
- companion methods. It is intended to be overridden for customization.
- """
- pool_cls = self.pool_classes_by_scheme[scheme]
- if request_context is None:
- request_context = self.connection_pool_kw.copy()
-
- # Although the context has everything necessary to create the pool,
- # this function has historically only used the scheme, host, and port
- # in the positional args. When an API change is acceptable these can
- # be removed.
- for key in ('scheme', 'host', 'port'):
- request_context.pop(key, None)
-
- if scheme == 'http':
- for kw in SSL_KEYWORDS:
- request_context.pop(kw, None)
-
- return pool_cls(host, port, **request_context)
-
- def clear(self):
- """
- Empty our store of pools and direct them all to close.
-
- This will not affect in-flight connections, but they will not be
- re-used after completion.
- """
- self.pools.clear()
-
- def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
- """
- Get a :class:`ConnectionPool` based on the host, port, and scheme.
-
- If ``port`` isn't given, it will be derived from the ``scheme`` using
- ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
- provided, it is merged with the instance's ``connection_pool_kw``
- variable and used to create the new connection pool, if one is
- needed.
- """
-
- if not host:
- raise LocationValueError("No host specified.")
-
- request_context = self._merge_pool_kwargs(pool_kwargs)
- request_context['scheme'] = scheme or 'http'
- if not port:
- port = port_by_scheme.get(request_context['scheme'].lower(), 80)
- request_context['port'] = port
- request_context['host'] = host
-
- return self.connection_from_context(request_context)
-
- def connection_from_context(self, request_context):
- """
- Get a :class:`ConnectionPool` based on the request context.
-
- ``request_context`` must at least contain the ``scheme`` key and its
- value must be a key in ``key_fn_by_scheme`` instance variable.
- """
- scheme = request_context['scheme'].lower()
- pool_key_constructor = self.key_fn_by_scheme[scheme]
- pool_key = pool_key_constructor(request_context)
-
- return self.connection_from_pool_key(pool_key, request_context=request_context)
-
- def connection_from_pool_key(self, pool_key, request_context=None):
- """
- Get a :class:`ConnectionPool` based on the provided pool key.
-
- ``pool_key`` should be a namedtuple that only contains immutable
- objects. At a minimum it must have the ``scheme``, ``host``, and
- ``port`` fields.
- """
- with self.pools.lock:
- # If the scheme, host, or port doesn't match existing open
- # connections, open a new ConnectionPool.
- pool = self.pools.get(pool_key)
- if pool:
- return pool
-
- # Make a fresh ConnectionPool of the desired type
- scheme = request_context['scheme']
- host = request_context['host']
- port = request_context['port']
- pool = self._new_pool(scheme, host, port, request_context=request_context)
- self.pools[pool_key] = pool
-
- return pool
-
- def connection_from_url(self, url, pool_kwargs=None):
- """
- Similar to :func:`urllib3.connectionpool.connection_from_url`.
-
- If ``pool_kwargs`` is not provided and a new pool needs to be
- constructed, ``self.connection_pool_kw`` is used to initialize
- the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
- is provided, it is used instead. Note that if a new pool does not
- need to be created for the request, the provided ``pool_kwargs`` are
- not used.
- """
- u = parse_url(url)
- return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
- pool_kwargs=pool_kwargs)
-
- def _merge_pool_kwargs(self, override):
- """
- Merge a dictionary of override values for self.connection_pool_kw.
-
- This does not modify self.connection_pool_kw and returns a new dict.
- Any keys in the override dictionary with a value of ``None`` are
- removed from the merged dictionary.
- """
- base_pool_kwargs = self.connection_pool_kw.copy()
- if override:
- for key, value in override.items():
- if value is None:
- try:
- del base_pool_kwargs[key]
- except KeyError:
- pass
- else:
- base_pool_kwargs[key] = value
- return base_pool_kwargs
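# Illustration of the None-removes-key merge semantics described above
# (values are hypothetical, and this calls a private method for demonstration):
_pm = PoolManager(timeout=3.0, retries=2)
_merged = _pm._merge_pool_kwargs({'retries': None, 'maxsize': 8})
assert _merged == {'timeout': 3.0, 'maxsize': 8}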
-
- def urlopen(self, method, url, redirect=True, **kw):
- """
- Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
- with custom cross-host redirect logic and only sends the request-uri
- portion of the ``url``.
-
- The given ``url`` parameter must be absolute, such that an appropriate
- :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
- """
- u = parse_url(url)
- conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
-
- kw['assert_same_host'] = False
- kw['redirect'] = False
- if 'headers' not in kw:
- kw['headers'] = self.headers
-
- if self.proxy is not None and u.scheme == "http":
- response = conn.urlopen(method, url, **kw)
- else:
- response = conn.urlopen(method, u.request_uri, **kw)
-
- redirect_location = redirect and response.get_redirect_location()
- if not redirect_location:
- return response
-
- # Support relative URLs for redirecting.
- redirect_location = urljoin(url, redirect_location)
-
- # RFC 7231, Section 6.4.4
- if response.status == 303:
- method = 'GET'
-
- retries = kw.get('retries')
- if not isinstance(retries, Retry):
- retries = Retry.from_int(retries, redirect=redirect)
-
- try:
- retries = retries.increment(method, url, response=response, _pool=conn)
- except MaxRetryError:
- if retries.raise_on_redirect:
- raise
- return response
-
- kw['retries'] = retries
- kw['redirect'] = redirect
-
- log.info("Redirecting %s -> %s", url, redirect_location)
- return self.urlopen(method, redirect_location, **kw)
-
-
-class ProxyManager(PoolManager):
- """
- Behaves just like :class:`PoolManager`, but sends all requests through
- the defined proxy, using the CONNECT method for HTTPS URLs.
-
- :param proxy_url:
- The URL of the proxy to be used.
-
- :param proxy_headers:
- A dictionary containing headers that will be sent to the proxy. In the
- case of HTTP they are sent with each request, while in the
- HTTPS/CONNECT case they are sent only once. Could be used for proxy
- authentication.
-
- Example:
- >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
- >>> r1 = proxy.request('GET', 'http://google.com/')
- >>> r2 = proxy.request('GET', 'http://httpbin.org/')
- >>> len(proxy.pools)
- 1
- >>> r3 = proxy.request('GET', 'https://httpbin.org/')
- >>> r4 = proxy.request('GET', 'https://twitter.com/')
- >>> len(proxy.pools)
- 3
-
- """
-
- def __init__(self, proxy_url, num_pools=10, headers=None,
- proxy_headers=None, **connection_pool_kw):
-
- if isinstance(proxy_url, HTTPConnectionPool):
- proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
- proxy_url.port)
- proxy = parse_url(proxy_url)
- if not proxy.port:
- port = port_by_scheme.get(proxy.scheme, 80)
- proxy = proxy._replace(port=port)
-
- if proxy.scheme not in ("http", "https"):
- raise ProxySchemeUnknown(proxy.scheme)
-
- self.proxy = proxy
- self.proxy_headers = proxy_headers or {}
-
- connection_pool_kw['_proxy'] = self.proxy
- connection_pool_kw['_proxy_headers'] = self.proxy_headers
-
- super(ProxyManager, self).__init__(
- num_pools, headers, **connection_pool_kw)
-
- def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
- if scheme == "https":
- return super(ProxyManager, self).connection_from_host(
- host, port, scheme, pool_kwargs=pool_kwargs)
-
- return super(ProxyManager, self).connection_from_host(
- self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
-
- def _set_proxy_headers(self, url, headers=None):
- """
- Sets headers needed by proxies: specifically, the Accept and Host
- headers. Only sets headers not provided by the user.
- """
- headers_ = {'Accept': '*/*'}
-
- netloc = parse_url(url).netloc
- if netloc:
- headers_['Host'] = netloc
-
- if headers:
- headers_.update(headers)
- return headers_
-
- def urlopen(self, method, url, redirect=True, **kw):
- "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
- u = parse_url(url)
-
- if u.scheme == "http":
- # For proxied HTTPS requests, httplib sets the necessary headers
- # on the CONNECT to the proxy. For HTTP, we'll definitely
- # need to set 'Host' at the very least.
- headers = kw.get('headers', self.headers)
- kw['headers'] = self._set_proxy_headers(url, headers)
-
- return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
-
-
-def proxy_from_url(url, **kw):
- return ProxyManager(proxy_url=url, **kw)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/request.py b/collectors/python.d.plugin/python_modules/urllib3/request.py
deleted file mode 100644
index f78331975..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/request.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-
-from .filepost import encode_multipart_formdata
-from .packages.six.moves.urllib.parse import urlencode
-
-
-__all__ = ['RequestMethods']
-
-
-class RequestMethods(object):
- """
- Convenience mixin for classes who implement a :meth:`urlopen` method, such
- as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
- :class:`~urllib3.poolmanager.PoolManager`.
-
- Provides behavior for making common types of HTTP request methods and
- decides which type of request field encoding to use.
-
- Specifically,
-
- :meth:`.request_encode_url` is for sending requests whose fields are
- encoded in the URL (such as GET, HEAD, DELETE).
-
- :meth:`.request_encode_body` is for sending requests whose fields are
- encoded in the *body* of the request using multipart or www-form-urlencoded
- (such as for POST, PUT, PATCH).
-
- :meth:`.request` is for making any kind of request, it will look up the
- appropriate encoding format and use one of the above two methods to make
- the request.
-
- Initializer parameters:
-
- :param headers:
- Headers to include with all requests, unless other headers are given
- explicitly.
- """
-
- _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
-
- def __init__(self, headers=None):
- self.headers = headers or {}
-
- def urlopen(self, method, url, body=None, headers=None,
- encode_multipart=True, multipart_boundary=None,
- **kw): # Abstract
- raise NotImplementedError("Classes extending RequestMethods must implement "
- "their own ``urlopen`` method.")
-
- def request(self, method, url, fields=None, headers=None, **urlopen_kw):
- """
- Make a request using :meth:`urlopen` with the appropriate encoding of
- ``fields`` based on the ``method`` used.
-
- This is a convenience method that requires the least amount of manual
- effort. It can be used in most situations, while still having the
- option to drop down to more specific methods when necessary, such as
- :meth:`request_encode_url`, :meth:`request_encode_body`,
- or even the lowest level :meth:`urlopen`.
- """
- method = method.upper()
-
- if method in self._encode_url_methods:
- return self.request_encode_url(method, url, fields=fields,
- headers=headers,
- **urlopen_kw)
- else:
- return self.request_encode_body(method, url, fields=fields,
- headers=headers,
- **urlopen_kw)
-
- def request_encode_url(self, method, url, fields=None, headers=None,
- **urlopen_kw):
- """
- Make a request using :meth:`urlopen` with the ``fields`` encoded in
- the url. This is useful for request methods like GET, HEAD, DELETE, etc.
- """
- if headers is None:
- headers = self.headers
-
- extra_kw = {'headers': headers}
- extra_kw.update(urlopen_kw)
-
- if fields:
- url += '?' + urlencode(fields)
-
- return self.urlopen(method, url, **extra_kw)
-
- def request_encode_body(self, method, url, fields=None, headers=None,
- encode_multipart=True, multipart_boundary=None,
- **urlopen_kw):
- """
- Make a request using :meth:`urlopen` with the ``fields`` encoded in
- the body. This is useful for request methods like POST, PUT, PATCH, etc.
-
- When ``encode_multipart=True`` (default), then
- :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
- the payload with the appropriate content type. Otherwise
- :meth:`urllib.urlencode` is used with the
- 'application/x-www-form-urlencoded' content type.
-
- Multipart encoding must be used when posting files, and it's reasonably
- safe to use it at other times too. However, it may break request
- signing, such as with OAuth.
-
- Supports an optional ``fields`` parameter of key/value strings AND
- key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
- the MIME type is optional. For example::
-
- fields = {
- 'foo': 'bar',
- 'fakefile': ('foofile.txt', 'contents of foofile'),
- 'realfile': ('barfile.txt', open('realfile').read()),
- 'typedfile': ('bazfile.bin', open('bazfile').read(),
- 'image/jpeg'),
- 'nonamefile': 'contents of nonamefile field',
- }
-
- When uploading a file, providing a filename (the first parameter of the
- tuple) is optional but recommended to best mimic the behavior of browsers.
-
- Note that if ``headers`` are supplied, the 'Content-Type' header will
- be overwritten because it depends on the dynamic random boundary string
- which is used to compose the body of the request. The random boundary
- string can be explicitly set with the ``multipart_boundary`` parameter.
- """
- if headers is None:
- headers = self.headers
-
- extra_kw = {'headers': {}}
-
- if fields:
- if 'body' in urlopen_kw:
- raise TypeError(
- "request got values for both 'fields' and 'body', can only specify one.")
-
- if encode_multipart:
- body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
- else:
- body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
-
- extra_kw['body'] = body
- extra_kw['headers'] = {'Content-Type': content_type}
-
- extra_kw['headers'].update(headers)
- extra_kw.update(urlopen_kw)
-
- return self.urlopen(method, url, **extra_kw)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/response.py b/collectors/python.d.plugin/python_modules/urllib3/response.py
deleted file mode 100644
index cf14a3076..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/response.py
+++ /dev/null
@@ -1,623 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-from contextlib import contextmanager
-import zlib
-import io
-import logging
-from socket import timeout as SocketTimeout
-from socket import error as SocketError
-
-from ._collections import HTTPHeaderDict
-from .exceptions import (
- BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
- ResponseNotChunked, IncompleteRead, InvalidHeader
-)
-from .packages.six import string_types as basestring, binary_type, PY3
-from .packages.six.moves import http_client as httplib
-from .connection import HTTPException, BaseSSLError
-from .util.response import is_fp_closed, is_response_to_head
-
-log = logging.getLogger(__name__)
-
-
-class DeflateDecoder(object):
-
- def __init__(self):
- self._first_try = True
- self._data = binary_type()
- self._obj = zlib.decompressobj()
-
- def __getattr__(self, name):
- return getattr(self._obj, name)
-
- def decompress(self, data):
- if not data:
- return data
-
- if not self._first_try:
- return self._obj.decompress(data)
-
- self._data += data
- try:
- decompressed = self._obj.decompress(data)
- if decompressed:
- self._first_try = False
- self._data = None
- return decompressed
- except zlib.error:
- self._first_try = False
- self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
- try:
- return self.decompress(self._data)
- finally:
- self._data = None
-
-
-class GzipDecoder(object):
-
- def __init__(self):
- self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
-
- def __getattr__(self, name):
- return getattr(self._obj, name)
-
- def decompress(self, data):
- if not data:
- return data
- return self._obj.decompress(data)
-
-
-def _get_decoder(mode):
- if mode == 'gzip':
- return GzipDecoder()
-
- return DeflateDecoder()
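# Round-trip sketch for the decoders above, using zlib (already imported here)
# to produce a gzip-framed payload:
_comp = zlib.compressobj(9, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
_payload = _comp.compress(b'hello') + _comp.flush()
_dec = _get_decoder('gzip')
assert _dec.decompress(_payload) == b'hello'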
-
-
-class HTTPResponse(io.IOBase):
- """
- HTTP Response container.
-
- Backwards-compatible with httplib's HTTPResponse, but the response ``body`` is
- loaded and decoded on-demand when the ``data`` property is accessed. This
- class is also compatible with the Python standard library's :mod:`io`
- module, and can hence be treated as a readable object in the context of that
- framework.
-
- Extra parameters for behaviour not present in httplib.HTTPResponse:
-
- :param preload_content:
- If True, the response's body will be preloaded during construction.
-
- :param decode_content:
- If True, the body will be decoded based on the 'content-encoding'
- header (supported encodings: 'gzip' and 'deflate'). If False,
- decoding is skipped and the raw data is returned instead.
-
- :param original_response:
- When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
- object, it's convenient to include the original for debug purposes. It's
- otherwise unused.
-
- :param retries:
- The retries contains the last :class:`~urllib3.util.retry.Retry` that
- was used during the request.
-
- :param enforce_content_length:
- Enforce content length checking. Body returned by server must match
- the value of the Content-Length header, if present; on mismatch an
- error is raised.
- """
-
- CONTENT_DECODERS = ['gzip', 'deflate']
- REDIRECT_STATUSES = [301, 302, 303, 307, 308]
-
- def __init__(self, body='', headers=None, status=0, version=0, reason=None,
- strict=0, preload_content=True, decode_content=True,
- original_response=None, pool=None, connection=None,
- retries=None, enforce_content_length=False, request_method=None):
-
- if isinstance(headers, HTTPHeaderDict):
- self.headers = headers
- else:
- self.headers = HTTPHeaderDict(headers)
- self.status = status
- self.version = version
- self.reason = reason
- self.strict = strict
- self.decode_content = decode_content
- self.retries = retries
- self.enforce_content_length = enforce_content_length
-
- self._decoder = None
- self._body = None
- self._fp = None
- self._original_response = original_response
- self._fp_bytes_read = 0
-
- if body and isinstance(body, (basestring, binary_type)):
- self._body = body
-
- self._pool = pool
- self._connection = connection
-
- if hasattr(body, 'read'):
- self._fp = body
-
- # Are we using the chunked-style of transfer encoding?
- self.chunked = False
- self.chunk_left = None
- tr_enc = self.headers.get('transfer-encoding', '').lower()
- # Don't incur the penalty of creating a list and then discarding it
- encodings = (enc.strip() for enc in tr_enc.split(","))
- if "chunked" in encodings:
- self.chunked = True
-
- # Determine length of response
- self.length_remaining = self._init_length(request_method)
-
- # If requested, preload the body.
- if preload_content and not self._body:
- self._body = self.read(decode_content=decode_content)
-
- def get_redirect_location(self):
- """
- Should we redirect and where to?
-
- :returns: Truthy redirect location string if we got a redirect status
- code and valid location. ``None`` if redirect status and no
- location. ``False`` if not a redirect status code.
- """
- if self.status in self.REDIRECT_STATUSES:
- return self.headers.get('location')
-
- return False
-
- def release_conn(self):
- if not self._pool or not self._connection:
- return
-
- self._pool._put_conn(self._connection)
- self._connection = None
-
- @property
- def data(self):
- # For backwards-compat with urllib3 0.4 and earlier.
- if self._body:
- return self._body
-
- if self._fp:
- return self.read(cache_content=True)
-
- @property
- def connection(self):
- return self._connection
-
- def tell(self):
- """
- Obtain the number of bytes pulled over the wire so far. May differ from
- the amount of content returned by :meth:`HTTPResponse.read` if bytes
- are encoded on the wire (e.g., compressed).
- """
- return self._fp_bytes_read
-
- def _init_length(self, request_method):
- """
- Set initial length value for Response content if available.
- """
- length = self.headers.get('content-length')
-
- if length is not None and self.chunked:
- # This Response will fail with an IncompleteRead if it can't be
- # received as chunked. This method falls back to attempt reading
- # the response before raising an exception.
- log.warning("Received response with both Content-Length and "
- "Transfer-Encoding set. This is expressly forbidden "
- "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
- "attempting to process response as Transfer-Encoding: "
- "chunked.")
- return None
-
- elif length is not None:
- try:
- # RFC 7230 section 3.3.2 specifies multiple content lengths can
- # be sent in a single Content-Length header
- # (e.g. Content-Length: 42, 42). This line ensures the values
- # are all valid ints and that as long as the `set` length is 1,
- # all values are the same. Otherwise, the header is invalid.
- lengths = set([int(val) for val in length.split(',')])
- if len(lengths) > 1:
- raise InvalidHeader("Content-Length contained multiple "
- "unmatching values (%s)" % length)
- length = lengths.pop()
- except ValueError:
- length = None
- else:
- if length < 0:
- length = None
-
- # Convert status to int for comparison
- # In some cases, httplib returns a status of "_UNKNOWN"
- try:
- status = int(self.status)
- except ValueError:
- status = 0
-
- # Check for responses that shouldn't include a body
- if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
- length = 0
-
- return length
-
- def _init_decoder(self):
- """
- Set up the _decoder attribute if necessary.
- """
- # Note: content-encoding value should be case-insensitive, per RFC 7230
- # Section 3.2
- content_encoding = self.headers.get('content-encoding', '').lower()
- if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
- self._decoder = _get_decoder(content_encoding)
-
- def _decode(self, data, decode_content, flush_decoder):
- """
- Decode the data passed in and potentially flush the decoder.
- """
- try:
- if decode_content and self._decoder:
- data = self._decoder.decompress(data)
- except (IOError, zlib.error) as e:
- content_encoding = self.headers.get('content-encoding', '').lower()
- raise DecodeError(
- "Received response with content-encoding: %s, but "
- "failed to decode it." % content_encoding, e)
-
- if flush_decoder and decode_content:
- data += self._flush_decoder()
-
- return data
-
- def _flush_decoder(self):
- """
- Flushes the decoder. Should only be called if the decoder is actually
- being used.
- """
- if self._decoder:
- buf = self._decoder.decompress(b'')
- return buf + self._decoder.flush()
-
- return b''
-
- @contextmanager
- def _error_catcher(self):
- """
- Catch low-level python exceptions, instead re-raising urllib3
- variants, so that low-level exceptions are not leaked in the
- high-level api.
-
- On exit, release the connection back to the pool.
- """
- clean_exit = False
-
- try:
- try:
- yield
-
- except SocketTimeout:
- # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
- # there is yet no clean way to get at it from this context.
- raise ReadTimeoutError(self._pool, None, 'Read timed out.')
-
- except BaseSSLError as e:
- # FIXME: Is there a better way to differentiate between SSLErrors?
- if 'read operation timed out' not in str(e): # Defensive:
- # This shouldn't happen but just in case we're missing an edge
- # case, let's avoid swallowing SSL errors.
- raise
-
- raise ReadTimeoutError(self._pool, None, 'Read timed out.')
-
- except (HTTPException, SocketError) as e:
- # This includes IncompleteRead.
- raise ProtocolError('Connection broken: %r' % e, e)
-
- # If no exception is thrown, we should avoid cleaning up
- # unnecessarily.
- clean_exit = True
- finally:
- # If we didn't terminate cleanly, we need to throw away our
- # connection.
- if not clean_exit:
- # The response may not be closed but we're not going to use it
- # anymore so close it now to ensure that the connection is
- # released back to the pool.
- if self._original_response:
- self._original_response.close()
-
- # Closing the response may not actually be sufficient to close
- # everything, so if we have a hold of the connection close that
- # too.
- if self._connection:
- self._connection.close()
-
- # If we hold the original response but it's closed now, we should
- # return the connection back to the pool.
- if self._original_response and self._original_response.isclosed():
- self.release_conn()
-
- def read(self, amt=None, decode_content=None, cache_content=False):
- """
- Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
- parameters: ``decode_content`` and ``cache_content``.
-
- :param amt:
- How much of the content to read. If specified, caching is skipped
- because it doesn't make sense to cache partial content as the full
- response.
-
- :param decode_content:
- If True, will attempt to decode the body based on the
- 'content-encoding' header.
-
- :param cache_content:
- If True, will save the returned data such that the same result is
- returned regardless of the state of the underlying file object. This
- is useful if you want the ``.data`` property to continue working
- after having ``.read()`` the file object. (Overridden if ``amt`` is
- set.)
- """
- self._init_decoder()
- if decode_content is None:
- decode_content = self.decode_content
-
- if self._fp is None:
- return
-
- flush_decoder = False
- data = None
-
- with self._error_catcher():
- if amt is None:
- # cStringIO doesn't like amt=None
- data = self._fp.read()
- flush_decoder = True
- else:
- cache_content = False
- data = self._fp.read(amt)
- if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
- # Close the connection when no data is returned
- #
- # This is redundant to what httplib/http.client _should_
- # already do. However, versions of python released before
- # December 15, 2012 (http://bugs.python.org/issue16298) do
- # not properly close the connection in all cases. There is
- # no harm in redundantly calling close.
- self._fp.close()
- flush_decoder = True
- if self.enforce_content_length and self.length_remaining not in (0, None):
- # This is an edge case that httplib failed to cover due
- # to concerns of backward compatibility. We're
- # addressing it here to make sure IncompleteRead is
- # raised during streaming, so all calls with incorrect
- # Content-Length are caught.
- raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
-
- if data:
- self._fp_bytes_read += len(data)
- if self.length_remaining is not None:
- self.length_remaining -= len(data)
-
- data = self._decode(data, decode_content, flush_decoder)
-
- if cache_content:
- self._body = data
-
- return data
-
- def stream(self, amt=2**16, decode_content=None):
- """
- A generator wrapper for the read() method. A call will block until
- ``amt`` bytes have been read from the connection or until the
- connection is closed.
-
- :param amt:
- How much of the content to read. The generator will return up to
- this much data per iteration, but may return less. This is particularly
- likely when using compressed data. However, the empty string will
- never be returned.
-
- :param decode_content:
- If True, will attempt to decode the body based on the
- 'content-encoding' header.
- """
- if self.chunked and self.supports_chunked_reads():
- for line in self.read_chunked(amt, decode_content=decode_content):
- yield line
- else:
- while not is_fp_closed(self._fp):
- data = self.read(amt=amt, decode_content=decode_content)
-
- if data:
- yield data
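# Streaming sketch (assumes the public urllib3 package): with
# preload_content=False the body is consumed in chunks instead of buffered.
import urllib3
_http = urllib3.PoolManager()
_resp = _http.request('GET', 'http://example.com/', preload_content=False)
for _chunk in _resp.stream(1024):
    pass          # up to 1024 decoded bytes per iteration
_resp.release_conn()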
-
- @classmethod
- def from_httplib(ResponseCls, r, **response_kw):
- """
- Given an :class:`httplib.HTTPResponse` instance ``r``, return a
- corresponding :class:`urllib3.response.HTTPResponse` object.
-
- Remaining parameters are passed to the HTTPResponse constructor, along
- with ``original_response=r``.
- """
- headers = r.msg
-
- if not isinstance(headers, HTTPHeaderDict):
- if PY3: # Python 3
- headers = HTTPHeaderDict(headers.items())
- else: # Python 2
- headers = HTTPHeaderDict.from_httplib(headers)
-
- # HTTPResponse objects in Python 3 don't have a .strict attribute
- strict = getattr(r, 'strict', 0)
- resp = ResponseCls(body=r,
- headers=headers,
- status=r.status,
- version=r.version,
- reason=r.reason,
- strict=strict,
- original_response=r,
- **response_kw)
- return resp
-
- # Backwards-compatibility methods for httplib.HTTPResponse
- def getheaders(self):
- return self.headers
-
- def getheader(self, name, default=None):
- return self.headers.get(name, default)
-
- # Overrides from io.IOBase
- def close(self):
- if not self.closed:
- self._fp.close()
-
- if self._connection:
- self._connection.close()
-
- @property
- def closed(self):
- if self._fp is None:
- return True
- elif hasattr(self._fp, 'isclosed'):
- return self._fp.isclosed()
- elif hasattr(self._fp, 'closed'):
- return self._fp.closed
- else:
- return True
-
- def fileno(self):
- if self._fp is None:
- raise IOError("HTTPResponse has no file to get a fileno from")
- elif hasattr(self._fp, "fileno"):
- return self._fp.fileno()
- else:
- raise IOError("The file-like object this HTTPResponse is wrapped "
- "around has no file descriptor")
-
- def flush(self):
- if self._fp is not None and hasattr(self._fp, 'flush'):
- return self._fp.flush()
-
- def readable(self):
- # This method is required for `io` module compatibility.
- return True
-
- def readinto(self, b):
- # This method is required for `io` module compatibility.
- temp = self.read(len(b))
- if len(temp) == 0:
- return 0
- else:
- b[:len(temp)] = temp
- return len(temp)
-
- def supports_chunked_reads(self):
- """
- Checks if the underlying file-like object looks like a
- httplib.HTTPResponse object. We do this by testing for the fp
- attribute. If it is present we assume it returns raw chunks as
- processed by read_chunked().
- """
- return hasattr(self._fp, 'fp')
-
- def _update_chunk_length(self):
- # First, we'll figure out length of a chunk and then
- # we'll try to read it from socket.
- if self.chunk_left is not None:
- return
- line = self._fp.fp.readline()
- line = line.split(b';', 1)[0]
- try:
- self.chunk_left = int(line, 16)
- except ValueError:
- # Invalid chunked protocol response, abort.
- self.close()
- raise httplib.IncompleteRead(line)
-
- def _handle_chunk(self, amt):
- returned_chunk = None
- if amt is None:
- chunk = self._fp._safe_read(self.chunk_left)
- returned_chunk = chunk
- self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
- self.chunk_left = None
- elif amt < self.chunk_left:
- value = self._fp._safe_read(amt)
- self.chunk_left = self.chunk_left - amt
- returned_chunk = value
- elif amt == self.chunk_left:
- value = self._fp._safe_read(amt)
- self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
- self.chunk_left = None
- returned_chunk = value
- else: # amt > self.chunk_left
- returned_chunk = self._fp._safe_read(self.chunk_left)
- self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
- self.chunk_left = None
- return returned_chunk
-
- def read_chunked(self, amt=None, decode_content=None):
- """
- Similar to :meth:`HTTPResponse.read`, but with an additional
- parameter: ``decode_content``.
-
- :param decode_content:
- If True, will attempt to decode the body based on the
- 'content-encoding' header.
- """
- self._init_decoder()
- # FIXME: Rewrite this method and make it a class with a better structured logic.
- if not self.chunked:
- raise ResponseNotChunked(
- "Response is not chunked. "
- "Header 'transfer-encoding: chunked' is missing.")
- if not self.supports_chunked_reads():
- raise BodyNotHttplibCompatible(
- "Body should be httplib.HTTPResponse like. "
- "It should have have an fp attribute which returns raw chunks.")
-
- # Don't bother reading the body of a HEAD request.
- if self._original_response and is_response_to_head(self._original_response):
- self._original_response.close()
- return
-
- with self._error_catcher():
- while True:
- self._update_chunk_length()
- if self.chunk_left == 0:
- break
- chunk = self._handle_chunk(amt)
- decoded = self._decode(chunk, decode_content=decode_content,
- flush_decoder=False)
- if decoded:
- yield decoded
-
- if decode_content:
- # On CPython and PyPy, we should never need to flush the
- # decoder. However, on Jython we *might* need to, so
- # lets defensively do it anyway.
- decoded = self._flush_decoder()
- if decoded: # Platform-specific: Jython.
- yield decoded
-
- # Chunk content ends with \r\n: discard it.
- while True:
- line = self._fp.fp.readline()
- if not line:
- # Some sites may not end with '\r\n'.
- break
- if line == b'\r\n':
- break
-
- # We read everything; close the "file".
- if self._original_response:
- self._original_response.close()
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py b/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
deleted file mode 100644
index bba628d98..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-# For backwards compatibility, provide imports that used to be here.
-from .connection import is_connection_dropped
-from .request import make_headers
-from .response import is_fp_closed
-from .ssl_ import (
- SSLContext,
- HAS_SNI,
- IS_PYOPENSSL,
- IS_SECURETRANSPORT,
- assert_fingerprint,
- resolve_cert_reqs,
- resolve_ssl_version,
- ssl_wrap_socket,
-)
-from .timeout import (
- current_time,
- Timeout,
-)
-
-from .retry import Retry
-from .url import (
- get_host,
- parse_url,
- split_first,
- Url,
-)
-from .wait import (
- wait_for_read,
- wait_for_write
-)
-
-__all__ = (
- 'HAS_SNI',
- 'IS_PYOPENSSL',
- 'IS_SECURETRANSPORT',
- 'SSLContext',
- 'Retry',
- 'Timeout',
- 'Url',
- 'assert_fingerprint',
- 'current_time',
- 'is_connection_dropped',
- 'is_fp_closed',
- 'get_host',
- 'parse_url',
- 'make_headers',
- 'resolve_cert_reqs',
- 'resolve_ssl_version',
- 'split_first',
- 'ssl_wrap_socket',
- 'wait_for_read',
- 'wait_for_write'
-)
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/connection.py b/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
deleted file mode 100644
index 3bd69e8fa..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import socket
-from .wait import wait_for_read
-from .selectors import HAS_SELECT, SelectorError
-
-
-def is_connection_dropped(conn): # Platform-specific
- """
- Returns True if the connection is dropped and should be closed.
-
- :param conn:
- :class:`httplib.HTTPConnection` object.
-
- Note: For platforms like AppEngine, this will always return ``False`` to
- let the platform handle connection recycling transparently for us.
- """
- sock = getattr(conn, 'sock', False)
- if sock is False: # Platform-specific: AppEngine
- return False
- if sock is None: # Connection already closed (such as by httplib).
- return True
-
- if not HAS_SELECT:
- return False
-
- try:
- return bool(wait_for_read(sock, timeout=0.0))
- except SelectorError:
- return True
-
-
-# This function is copied from socket.py in the Python 2.7 standard
-# library test suite. Added to its signature is only `socket_options`.
-# One additional modification is that we avoid binding to IPv6 servers
-# discovered in DNS if the system doesn't have IPv6 functionality.
-def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
- source_address=None, socket_options=None):
- """Connect to *address* and return the socket object.
-
- Convenience function. Connect to *address* (a 2-tuple ``(host,
- port)``) and return the socket object. Passing the optional
- *timeout* parameter will set the timeout on the socket instance
- before attempting to connect. If no *timeout* is supplied, the
- global default timeout setting returned by :func:`getdefaulttimeout`
- is used. If *source_address* is set it must be a tuple of (host, port)
- for the socket to bind as a source address before making the connection.
- A host of '' or port 0 tells the OS to use the default.
- """
-
- host, port = address
- if host.startswith('['):
- host = host.strip('[]')
- err = None
-
- # Using the value from allowed_gai_family() in the context of getaddrinfo lets
- # us select whether to work with IPv4 DNS records, IPv6 records, or both.
- # The original create_connection function always returns all records.
- family = allowed_gai_family()
-
- for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- sock = None
- try:
- sock = socket.socket(af, socktype, proto)
-
- # If provided, set socket level options before connecting.
- _set_socket_options(sock, socket_options)
-
- if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
- sock.settimeout(timeout)
- if source_address:
- sock.bind(source_address)
- sock.connect(sa)
- return sock
-
- except socket.error as e:
- err = e
- if sock is not None:
- sock.close()
- sock = None
-
- if err is not None:
- raise err
-
- raise socket.error("getaddrinfo returns an empty list")
-
-
-def _set_socket_options(sock, options):
- if options is None:
- return
-
- for opt in options:
- sock.setsockopt(*opt)
-
-
-def allowed_gai_family():
- """This function is designed to work in the context of
- getaddrinfo, where family=socket.AF_UNSPEC is the default and
- will perform a DNS search for both IPv6 and IPv4 records."""
-
- family = socket.AF_INET
- if HAS_IPV6:
- family = socket.AF_UNSPEC
- return family
-
-
-def _has_ipv6(host):
- """ Returns True if the system can bind an IPv6 address. """
- sock = None
- has_ipv6 = False
-
- if socket.has_ipv6:
- # has_ipv6 returns true if cPython was compiled with IPv6 support.
- # It does not tell us if the system has IPv6 support enabled. To
- # determine that we must bind to an IPv6 address.
- # https://github.com/shazow/urllib3/pull/611
- # https://bugs.python.org/issue658327
- try:
- sock = socket.socket(socket.AF_INET6)
- sock.bind((host, 0))
- has_ipv6 = True
- except Exception:
- pass
-
- if sock:
- sock.close()
- return has_ipv6
-
-
-HAS_IPV6 = _has_ipv6('::1')
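`create_connection()` above is a copy of the stdlib helper whose one extension
is the `socket_options` hook: each entry is unpacked into `sock.setsockopt(*opt)`
before `connect()`. A hedged sketch, assuming outbound network access and a
placeholder host:

```python
import socket

# Sketch of the removed helper's socket_options hook; example.com is a
# placeholder, and the option shown disables Nagle's algorithm.
from urllib3.util.connection import create_connection

sock = create_connection(
    ("example.com", 80),
    timeout=5.0,
    socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
)
sock.close()
```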
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/request.py b/collectors/python.d.plugin/python_modules/urllib3/util/request.py
deleted file mode 100644
index 18f27b032..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/util/request.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-from base64 import b64encode
-
-from ..packages.six import b, integer_types
-from ..exceptions import UnrewindableBodyError
-
-ACCEPT_ENCODING = 'gzip,deflate'
-_FAILEDTELL = object()
-
-
-def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
- basic_auth=None, proxy_basic_auth=None, disable_cache=None):
- """
- Shortcuts for generating request headers.
-
- :param keep_alive:
- If ``True``, adds 'connection: keep-alive' header.
-
- :param accept_encoding:
- Can be a boolean, list, or string.
- ``True`` translates to 'gzip,deflate'.
- List will get joined by comma.
- String will be used as provided.
-
- :param user_agent:
- String representing the user-agent you want, such as
- "python-urllib3/0.6"
-
- :param basic_auth:
- Colon-separated username:password string for 'authorization: basic ...'
- auth header.
-
- :param proxy_basic_auth:
- Colon-separated username:password string for 'proxy-authorization: basic ...'
- auth header.
-
- :param disable_cache:
- If ``True``, adds 'cache-control: no-cache' header.
-
- Example::
-
- >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
- {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
- >>> make_headers(accept_encoding=True)
- {'accept-encoding': 'gzip,deflate'}
- """
- headers = {}
- if accept_encoding:
- if isinstance(accept_encoding, str):
- pass
- elif isinstance(accept_encoding, list):
- accept_encoding = ','.join(accept_encoding)
- else:
- accept_encoding = ACCEPT_ENCODING
- headers['accept-encoding'] = accept_encoding
-
- if user_agent:
- headers['user-agent'] = user_agent
-
- if keep_alive:
- headers['connection'] = 'keep-alive'
-
- if basic_auth:
- headers['authorization'] = 'Basic ' + \
- b64encode(b(basic_auth)).decode('utf-8')
-
- if proxy_basic_auth:
- headers['proxy-authorization'] = 'Basic ' + \
- b64encode(b(proxy_basic_auth)).decode('utf-8')
-
- if disable_cache:
- headers['cache-control'] = 'no-cache'
-
- return headers
-
-
-def set_file_position(body, pos):
- """
- If a position is provided, move file to that point.
- Otherwise, we'll attempt to record a position for future use.
- """
- if pos is not None:
- rewind_body(body, pos)
- elif getattr(body, 'tell', None) is not None:
- try:
- pos = body.tell()
- except (IOError, OSError):
- # This differentiates from None, allowing us to catch
- # a failed `tell()` later when trying to rewind the body.
- pos = _FAILEDTELL
-
- return pos
-
-
-def rewind_body(body, body_pos):
- """
- Attempt to rewind body to a certain position.
- Primarily used for request redirects and retries.
-
- :param body:
- File-like object that supports seek.
-
- :param int body_pos:
- Position to seek to in file.
- """
- body_seek = getattr(body, 'seek', None)
- if body_seek is not None and isinstance(body_pos, integer_types):
- try:
- body_seek(body_pos)
- except (IOError, OSError):
- raise UnrewindableBodyError("An error occurred when rewinding request "
- "body for redirect/retry.")
- elif body_pos is _FAILEDTELL:
- raise UnrewindableBodyError("Unable to record file position for rewinding "
- "request body during a redirect/retry.")
- else:
- raise ValueError("body_pos must be of type integer, "
- "instead it was %s." % type(body_pos))
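`set_file_position()` and `rewind_body()` above implement the record-then-rewind
protocol that retries and redirects rely on for file-like request bodies. A
self-contained illustration using `io.BytesIO`:

```python
import io

from urllib3.util.request import rewind_body, set_file_position

body = io.BytesIO(b"payload")
pos = set_file_position(body, None)  # no position given: records body.tell()
body.read()                          # a failed attempt consumes the body
rewind_body(body, pos)               # seek back before the retry/redirect
assert body.read() == b"payload"
```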
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/response.py b/collectors/python.d.plugin/python_modules/urllib3/util/response.py
deleted file mode 100644
index e4cda93d4..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/util/response.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-from ..packages.six.moves import http_client as httplib
-
-from ..exceptions import HeaderParsingError
-
-
-def is_fp_closed(obj):
- """
- Checks whether a given file-like object is closed.
-
- :param obj:
- The file-like object to check.
- """
-
- try:
- # Check `isclosed()` first, in case Python3 doesn't set `closed`.
- # GH Issue #928
- return obj.isclosed()
- except AttributeError:
- pass
-
- try:
- # Check via the official file-like-object way.
- return obj.closed
- except AttributeError:
- pass
-
- try:
- # Check if the object is a container for another file-like object that
- # gets released on exhaustion (e.g. HTTPResponse).
- return obj.fp is None
- except AttributeError:
- pass
-
- raise ValueError("Unable to determine whether fp is closed.")
-
-
-def assert_header_parsing(headers):
- """
- Asserts whether all headers have been successfully parsed.
- Extracts encountered errors from the result of parsing headers.
-
- Only works on Python 3.
-
- :param headers: Headers to verify.
- :type headers: `httplib.HTTPMessage`.
-
- :raises urllib3.exceptions.HeaderParsingError:
- If parsing errors are found.
- """
-
- # This will fail silently if we pass in the wrong kind of parameter.
- # To make debugging easier add an explicit check.
- if not isinstance(headers, httplib.HTTPMessage):
- raise TypeError('expected httplib.HTTPMessage, got {0}.'.format(
- type(headers)))
-
- defects = getattr(headers, 'defects', None)
- get_payload = getattr(headers, 'get_payload', None)
-
- unparsed_data = None
- if get_payload: # Platform-specific: Python 3.
- unparsed_data = get_payload()
-
- if defects or unparsed_data:
- raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
-
-
-def is_response_to_head(response):
- """
- Checks whether the request that produced this response was a HEAD request.
- Handles the quirks of AppEngine.
-
- :param response:
- :type response: :class:`httplib.HTTPResponse`
- """
- # FIXME: Can we do this somehow without accessing private httplib _method?
- method = response._method
- if isinstance(method, int): # Platform-specific: Appengine
- return method == 3
- return method.upper() == 'HEAD'
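`is_fp_closed()` above probes three conventions in order (`isclosed()`,
`closed`, `fp`) because file-like response bodies differ across Python
versions. A quick illustration with a plain `BytesIO`, which only exposes the
`closed` attribute:

```python
import io

from urllib3.util.response import is_fp_closed

fp = io.BytesIO(b"data")
print(is_fp_closed(fp))  # False: falls through to the `closed` attribute
fp.close()
print(is_fp_closed(fp))  # True
```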
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/retry.py b/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
deleted file mode 100644
index 61e63afec..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
+++ /dev/null
@@ -1,402 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import time
-import logging
-from collections import namedtuple
-from itertools import takewhile
-import email
-import re
-
-from ..exceptions import (
- ConnectTimeoutError,
- MaxRetryError,
- ProtocolError,
- ReadTimeoutError,
- ResponseError,
- InvalidHeader,
-)
-from ..packages import six
-
-
-log = logging.getLogger(__name__)
-
-# Data structure for representing the metadata of requests that result in a retry.
-RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
- "status", "redirect_location"])
-
-
-class Retry(object):
- """ Retry configuration.
-
- Each retry attempt will create a new Retry object with updated values, so
- they can be safely reused.
-
- Retries can be defined as a default for a pool::
-
- retries = Retry(connect=5, read=2, redirect=5)
- http = PoolManager(retries=retries)
- response = http.request('GET', 'http://example.com/')
-
- Or per-request (which overrides the default for the pool)::
-
- response = http.request('GET', 'http://example.com/', retries=Retry(10))
-
- Retries can be disabled by passing ``False``::
-
- response = http.request('GET', 'http://example.com/', retries=False)
-
- Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
- retries are disabled, in which case the causing exception will be raised.
-
- :param int total:
- Total number of retries to allow. Takes precedence over other counts.
-
- Set to ``None`` to remove this constraint and fall back on other
- counts. It's a good idea to set this to some sensibly-high value to
- account for unexpected edge cases and avoid infinite retry loops.
-
- Set to ``0`` to fail on the first retry.
-
- Set to ``False`` to disable and imply ``raise_on_redirect=False``.
-
- :param int connect:
- How many connection-related errors to retry on.
-
- These are errors raised before the request is sent to the remote server,
- which we assume has not triggered the server to process the request.
-
- Set to ``0`` to fail on the first retry of this type.
-
- :param int read:
- How many times to retry on read errors.
-
- These errors are raised after the request was sent to the server, so the
- request may have side-effects.
-
- Set to ``0`` to fail on the first retry of this type.
-
- :param int redirect:
- How many redirects to perform. Limit this to avoid infinite redirect
- loops.
-
- A redirect is an HTTP response with a status code 301, 302, 303, 307 or
- 308.
-
- Set to ``0`` to fail on the first retry of this type.
-
- Set to ``False`` to disable and imply ``raise_on_redirect=False``.
-
- :param int status:
- How many times to retry on bad status codes.
-
- These are retries made on responses, where status code matches
- ``status_forcelist``.
-
- Set to ``0`` to fail on the first retry of this type.
-
- :param iterable method_whitelist:
- Set of uppercased HTTP method verbs that we should retry on.
-
- By default, we only retry on methods which are considered to be
- idempotent (multiple requests with the same parameters end with the
- same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
-
- Set to a ``False`` value to retry on any verb.
-
- :param iterable status_forcelist:
- A set of integer HTTP status codes that we should force a retry on.
- A retry is initiated if the request method is in ``method_whitelist``
- and the response status code is in ``status_forcelist``.
-
- By default, this is disabled with ``None``.
-
- :param float backoff_factor:
- A backoff factor to apply between attempts after the second try
- (most errors are resolved immediately by a second try without a
- delay). urllib3 will sleep for::
-
- {backoff factor} * (2 ^ ({number of total retries} - 1))
-
- seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
- for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
- than :attr:`Retry.BACKOFF_MAX`.
-
- By default, backoff is disabled (set to 0).
-
- :param bool raise_on_redirect: Whether, if the number of redirects is
- exhausted, to raise a MaxRetryError, or to return a response with a
- response code in the 3xx range.
-
- :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
- whether we should raise an exception, or return a response,
- if status falls in ``status_forcelist`` range and retries have
- been exhausted.
-
- :param tuple history: The history of the request encountered during
- each call to :meth:`~Retry.increment`. The list is in the order
- the requests occurred. Each list item is of class :class:`RequestHistory`.
-
- :param bool respect_retry_after_header:
- Whether to respect Retry-After header on status codes defined as
- :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
-
- """
-
- DEFAULT_METHOD_WHITELIST = frozenset([
- 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
-
- RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
-
- #: Maximum backoff time.
- BACKOFF_MAX = 120
-
- def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
- method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
- backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
- history=None, respect_retry_after_header=True):
-
- self.total = total
- self.connect = connect
- self.read = read
- self.status = status
-
- if redirect is False or total is False:
- redirect = 0
- raise_on_redirect = False
-
- self.redirect = redirect
- self.status_forcelist = status_forcelist or set()
- self.method_whitelist = method_whitelist
- self.backoff_factor = backoff_factor
- self.raise_on_redirect = raise_on_redirect
- self.raise_on_status = raise_on_status
- self.history = history or tuple()
- self.respect_retry_after_header = respect_retry_after_header
-
- def new(self, **kw):
- params = dict(
- total=self.total,
- connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
- method_whitelist=self.method_whitelist,
- status_forcelist=self.status_forcelist,
- backoff_factor=self.backoff_factor,
- raise_on_redirect=self.raise_on_redirect,
- raise_on_status=self.raise_on_status,
- history=self.history,
- )
- params.update(kw)
- return type(self)(**params)
-
- @classmethod
- def from_int(cls, retries, redirect=True, default=None):
- """ Backwards-compatibility for the old retries format."""
- if retries is None:
- retries = default if default is not None else cls.DEFAULT
-
- if isinstance(retries, Retry):
- return retries
-
- redirect = bool(redirect) and None
- new_retries = cls(retries, redirect=redirect)
- log.debug("Converted retries value: %r -> %r", retries, new_retries)
- return new_retries
-
- def get_backoff_time(self):
- """ Formula for computing the current backoff
-
- :rtype: float
- """
- # We want to consider only the last consecutive errors sequence (ignoring redirects).
- consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
- reversed(self.history))))
- if consecutive_errors_len <= 1:
- return 0
-
- backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
- return min(self.BACKOFF_MAX, backoff_value)
-
- def parse_retry_after(self, retry_after):
- # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
- if re.match(r"^\s*[0-9]+\s*$", retry_after):
- seconds = int(retry_after)
- else:
- retry_date_tuple = email.utils.parsedate(retry_after)
- if retry_date_tuple is None:
- raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
- retry_date = time.mktime(retry_date_tuple)
- seconds = retry_date - time.time()
-
- if seconds < 0:
- seconds = 0
-
- return seconds
-
- def get_retry_after(self, response):
- """ Get the value of Retry-After in seconds. """
-
- retry_after = response.getheader("Retry-After")
-
- if retry_after is None:
- return None
-
- return self.parse_retry_after(retry_after)
-
- def sleep_for_retry(self, response=None):
- retry_after = self.get_retry_after(response)
- if retry_after:
- time.sleep(retry_after)
- return True
-
- return False
-
- def _sleep_backoff(self):
- backoff = self.get_backoff_time()
- if backoff <= 0:
- return
- time.sleep(backoff)
-
- def sleep(self, response=None):
- """ Sleep between retry attempts.
-
- This method will respect a server's ``Retry-After`` response header
- and sleep the duration of the time requested. If that is not present, it
- will use an exponential backoff. By default, the backoff factor is 0 and
- this method will return immediately.
- """
-
- if response:
- slept = self.sleep_for_retry(response)
- if slept:
- return
-
- self._sleep_backoff()
-
- def _is_connection_error(self, err):
- """ Errors when we're fairly sure that the server did not receive the
- request, so it should be safe to retry.
- """
- return isinstance(err, ConnectTimeoutError)
-
- def _is_read_error(self, err):
- """ Errors that occur after the request has been started, so we should
- assume that the server began processing it.
- """
- return isinstance(err, (ReadTimeoutError, ProtocolError))
-
- def _is_method_retryable(self, method):
- """ Checks if a given HTTP method should be retried upon, depending if
- it is included on the method whitelist.
- """
- if self.method_whitelist and method.upper() not in self.method_whitelist:
- return False
-
- return True
-
- def is_retry(self, method, status_code, has_retry_after=False):
- """ Is this method/status code retryable? (Based on whitelists and control
- variables such as the number of total retries to allow, whether to
- respect the Retry-After header, whether this header is present, and
- whether the returned status code is on the list of status codes to
- be retried upon on the presence of the aforementioned header)
- """
- if not self._is_method_retryable(method):
- return False
-
- if self.status_forcelist and status_code in self.status_forcelist:
- return True
-
- return (self.total and self.respect_retry_after_header and
- has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
-
- def is_exhausted(self):
- """ Are we out of retries? """
- retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
- retry_counts = list(filter(None, retry_counts))
- if not retry_counts:
- return False
-
- return min(retry_counts) < 0
-
- def increment(self, method=None, url=None, response=None, error=None,
- _pool=None, _stacktrace=None):
- """ Return a new Retry object with incremented retry counters.
-
- :param response: A response object, or None, if the server did not
- return a response.
- :type response: :class:`~urllib3.response.HTTPResponse`
- :param Exception error: An error encountered during the request, or
- None if the response was received successfully.
-
- :return: A new ``Retry`` object.
- """
- if self.total is False and error:
- # Disabled, indicate to re-raise the error.
- raise six.reraise(type(error), error, _stacktrace)
-
- total = self.total
- if total is not None:
- total -= 1
-
- connect = self.connect
- read = self.read
- redirect = self.redirect
- status_count = self.status
- cause = 'unknown'
- status = None
- redirect_location = None
-
- if error and self._is_connection_error(error):
- # Connect retry?
- if connect is False:
- raise six.reraise(type(error), error, _stacktrace)
- elif connect is not None:
- connect -= 1
-
- elif error and self._is_read_error(error):
- # Read retry?
- if read is False or not self._is_method_retryable(method):
- raise six.reraise(type(error), error, _stacktrace)
- elif read is not None:
- read -= 1
-
- elif response and response.get_redirect_location():
- # Redirect retry?
- if redirect is not None:
- redirect -= 1
- cause = 'too many redirects'
- redirect_location = response.get_redirect_location()
- status = response.status
-
- else:
- # Incrementing because of a server error like a 500 in
- # status_forcelist and the given method is in the whitelist
- cause = ResponseError.GENERIC_ERROR
- if response and response.status:
- if status_count is not None:
- status_count -= 1
- cause = ResponseError.SPECIFIC_ERROR.format(
- status_code=response.status)
- status = response.status
-
- history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
-
- new_retry = self.new(
- total=total,
- connect=connect, read=read, redirect=redirect, status=status_count,
- history=history)
-
- if new_retry.is_exhausted():
- raise MaxRetryError(_pool, url, error or ResponseError(cause))
-
- log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
-
- return new_retry
-
- def __repr__(self):
- return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
- 'read={self.read}, redirect={self.redirect}, status={self.status})').format(
- cls=type(self), self=self)
-
-
-# For backwards compatibility (equivalent to pre-v1.9):
-Retry.DEFAULT = Retry(3)
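To make the backoff formula in the docstring above concrete, here is a sketch
that drives `increment()` by hand with no real requests; each consecutive
recorded error doubles the value `get_backoff_time()` returns, with the first
retry exempt and the result capped at `BACKOFF_MAX`:

```python
from urllib3.util.retry import Retry

retry = Retry(total=5, backoff_factor=0.1)
for _ in range(3):
    # Simulate a failed attempt; increment() returns a *new* Retry object.
    retry = retry.increment(method="GET", url="http://example.com/")
    print(len(retry.history), retry.get_backoff_time())
# Prints backoffs of 0, 0.2 and 0.4 seconds, i.e.
# backoff_factor * 2 ** (consecutive_errors - 1) once more than one
# error has accumulated.
```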
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py b/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
deleted file mode 100644
index de5e49838..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
+++ /dev/null
@@ -1,588 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Backport of selectors.py from Python 3.5+ to support Python < 3.4
-# Also has the behavior specified in PEP 475 which is to retry syscalls
-# in the case of an EINTR error. This module is required because selectors34
-# does not follow this behavior and instead returns that no file descriptor
-# events have occurred rather than retry the syscall. The decision to drop
-# support for select.devpoll is made to maintain 100% test coverage.
-
-import errno
-import math
-import select
-import socket
-import sys
-import time
-
-from collections import namedtuple
-
-try:
- from collections import Mapping
-except ImportError:
- from collections.abc import Mapping
-
-try:
- monotonic = time.monotonic
-except (AttributeError, ImportError): # Python < 3.3
- monotonic = time.time
-
-EVENT_READ = (1 << 0)
-EVENT_WRITE = (1 << 1)
-
-HAS_SELECT = True # Variable that shows whether the platform has a selector.
-_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
-_DEFAULT_SELECTOR = None
-
-
-class SelectorError(Exception):
- def __init__(self, errcode):
- super(SelectorError, self).__init__()
- self.errno = errcode
-
- def __repr__(self):
- return "<SelectorError errno={0}>".format(self.errno)
-
- def __str__(self):
- return self.__repr__()
-
-
-def _fileobj_to_fd(fileobj):
- """ Return a file descriptor from a file object. If
- given an integer, it simply returns that integer back. """
- if isinstance(fileobj, int):
- fd = fileobj
- else:
- try:
- fd = int(fileobj.fileno())
- except (AttributeError, TypeError, ValueError):
- raise ValueError("Invalid file object: {0!r}".format(fileobj))
- if fd < 0:
- raise ValueError("Invalid file descriptor: {0}".format(fd))
- return fd
-
-
-# Determine which function to use to wrap system calls because Python 3.5+
-# already handles the case when system calls are interrupted.
-if sys.version_info >= (3, 5):
- def _syscall_wrapper(func, _, *args, **kwargs):
- """ This is the short-circuit version of the below logic
- because in Python 3.5+ all system calls automatically restart
- and recalculate their timeouts. """
- try:
- return func(*args, **kwargs)
- except (OSError, IOError, select.error) as e:
- errcode = None
- if hasattr(e, "errno"):
- errcode = e.errno
- raise SelectorError(errcode)
-else:
- def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
- """ Wrapper function for syscalls that could fail due to EINTR.
- All functions should be retried if there is time left in the timeout
- in accordance with PEP 475. """
- timeout = kwargs.get("timeout", None)
- if timeout is None:
- expires = None
- recalc_timeout = False
- else:
- timeout = float(timeout)
- if timeout < 0.0: # Timeout less than 0 treated as no timeout.
- expires = None
- else:
- expires = monotonic() + timeout
-
- args = list(args)
- if recalc_timeout and "timeout" not in kwargs:
- raise ValueError(
- "Timeout must be in args or kwargs to be recalculated")
-
- result = _SYSCALL_SENTINEL
- while result is _SYSCALL_SENTINEL:
- try:
- result = func(*args, **kwargs)
- # OSError is thrown by select.select
- # IOError is thrown by select.epoll.poll
- # select.error is thrown by select.poll.poll
- # Aren't we thankful for Python 3.x rework for exceptions?
- except (OSError, IOError, select.error) as e:
- # select.error wasn't a subclass of OSError in the past.
- errcode = None
- if hasattr(e, "errno"):
- errcode = e.errno
- elif hasattr(e, "args"):
- errcode = e.args[0]
-
- # Also test for the Windows equivalent of EINTR.
- is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
- errcode == errno.WSAEINTR))
-
- if is_interrupt:
- if expires is not None:
- current_time = monotonic()
- if current_time > expires:
- raise OSError(errno.ETIMEDOUT, "timed out")  # errno is positional
- if recalc_timeout:
- if "timeout" in kwargs:
- kwargs["timeout"] = expires - current_time
- continue
- if errcode:
- raise SelectorError(errcode)
- else:
- raise
- return result
-
-
-SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
-
-
-class _SelectorMapping(Mapping):
- """ Mapping of file objects to selector keys """
-
- def __init__(self, selector):
- self._selector = selector
-
- def __len__(self):
- return len(self._selector._fd_to_key)
-
- def __getitem__(self, fileobj):
- try:
- fd = self._selector._fileobj_lookup(fileobj)
- return self._selector._fd_to_key[fd]
- except KeyError:
- raise KeyError("{0!r} is not registered.".format(fileobj))
-
- def __iter__(self):
- return iter(self._selector._fd_to_key)
-
-
-class BaseSelector(object):
- """ Abstract Selector class
-
- A selector supports registering file objects to be monitored
- for specific I/O events.
-
- A file object is a file descriptor or any object with a
- `fileno()` method. An arbitrary object can be attached to the
- file object which can be used for example to store context info,
- a callback, etc.
-
- A selector can use various implementations (select(), poll(), epoll(),
- and kqueue()) depending on the platform. The 'DefaultSelector' class uses
- the most efficient implementation for the current platform.
- """
- def __init__(self):
- # Maps file descriptors to keys.
- self._fd_to_key = {}
-
- # Read-only mapping returned by get_map()
- self._map = _SelectorMapping(self)
-
- def _fileobj_lookup(self, fileobj):
- """ Return a file descriptor from a file object.
- This wraps _fileobj_to_fd() to do an exhaustive
- search in case the object is invalid but we still
- have it in our map. Used by unregister() so we can
- unregister an object that was previously registered
- even if it is closed. It is also used by _SelectorMapping.
- """
- try:
- return _fileobj_to_fd(fileobj)
- except ValueError:
-
- # Search through all our mapped keys.
- for key in self._fd_to_key.values():
- if key.fileobj is fileobj:
- return key.fd
-
- # Raise ValueError after all.
- raise
-
- def register(self, fileobj, events, data=None):
- """ Register a file object for a set of events to monitor. """
- if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
- raise ValueError("Invalid events: {0!r}".format(events))
-
- key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
-
- if key.fd in self._fd_to_key:
- raise KeyError("{0!r} (FD {1}) is already registered"
- .format(fileobj, key.fd))
-
- self._fd_to_key[key.fd] = key
- return key
-
- def unregister(self, fileobj):
- """ Unregister a file object from being monitored. """
- try:
- key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
- except KeyError:
- raise KeyError("{0!r} is not registered".format(fileobj))
-
- # Getting the fileno of a closed socket on Windows errors with EBADF.
- except socket.error as e: # Platform-specific: Windows.
- if e.errno != errno.EBADF:
- raise
- else:
- for key in self._fd_to_key.values():
- if key.fileobj is fileobj:
- self._fd_to_key.pop(key.fd)
- break
- else:
- raise KeyError("{0!r} is not registered".format(fileobj))
- return key
-
- def modify(self, fileobj, events, data=None):
- """ Change a registered file object monitored events and data. """
- # NOTE: Some subclasses optimize this operation even further.
- try:
- key = self._fd_to_key[self._fileobj_lookup(fileobj)]
- except KeyError:
- raise KeyError("{0!r} is not registered".format(fileobj))
-
- if events != key.events:
- self.unregister(fileobj)
- key = self.register(fileobj, events, data)
-
- elif data != key.data:
- # Use a shortcut to update the data.
- key = key._replace(data=data)
- self._fd_to_key[key.fd] = key
-
- return key
-
- def select(self, timeout=None):
- """ Perform the actual selection until some monitored file objects
- are ready or the timeout expires. """
- raise NotImplementedError()
-
- def close(self):
- """ Close the selector. This must be called to ensure that all
- underlying resources are freed. """
- self._fd_to_key.clear()
- self._map = None
-
- def get_key(self, fileobj):
- """ Return the key associated with a registered file object. """
- mapping = self.get_map()
- if mapping is None:
- raise RuntimeError("Selector is closed")
- try:
- return mapping[fileobj]
- except KeyError:
- raise KeyError("{0!r} is not registered".format(fileobj))
-
- def get_map(self):
- """ Return a mapping of file objects to selector keys """
- return self._map
-
- def _key_from_fd(self, fd):
- """ Return the key associated to a given file descriptor
- Return None if it is not found. """
- try:
- return self._fd_to_key[fd]
- except KeyError:
- return None
-
- def __enter__(self):
- return self
-
- def __exit__(self, *args):
- self.close()
-
-
-# Almost all platforms have select.select()
-if hasattr(select, "select"):
- class SelectSelector(BaseSelector):
- """ Select-based selector. """
- def __init__(self):
- super(SelectSelector, self).__init__()
- self._readers = set()
- self._writers = set()
-
- def register(self, fileobj, events, data=None):
- key = super(SelectSelector, self).register(fileobj, events, data)
- if events & EVENT_READ:
- self._readers.add(key.fd)
- if events & EVENT_WRITE:
- self._writers.add(key.fd)
- return key
-
- def unregister(self, fileobj):
- key = super(SelectSelector, self).unregister(fileobj)
- self._readers.discard(key.fd)
- self._writers.discard(key.fd)
- return key
-
- def _select(self, r, w, timeout=None):
- """ Wrapper for select.select because timeout is a positional arg """
- return select.select(r, w, [], timeout)
-
- def select(self, timeout=None):
- # Selecting on empty lists on Windows errors out.
- if not len(self._readers) and not len(self._writers):
- return []
-
- timeout = None if timeout is None else max(timeout, 0.0)
- ready = []
- r, w, _ = _syscall_wrapper(self._select, True, self._readers,
- self._writers, timeout)
- r = set(r)
- w = set(w)
- for fd in r | w:
- events = 0
- if fd in r:
- events |= EVENT_READ
- if fd in w:
- events |= EVENT_WRITE
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
- return ready
-
-
-if hasattr(select, "poll"):
- class PollSelector(BaseSelector):
- """ Poll-based selector """
- def __init__(self):
- super(PollSelector, self).__init__()
- self._poll = select.poll()
-
- def register(self, fileobj, events, data=None):
- key = super(PollSelector, self).register(fileobj, events, data)
- event_mask = 0
- if events & EVENT_READ:
- event_mask |= select.POLLIN
- if events & EVENT_WRITE:
- event_mask |= select.POLLOUT
- self._poll.register(key.fd, event_mask)
- return key
-
- def unregister(self, fileobj):
- key = super(PollSelector, self).unregister(fileobj)
- self._poll.unregister(key.fd)
- return key
-
- def _wrap_poll(self, timeout=None):
- """ Wrapper function for select.poll.poll() so that
- _syscall_wrapper can work with only seconds. """
- if timeout is not None:
- if timeout <= 0:
- timeout = 0
- else:
- # select.poll.poll() has a resolution of 1 millisecond,
- # round away from zero to wait *at least* timeout seconds.
- timeout = math.ceil(timeout * 1e3)
-
- result = self._poll.poll(timeout)
- return result
-
- def select(self, timeout=None):
- ready = []
- fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
- for fd, event_mask in fd_events:
- events = 0
- if event_mask & ~select.POLLIN:
- events |= EVENT_WRITE
- if event_mask & ~select.POLLOUT:
- events |= EVENT_READ
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
-
- return ready
-
-
-if hasattr(select, "epoll"):
- class EpollSelector(BaseSelector):
- """ Epoll-based selector """
- def __init__(self):
- super(EpollSelector, self).__init__()
- self._epoll = select.epoll()
-
- def fileno(self):
- return self._epoll.fileno()
-
- def register(self, fileobj, events, data=None):
- key = super(EpollSelector, self).register(fileobj, events, data)
- events_mask = 0
- if events & EVENT_READ:
- events_mask |= select.EPOLLIN
- if events & EVENT_WRITE:
- events_mask |= select.EPOLLOUT
- _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
- return key
-
- def unregister(self, fileobj):
- key = super(EpollSelector, self).unregister(fileobj)
- try:
- _syscall_wrapper(self._epoll.unregister, False, key.fd)
- except SelectorError:
- # This can occur when the fd was closed after it was registered.
- pass
- return key
-
- def select(self, timeout=None):
- if timeout is not None:
- if timeout <= 0:
- timeout = 0.0
- else:
- # select.epoll.poll() has a resolution of 1 millisecond
- # but takes its timeout in seconds, so unlike PollSelector
- # no converting wrapper is needed; we only round up.
- timeout = math.ceil(timeout * 1e3) * 1e-3
- timeout = float(timeout)
- else:
- timeout = -1.0 # epoll.poll() must have a float.
-
- # We always want at least 1 to ensure that select can be called
- # with no file descriptors registered. Otherwise will fail.
- max_events = max(len(self._fd_to_key), 1)
-
- ready = []
- fd_events = _syscall_wrapper(self._epoll.poll, True,
- timeout=timeout,
- maxevents=max_events)
- for fd, event_mask in fd_events:
- events = 0
- if event_mask & ~select.EPOLLIN:
- events |= EVENT_WRITE
- if event_mask & ~select.EPOLLOUT:
- events |= EVENT_READ
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
- return ready
-
- def close(self):
- self._epoll.close()
- super(EpollSelector, self).close()
-
-
-if hasattr(select, "kqueue"):
- class KqueueSelector(BaseSelector):
- """ Kqueue / Kevent-based selector """
- def __init__(self):
- super(KqueueSelector, self).__init__()
- self._kqueue = select.kqueue()
-
- def fileno(self):
- return self._kqueue.fileno()
-
- def register(self, fileobj, events, data=None):
- key = super(KqueueSelector, self).register(fileobj, events, data)
- if events & EVENT_READ:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_READ,
- select.KQ_EV_ADD)
-
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
-
- if events & EVENT_WRITE:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_WRITE,
- select.KQ_EV_ADD)
-
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
-
- return key
-
- def unregister(self, fileobj):
- key = super(KqueueSelector, self).unregister(fileobj)
- if key.events & EVENT_READ:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_READ,
- select.KQ_EV_DELETE)
- try:
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
- except SelectorError:
- pass
- if key.events & EVENT_WRITE:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_WRITE,
- select.KQ_EV_DELETE)
- try:
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
- except SelectorError:
- pass
-
- return key
-
- def select(self, timeout=None):
- if timeout is not None:
- timeout = max(timeout, 0)
-
- max_events = len(self._fd_to_key) * 2
- ready_fds = {}
-
- kevent_list = _syscall_wrapper(self._kqueue.control, True,
- None, max_events, timeout)
-
- for kevent in kevent_list:
- fd = kevent.ident
- event_mask = kevent.filter
- events = 0
- if event_mask == select.KQ_FILTER_READ:
- events |= EVENT_READ
- if event_mask == select.KQ_FILTER_WRITE:
- events |= EVENT_WRITE
-
- key = self._key_from_fd(fd)
- if key:
- if key.fd not in ready_fds:
- ready_fds[key.fd] = (key, events & key.events)
- else:
- old_events = ready_fds[key.fd][1]
- ready_fds[key.fd] = (key, (events | old_events) & key.events)
-
- return list(ready_fds.values())
-
- def close(self):
- self._kqueue.close()
- super(KqueueSelector, self).close()
-
-
-if not hasattr(select, 'select'): # Platform-specific: AppEngine
- HAS_SELECT = False
-
-
-def _can_allocate(struct):
- """ Checks that select structs can be allocated by the underlying
- operating system, not just advertised by the select module. We don't
- check select() because we assume that platforms which lack it
- will not advertise it (e.g. GAE). """
- try:
- # select.poll() objects won't fail until used.
- if struct == 'poll':
- p = select.poll()
- p.poll(0)
-
- # All others will fail on allocation.
- else:
- getattr(select, struct)().close()
- return True
- except (OSError, AttributeError) as e:
- return False
-
-
-# Choose the best implementation, roughly:
-# kqueue == epoll > poll > select. Devpoll not supported. (See above)
-# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
-def DefaultSelector():
- """ Chooses the default selector lazily, on first call, so that
- monkey-patching of the select module (e.g. by eventlet or greenlet)
- is detected and proper behavior is preserved. """
- global _DEFAULT_SELECTOR
- if _DEFAULT_SELECTOR is None:
- if _can_allocate('kqueue'):
- _DEFAULT_SELECTOR = KqueueSelector
- elif _can_allocate('epoll'):
- _DEFAULT_SELECTOR = EpollSelector
- elif _can_allocate('poll'):
- _DEFAULT_SELECTOR = PollSelector
- elif hasattr(select, 'select'):
- _DEFAULT_SELECTOR = SelectSelector
- else: # Platform-specific: AppEngine
- raise ValueError('Platform does not have a selector')
- return _DEFAULT_SELECTOR()
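The backport above mirrors the stdlib `selectors` API: register file objects
for `EVENT_READ`/`EVENT_WRITE`, then poll with `select()`. A minimal sketch,
assuming a POSIX host where `socket.socketpair()` is available:

```python
import socket

from urllib3.util.selectors import DefaultSelector, EVENT_READ

a, b = socket.socketpair()
sel = DefaultSelector()  # kqueue/epoll/poll/select, whichever allocates
sel.register(a, EVENT_READ, data="peer-a")
b.send(b"x")             # make `a` readable
for key, events in sel.select(timeout=1.0):
    print(key.data, bool(events & EVENT_READ))  # -> peer-a True
sel.close()
a.close()
b.close()
```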
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py b/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
deleted file mode 100644
index ece3ec39e..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import errno
-import warnings
-import hmac
-
-from binascii import hexlify, unhexlify
-from hashlib import md5, sha1, sha256
-
-from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
-
-
-SSLContext = None
-HAS_SNI = False
-IS_PYOPENSSL = False
-IS_SECURETRANSPORT = False
-
-# Maps the length of a digest to a possible hash function producing this digest
-HASHFUNC_MAP = {
- 32: md5,
- 40: sha1,
- 64: sha256,
-}
-
-
-def _const_compare_digest_backport(a, b):
- """
- Compare two digests of equal length in constant time.
-
- The digests must be of type str/bytes.
- Returns True if the digests match, and False otherwise.
- """
- result = abs(len(a) - len(b))
- for l, r in zip(bytearray(a), bytearray(b)):
- result |= l ^ r
- return result == 0
-
-
-_const_compare_digest = getattr(hmac, 'compare_digest',
- _const_compare_digest_backport)
-
-
-try: # Test for SSL features
- import ssl
- from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
- from ssl import HAS_SNI # Has SNI?
-except ImportError:
- pass
-
-
-try:
- from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
-except ImportError:
- OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
- OP_NO_COMPRESSION = 0x20000
-
-# A secure default.
-# Sources for more information on TLS ciphers:
-#
-# - https://wiki.mozilla.org/Security/Server_Side_TLS
-# - https://www.ssllabs.com/projects/best-practices/index.html
-# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
-#
-# The general intent is:
-# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
-# - prefer ECDHE over DHE for better performance,
-# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
-# security,
-# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
-# - disable NULL authentication, MD5 MACs and DSS for security reasons.
-DEFAULT_CIPHERS = ':'.join([
- 'ECDH+AESGCM',
- 'ECDH+CHACHA20',
- 'DH+AESGCM',
- 'DH+CHACHA20',
- 'ECDH+AES256',
- 'DH+AES256',
- 'ECDH+AES128',
- 'DH+AES',
- 'RSA+AESGCM',
- 'RSA+AES',
- '!aNULL',
- '!eNULL',
- '!MD5',
-])
-
-try:
- from ssl import SSLContext # Modern SSL?
-except ImportError:
- import sys
-
- class SSLContext(object): # Platform-specific: Python 2 & 3.1
- supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
- (3, 2) <= sys.version_info)
-
- def __init__(self, protocol_version):
- self.protocol = protocol_version
- # Use default values from a real SSLContext
- self.check_hostname = False
- self.verify_mode = ssl.CERT_NONE
- self.ca_certs = None
- self.options = 0
- self.certfile = None
- self.keyfile = None
- self.ciphers = None
-
- def load_cert_chain(self, certfile, keyfile):
- self.certfile = certfile
- self.keyfile = keyfile
-
- def load_verify_locations(self, cafile=None, capath=None):
- self.ca_certs = cafile
-
- if capath is not None:
- raise SSLError("CA directories not supported in older Pythons")
-
- def set_ciphers(self, cipher_suite):
- if not self.supports_set_ciphers:
- raise TypeError(
- 'Your version of Python does not support setting '
- 'a custom cipher suite. Please upgrade to Python '
- '2.7, 3.2, or later if you need this functionality.'
- )
- self.ciphers = cipher_suite
-
- def wrap_socket(self, socket, server_hostname=None, server_side=False):
- warnings.warn(
- 'A true SSLContext object is not available. This prevents '
- 'urllib3 from configuring SSL appropriately and may cause '
- 'certain SSL connections to fail. You can upgrade to a newer '
- 'version of Python to solve this. For more information, see '
- 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
- '#ssl-warnings',
- InsecurePlatformWarning
- )
- kwargs = {
- 'keyfile': self.keyfile,
- 'certfile': self.certfile,
- 'ca_certs': self.ca_certs,
- 'cert_reqs': self.verify_mode,
- 'ssl_version': self.protocol,
- 'server_side': server_side,
- }
- if self.supports_set_ciphers: # Platform-specific: Python 2.7+
- return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
- else: # Platform-specific: Python 2.6
- return wrap_socket(socket, **kwargs)
-
-
-def assert_fingerprint(cert, fingerprint):
- """
- Checks if given fingerprint matches the supplied certificate.
-
- :param cert:
- Certificate as bytes object.
- :param fingerprint:
- Fingerprint as string of hexdigits, can be interspersed by colons.
- """
-
- fingerprint = fingerprint.replace(':', '').lower()
- digest_length = len(fingerprint)
- hashfunc = HASHFUNC_MAP.get(digest_length)
- if not hashfunc:
- raise SSLError(
- 'Fingerprint of invalid length: {0}'.format(fingerprint))
-
- # We need encode() here for py32; works on py2 and py33.
- fingerprint_bytes = unhexlify(fingerprint.encode())
-
- cert_digest = hashfunc(cert).digest()
-
- if not _const_compare_digest(cert_digest, fingerprint_bytes):
- raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
- .format(fingerprint, hexlify(cert_digest)))
-
-
-def resolve_cert_reqs(candidate):
- """
- Resolves the argument to a numeric constant, which can be passed to
- the wrap_socket function/method from the ssl module.
- Defaults to :data:`ssl.CERT_NONE`.
- If given a string it is assumed to be the name of the constant in the
- :mod:`ssl` module or its abbreviation
- (so you can specify `REQUIRED` instead of `CERT_REQUIRED`).
- If it's neither `None` nor a string we assume it is already the numeric
- constant which can directly be passed to wrap_socket.
- """
- if candidate is None:
- return CERT_NONE
-
- if isinstance(candidate, str):
- res = getattr(ssl, candidate, None)
- if res is None:
- res = getattr(ssl, 'CERT_' + candidate)
- return res
-
- return candidate
-
-
-def resolve_ssl_version(candidate):
- """
- Like resolve_cert_reqs, but for SSL protocol versions; defaults to PROTOCOL_SSLv23.
- """
- if candidate is None:
- return PROTOCOL_SSLv23
-
- if isinstance(candidate, str):
- res = getattr(ssl, candidate, None)
- if res is None:
- res = getattr(ssl, 'PROTOCOL_' + candidate)
- return res
-
- return candidate
-
-
-def create_urllib3_context(ssl_version=None, cert_reqs=None,
- options=None, ciphers=None):
- """All arguments have the same meaning as ``ssl_wrap_socket``.
-
- By default, this function does a lot of the same work that
- ``ssl.create_default_context`` does on Python 3.4+. It:
-
- - Disables SSLv2, SSLv3, and compression
- - Sets a restricted set of server ciphers
-
- If you wish to enable SSLv3, you can do::
-
- from urllib3.util import ssl_
- context = ssl_.create_urllib3_context()
- context.options &= ~ssl_.OP_NO_SSLv3
-
- You can do the same to enable compression (substituting ``COMPRESSION``
- for ``SSLv3`` in the last line above).
-
- :param ssl_version:
- The desired protocol version to use. This will default to
- PROTOCOL_SSLv23 which will negotiate the highest protocol that both
- the server and your installation of OpenSSL support.
- :param cert_reqs:
- Whether to require certificate verification. This defaults to
- ``ssl.CERT_REQUIRED``.
- :param options:
- Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
- ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
- :param ciphers:
- Which cipher suites to allow the server to select.
- :returns:
- Constructed SSLContext object with specified options
- :rtype: SSLContext
- """
- context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
-
- # Setting the default here, as we may have no ssl module on import
- cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
-
- if options is None:
- options = 0
- # SSLv2 is easily broken and is considered harmful and dangerous
- options |= OP_NO_SSLv2
- # SSLv3 has several problems and is now dangerous
- options |= OP_NO_SSLv3
- # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
- # (issue #309)
- options |= OP_NO_COMPRESSION
-
- context.options |= options
-
- if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
- context.set_ciphers(ciphers or DEFAULT_CIPHERS)
-
- context.verify_mode = cert_reqs
- if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
- # We do our own verification, including fingerprints and alternative
- # hostnames. So disable it here
- context.check_hostname = False
- return context
-
-
-def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
- ca_certs=None, server_hostname=None,
- ssl_version=None, ciphers=None, ssl_context=None,
- ca_cert_dir=None):
- """
- All arguments except for server_hostname, ssl_context, and ca_cert_dir have
- the same meaning as they do when using :func:`ssl.wrap_socket`.
-
- :param server_hostname:
- When SNI is supported, the expected hostname of the certificate
- :param ssl_context:
- A pre-made :class:`SSLContext` object. If none is provided, one will
- be created using :func:`create_urllib3_context`.
- :param ciphers:
- A string of ciphers we wish the client to support. This is not
- supported on Python 2.6 as the ssl module does not support it.
- :param ca_cert_dir:
- A directory containing CA certificates in multiple separate files, as
- supported by OpenSSL's -CApath flag or the capath argument to
- SSLContext.load_verify_locations().
- """
- context = ssl_context
- if context is None:
- # Note: This branch of code and all the variables in it are no longer
- # used by urllib3 itself. We should consider deprecating and removing
- # this code.
- context = create_urllib3_context(ssl_version, cert_reqs,
- ciphers=ciphers)
-
- if ca_certs or ca_cert_dir:
- try:
- context.load_verify_locations(ca_certs, ca_cert_dir)
- except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
- raise SSLError(e)
- # Py33 raises FileNotFoundError which subclasses OSError
- # These are not equivalent unless we check the errno attribute
- except OSError as e: # Platform-specific: Python 3.3 and beyond
- if e.errno == errno.ENOENT:
- raise SSLError(e)
- raise
- elif getattr(context, 'load_default_certs', None) is not None:
- # try to load OS default certs; works well on Windows (requires Python 3.4+)
- context.load_default_certs()
-
- if certfile:
- context.load_cert_chain(certfile, keyfile)
- if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
- return context.wrap_socket(sock, server_hostname=server_hostname)
-
- warnings.warn(
- 'An HTTPS request has been made, but the SNI (Subject Name '
- 'Indication) extension to TLS is not available on this platform. '
- 'This may cause the server to present an incorrect TLS '
- 'certificate, which can cause validation failures. You can upgrade to '
- 'a newer version of Python to solve this. For more information, see '
- 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
- '#ssl-warnings',
- SNIMissingWarning
- )
- return context.wrap_socket(sock)
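`create_urllib3_context()` and `ssl_wrap_socket()` above pair a hardened
default context (SSLv2/SSLv3 and compression disabled, restricted ciphers)
with SNI-aware wrapping. A hedged sketch that assumes outbound network access;
certificate verification is switched off here only to keep the example
self-contained, never in production code:

```python
import socket
import ssl

from urllib3.util.ssl_ import create_urllib3_context, ssl_wrap_socket

ctx = create_urllib3_context(cert_reqs=ssl.CERT_NONE)  # demo only: no verify
raw = socket.create_connection(("example.com", 443), timeout=5.0)
tls = ssl_wrap_socket(raw, server_hostname="example.com", ssl_context=ctx)
print(tls.version())  # negotiated protocol, e.g. TLSv1.3
tls.close()
```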
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py b/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
deleted file mode 100644
index 4041cf9b9..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-# The default socket timeout, used by httplib to indicate that no timeout was
-# specified by the user
-from socket import _GLOBAL_DEFAULT_TIMEOUT
-import time
-
-from ..exceptions import TimeoutStateError
-
-# A sentinel value to indicate that no timeout was specified by the user in
-# urllib3
-_Default = object()
-
-
-# Use time.monotonic if available.
-current_time = getattr(time, "monotonic", time.time)
-
-
-class Timeout(object):
- """ Timeout configuration.
-
- Timeouts can be defined as a default for a pool::
-
- timeout = Timeout(connect=2.0, read=7.0)
- http = PoolManager(timeout=timeout)
- response = http.request('GET', 'http://example.com/')
-
- Or per-request (which overrides the default for the pool)::
-
- response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
-
- Timeouts can be disabled by setting all the parameters to ``None``::
-
- no_timeout = Timeout(connect=None, read=None)
- response = http.request('GET', 'http://example.com/', timeout=no_timeout)
-
-
- :param total:
- This combines the connect and read timeouts into one; the read timeout
- will be set to the time leftover from the connect attempt. In the
- event that both a connect timeout and a total are specified, or a read
- timeout and a total are specified, the shorter timeout will be applied.
-
- Defaults to None.
-
- :type total: integer, float, or None
-
- :param connect:
- The maximum amount of time to wait for a connection attempt to a server
- to succeed. Omitting the parameter will default the connect timeout to
- the system default, probably `the global default timeout in socket.py
- <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
- None will set an infinite timeout for connection attempts.
-
- :type connect: integer, float, or None
-
- :param read:
- The maximum amount of time to wait between consecutive
- read operations for a response from the server. Omitting
- the parameter will default the read timeout to the system
- default, probably `the global default timeout in socket.py
- <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
- None will set an infinite timeout.
-
- :type read: integer, float, or None
-
- .. note::
-
- Many factors can affect the total amount of time for urllib3 to return
- an HTTP response.
-
- For example, Python's DNS resolver does not obey the timeout specified
- on the socket. Other factors that can affect total request time include
- high CPU load, high swap, the program running at a low priority level,
- or other behaviors.
-
- In addition, the read and total timeouts only measure the time between
- read operations on the socket connecting the client and the server,
- not the total amount of time for the request to return a complete
- response. For most requests, the timeout is raised because the server
- has not sent the first byte in the specified time. This is not always
- the case; if a server streams one byte every fifteen seconds, a timeout
- of 20 seconds will not trigger, even though the request will take
- several minutes to complete.
-
- If your goal is to cut off any request after a set amount of wall clock
- time, consider having a second "watcher" thread to cut off a slow
- request.
- """
-
- #: A sentinel object representing the default timeout value
- DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
-
- def __init__(self, total=None, connect=_Default, read=_Default):
- self._connect = self._validate_timeout(connect, 'connect')
- self._read = self._validate_timeout(read, 'read')
- self.total = self._validate_timeout(total, 'total')
- self._start_connect = None
-
- def __str__(self):
- return '%s(connect=%r, read=%r, total=%r)' % (
- type(self).__name__, self._connect, self._read, self.total)
-
- @classmethod
- def _validate_timeout(cls, value, name):
- """ Check that a timeout attribute is valid.
-
- :param value: The timeout value to validate
- :param name: The name of the timeout attribute to validate. This is
- used in error messages.
- :return: The validated and cast version of the given value.
- :raises ValueError: If it is a numeric value less than or equal to
- zero, or the type is not an integer, float, or None.
- """
- if value is _Default:
- return cls.DEFAULT_TIMEOUT
-
- if value is None or value is cls.DEFAULT_TIMEOUT:
- return value
-
- if isinstance(value, bool):
- raise ValueError("Timeout cannot be a boolean value. It must "
- "be an int, float or None.")
- try:
- float(value)
- except (TypeError, ValueError):
- raise ValueError("Timeout value %s was %s, but it must be an "
- "int, float or None." % (name, value))
-
- try:
- if value <= 0:
- raise ValueError("Attempted to set %s timeout to %s, but the "
- "timeout cannot be set to a value less "
- "than or equal to 0." % (name, value))
- except TypeError: # Python 3
- raise ValueError("Timeout value %s was %s, but it must be an "
- "int, float or None." % (name, value))
-
- return value
-
- @classmethod
- def from_float(cls, timeout):
- """ Create a new Timeout from a legacy timeout value.
-
- The timeout value used by httplib.py sets the same timeout on the
- connect() and recv() socket calls. This creates a :class:`Timeout`
- object that sets the individual timeouts to the ``timeout`` value
- passed to this function.
-
- :param timeout: The legacy timeout value.
- :type timeout: integer, float, sentinel default object, or None
- :return: Timeout object
- :rtype: :class:`Timeout`
- """
- return Timeout(read=timeout, connect=timeout)
-
- def clone(self):
- """ Create a copy of the timeout object
-
- Timeout properties are stored per-pool but each request needs a fresh
- Timeout object to ensure each one has its own start/stop configured.
-
- :return: a copy of the timeout object
- :rtype: :class:`Timeout`
- """
- # We can't use copy.deepcopy because that will also create a new object
- # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
- # detect the user default.
- return Timeout(connect=self._connect, read=self._read,
- total=self.total)
-
- def start_connect(self):
- """ Start the timeout clock, used during a connect() attempt
-
- :raises urllib3.exceptions.TimeoutStateError: if you attempt
- to start a timer that has been started already.
- """
- if self._start_connect is not None:
- raise TimeoutStateError("Timeout timer has already been started.")
- self._start_connect = current_time()
- return self._start_connect
-
- def get_connect_duration(self):
- """ Gets the time elapsed since the call to :meth:`start_connect`.
-
- :return: Elapsed time.
- :rtype: float
- :raises urllib3.exceptions.TimeoutStateError: if you attempt
- to get duration for a timer that hasn't been started.
- """
- if self._start_connect is None:
- raise TimeoutStateError("Can't get connect duration for timer "
- "that has not started.")
- return current_time() - self._start_connect
-
- @property
- def connect_timeout(self):
- """ Get the value to use when setting a connection timeout.
-
- This will be a positive float or integer, the value None
- (never timeout), or the default system timeout.
-
- :return: Connect timeout.
- :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
- """
- if self.total is None:
- return self._connect
-
- if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
- return self.total
-
- return min(self._connect, self.total)
-
- @property
- def read_timeout(self):
- """ Get the value for the read timeout.
-
- This assumes some time has elapsed in the connection timeout and
- computes the read timeout appropriately.
-
- If self.total is set, the read timeout is dependent on the amount of
- time taken by the connect timeout. If the connection time has not been
- established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
- raised.
-
- :return: Value to use for the read timeout.
- :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
- :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
- has not yet been called on this object.
- """
- if (self.total is not None and
- self.total is not self.DEFAULT_TIMEOUT and
- self._read is not None and
- self._read is not self.DEFAULT_TIMEOUT):
- # In case the connect timeout has not yet been established.
- if self._start_connect is None:
- return self._read
- return max(0, min(self.total - self.get_connect_duration(),
- self._read))
- elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
- return max(0, self.total - self.get_connect_duration())
- else:
- return self._read
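
The `Timeout` bookkeeping deleted above is easiest to follow end to end. Below is a minimal usage sketch, assuming the vendored module is importable as `urllib3.util.timeout` (the import path is an assumption; the class itself is the one defined above):

```python
# Sketch only: exercises the Timeout API removed above.
from urllib3.util.timeout import Timeout  # assumed import path

# connect and read budgets are tracked separately; total caps both.
t = Timeout(connect=2.0, read=5.0, total=6.0)

t = t.clone()        # per-request copy, so start/stop state is fresh
t.start_connect()    # starts the clock read by get_connect_duration()
# ... the socket connect() would happen here ...

# With total set, the read budget shrinks by whatever connect consumed:
# max(0, min(total - get_connect_duration(), read))
remaining = t.read_timeout
```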
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/url.py b/collectors/python.d.plugin/python_modules/urllib3/util/url.py
deleted file mode 100644
index 99fd6534a..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/util/url.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-from collections import namedtuple
-
-from ..exceptions import LocationParseError
-
-
-url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
-
-# We only want to normalize urls with an HTTP(S) scheme.
-# urllib3 infers URLs without a scheme (None) to be http.
-NORMALIZABLE_SCHEMES = ('http', 'https', None)
-
-
-class Url(namedtuple('Url', url_attrs)):
- """
- Datastructure for representing an HTTP URL. Used as a return value for
- :func:`parse_url`. Both the scheme and host are normalized as they are
- both case-insensitive according to RFC 3986.
- """
- __slots__ = ()
-
- def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
- query=None, fragment=None):
- if path and not path.startswith('/'):
- path = '/' + path
- if scheme:
- scheme = scheme.lower()
- if host and scheme in NORMALIZABLE_SCHEMES:
- host = host.lower()
- return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
- query, fragment)
-
- @property
- def hostname(self):
- """For backwards-compatibility with urlparse. We're nice like that."""
- return self.host
-
- @property
- def request_uri(self):
- """Absolute path including the query string."""
- uri = self.path or '/'
-
- if self.query is not None:
- uri += '?' + self.query
-
- return uri
-
- @property
- def netloc(self):
- """Network location including host and port"""
- if self.port:
- return '%s:%d' % (self.host, self.port)
- return self.host
-
- @property
- def url(self):
- """
- Convert self into a url
-
- This function should more or less round-trip with :func:`.parse_url`. The
- returned url may not be exactly the same as the url inputted to
- :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
- with a blank port will have : removed).
-
- Example: ::
-
- >>> U = parse_url('http://google.com/mail/')
- >>> U.url
- 'http://google.com/mail/'
- >>> Url('http', 'username:password', 'host.com', 80,
- ... '/path', 'query', 'fragment').url
- 'http://username:password@host.com:80/path?query#fragment'
- """
- scheme, auth, host, port, path, query, fragment = self
- url = ''
-
-        # We use "is not None" because we want things to happen with empty strings (or 0 port)
- if scheme is not None:
- url += scheme + '://'
- if auth is not None:
- url += auth + '@'
- if host is not None:
- url += host
- if port is not None:
- url += ':' + str(port)
- if path is not None:
- url += path
- if query is not None:
- url += '?' + query
- if fragment is not None:
- url += '#' + fragment
-
- return url
-
- def __str__(self):
- return self.url
-
-
-def split_first(s, delims):
- """
- Given a string and an iterable of delimiters, split on the first found
- delimiter. Return two split parts and the matched delimiter.
-
- If not found, then the first part is the full input string.
-
- Example::
-
- >>> split_first('foo/bar?baz', '?/=')
- ('foo', 'bar?baz', '/')
- >>> split_first('foo/bar?baz', '123')
- ('foo/bar?baz', '', None)
-
-    Scales linearly with the number of delims; not ideal for a large number of delims.
- """
- min_idx = None
- min_delim = None
- for d in delims:
- idx = s.find(d)
- if idx < 0:
- continue
-
- if min_idx is None or idx < min_idx:
- min_idx = idx
- min_delim = d
-
- if min_idx is None or min_idx < 0:
- return s, '', None
-
- return s[:min_idx], s[min_idx + 1:], min_delim
-
-
-def parse_url(url):
- """
- Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
- performed to parse incomplete urls. Fields not provided will be None.
-
- Partly backwards-compatible with :mod:`urlparse`.
-
- Example::
-
- >>> parse_url('http://google.com/mail/')
- Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
- >>> parse_url('google.com:80')
- Url(scheme=None, host='google.com', port=80, path=None, ...)
- >>> parse_url('/foo?bar')
- Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
- """
-
- # While this code has overlap with stdlib's urlparse, it is much
- # simplified for our needs and less annoying.
-    # Additionally, this implementation does silly things to be optimal
- # on CPython.
-
- if not url:
- # Empty
- return Url()
-
- scheme = None
- auth = None
- host = None
- port = None
- path = None
- fragment = None
- query = None
-
- # Scheme
- if '://' in url:
- scheme, url = url.split('://', 1)
-
- # Find the earliest Authority Terminator
- # (http://tools.ietf.org/html/rfc3986#section-3.2)
- url, path_, delim = split_first(url, ['/', '?', '#'])
-
- if delim:
- # Reassemble the path
- path = delim + path_
-
- # Auth
- if '@' in url:
- # Last '@' denotes end of auth part
- auth, url = url.rsplit('@', 1)
-
- # IPv6
- if url and url[0] == '[':
- host, url = url.split(']', 1)
- host += ']'
-
- # Port
- if ':' in url:
- _host, port = url.split(':', 1)
-
- if not host:
- host = _host
-
- if port:
- # If given, ports must be integers. No whitespace, no plus or
- # minus prefixes, no non-integer digits such as ^2 (superscript).
- if not port.isdigit():
- raise LocationParseError(url)
- try:
- port = int(port)
- except ValueError:
- raise LocationParseError(url)
- else:
- # Blank ports are cool, too. (rfc3986#section-3.2.3)
- port = None
-
- elif not host and url:
- host = url
-
- if not path:
- return Url(scheme, auth, host, port, path, query, fragment)
-
- # Fragment
- if '#' in path:
- path, fragment = path.split('#', 1)
-
- # Query
- if '?' in path:
- path, query = path.split('?', 1)
-
- return Url(scheme, auth, host, port, path, query, fragment)
-
-
-def get_host(url):
- """
- Deprecated. Use :func:`parse_url` instead.
- """
- p = parse_url(url)
- return p.scheme or 'http', p.hostname, p.port
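
A short sketch of the URL helpers above, again assuming the vendored import path `urllib3.util.url`:

```python
# Sketch only: demonstrates parse_url round-tripping and split_first.
from urllib3.util.url import parse_url, split_first  # assumed import path

u = parse_url('http://user:pass@example.com:8080/path?q=1#frag')
print(u.scheme, u.auth, u.host, u.port)   # http user:pass example.com 8080
print(u.request_uri)                      # /path?q=1
print(u.netloc)                           # example.com:8080
print(u.url)                              # reassembles the original URL

# split_first() underpins authority parsing: it splits on whichever
# delimiter occurs first in the string.
print(split_first('foo/bar?baz', '?/='))  # ('foo', 'bar?baz', '/')
```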
diff --git a/collectors/python.d.plugin/python_modules/urllib3/util/wait.py b/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
deleted file mode 100644
index 21e72979c..000000000
--- a/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# SPDX-License-Identifier: MIT
-from .selectors import (
- HAS_SELECT,
- DefaultSelector,
- EVENT_READ,
- EVENT_WRITE
-)
-
-
-def _wait_for_io_events(socks, events, timeout=None):
- """ Waits for IO events to be available from a list of sockets
- or optionally a single socket if passed in. Returns a list of
- sockets that can be interacted with immediately. """
- if not HAS_SELECT:
- raise ValueError('Platform does not have a selector')
- if not isinstance(socks, list):
- # Probably just a single socket.
- if hasattr(socks, "fileno"):
- socks = [socks]
- # Otherwise it might be a non-list iterable.
- else:
- socks = list(socks)
- with DefaultSelector() as selector:
- for sock in socks:
- selector.register(sock, events)
- return [key[0].fileobj for key in
- selector.select(timeout) if key[1] & events]
-
-
-def wait_for_read(socks, timeout=None):
- """ Waits for reading to be available from a list of sockets
- or optionally a single socket if passed in. Returns a list of
- sockets that can be read from immediately. """
- return _wait_for_io_events(socks, EVENT_READ, timeout)
-
-
-def wait_for_write(socks, timeout=None):
- """ Waits for writing to be available from a list of sockets
- or optionally a single socket if passed in. Returns a list of
- sockets that can be written to immediately. """
- return _wait_for_io_events(socks, EVENT_WRITE, timeout)
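
The wait helpers above reduce to "register the sockets with a selector, return the ready ones". A small sketch using a local socket pair (import path assumed, as before):

```python
# Sketch only: uses the wait helpers removed above on a socketpair.
import socket
from urllib3.util.wait import wait_for_read, wait_for_write  # assumed path

a, b = socket.socketpair()
b.sendall(b'ping')

ready = wait_for_read([a], timeout=1.0)  # -> [a] once data is buffered
if ready:
    print(ready[0].recv(4))              # b'ping'

# wait_for_write() reports sockets whose send buffers can accept data.
print(bool(wait_for_write([b], timeout=0)))
```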
diff --git a/collectors/python.d.plugin/rethinkdbs/Makefile.inc b/collectors/python.d.plugin/rethinkdbs/Makefile.inc
deleted file mode 100644
index dec604464..000000000
--- a/collectors/python.d.plugin/rethinkdbs/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += rethinkdbs/rethinkdbs.chart.py
-dist_pythonconfig_DATA += rethinkdbs/rethinkdbs.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += rethinkdbs/README.md rethinkdbs/Makefile.inc
-
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
deleted file mode 120000
index 78ddcfa18..000000000
--- a/collectors/python.d.plugin/rethinkdbs/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/rethinkdb.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md b/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md
deleted file mode 100644
index ab51c0514..000000000
--- a/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md
+++ /dev/null
@@ -1,190 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rethinkdbs/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rethinkdbs/metadata.yaml"
-sidebar_label: "RethinkDB"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Databases"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# RethinkDB
-
-
-<img src="https://netdata.cloud/img/rethinkdb.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: rethinkdbs
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors metrics about RethinkDB clusters and database servers.
-
-It uses the `rethinkdb` python module to connect to a RethinkDB server instance and gather statistics.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-When no configuration file is found, the collector tries to connect to 127.0.0.1:28015.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per RethinkDB instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| rethinkdb.cluster_connected_servers | connected, missing | servers |
-| rethinkdb.cluster_clients_active | active | clients |
-| rethinkdb.cluster_queries | queries | queries/s |
-| rethinkdb.cluster_documents | reads, writes | documents/s |
-
-### Per database server
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| rethinkdb.client_connections | connections | connections |
-| rethinkdb.clients_active | active | clients |
-| rethinkdb.queries | queries | queries/s |
-| rethinkdb.documents | reads, writes | documents/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Required python module
-
-The collector requires the `rethinkdb` python module to be installed.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/rethinkdbs.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/rethinkdbs.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| host | Hostname or IP of the RethinkDB server. | localhost | no |
-| port | Port to connect to the RethinkDB server. | 28015 | no |
-| user | The username to use to connect to the RethinkDB server. | admin | no |
-| password | The password to use to connect to the RethinkDB server. | | no |
-| timeout | The connect timeout for the RethinkDB server, in seconds. | 2 | no |
-
-</details>
-
-#### Examples
-
-##### Local RethinkDB server
-
-An example of a configuration for a local RethinkDB server
-
-```yaml
-localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 28015
- user: "user"
- password: "pass"
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `rethinkdbs` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin rethinkdbs debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/rethinkdbs/metadata.yaml b/collectors/python.d.plugin/rethinkdbs/metadata.yaml
deleted file mode 100644
index bbc50eac6..000000000
--- a/collectors/python.d.plugin/rethinkdbs/metadata.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: rethinkdbs
- monitored_instance:
- name: RethinkDB
- link: 'https://rethinkdb.com/'
- categories:
- - data-collection.database-servers
- icon_filename: 'rethinkdb.png'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - rethinkdb
- - database
- - db
- most_popular: false
- overview:
- data_collection:
- metrics_description: 'This collector monitors metrics about RethinkDB clusters and database servers.'
- method_description: 'It uses the `rethinkdb` python module to connect to a RethinkDB server instance and gather statistics.'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: 'When no configuration file is found, the collector tries to connect to 127.0.0.1:28015.'
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'Required python module'
- description: 'The collector requires the `rethinkdb` python module to be installed.'
- configuration:
- file:
- name: python.d/rethinkdbs.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- - name: host
-              description: Hostname or IP of the RethinkDB server.
- default_value: 'localhost'
- required: false
- - name: port
- description: Port to connect to the RethinkDB server.
- default_value: '28015'
- required: false
- - name: user
- description: The username to use to connect to the RethinkDB server.
- default_value: 'admin'
- required: false
- - name: password
- description: The password to use to connect to the RethinkDB server.
- default_value: ''
- required: false
- - name: timeout
-              description: The connect timeout for the RethinkDB server, in seconds.
- default_value: '2'
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Local RethinkDB server
- description: An example of a configuration for a local RethinkDB server
- folding:
- enabled: false
- config: |
- localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 28015
- user: "user"
- password: "pass"
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: rethinkdb.cluster_connected_servers
- description: Connected Servers
- unit: "servers"
- chart_type: stacked
- dimensions:
- - name: connected
- - name: missing
- - name: rethinkdb.cluster_clients_active
- description: Active Clients
- unit: "clients"
- chart_type: line
- dimensions:
- - name: active
- - name: rethinkdb.cluster_queries
- description: Queries
- unit: "queries/s"
- chart_type: line
- dimensions:
- - name: queries
- - name: rethinkdb.cluster_documents
- description: Documents
- unit: "documents/s"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
- - name: database server
- description: ""
- labels: []
- metrics:
- - name: rethinkdb.client_connections
- description: Client Connections
- unit: "connections"
- chart_type: line
- dimensions:
- - name: connections
- - name: rethinkdb.clients_active
- description: Active Clients
- unit: "clients"
- chart_type: line
- dimensions:
- - name: active
- - name: rethinkdb.queries
- description: Queries
- unit: "queries/s"
- chart_type: line
- dimensions:
- - name: queries
- - name: rethinkdb.documents
- description: Documents
- unit: "documents/s"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
deleted file mode 100644
index e3fbc3632..000000000
--- a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: rethinkdb netdata python.d module
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-try:
- import rethinkdb as rdb
-
- HAS_RETHINKDB = True
-except ImportError:
- HAS_RETHINKDB = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-ORDER = [
- 'cluster_connected_servers',
- 'cluster_clients_active',
- 'cluster_queries',
- 'cluster_documents',
-]
-
-
-def cluster_charts():
- return {
- 'cluster_connected_servers': {
- 'options': [None, 'Connected Servers', 'servers', 'cluster', 'rethinkdb.cluster_connected_servers',
- 'stacked'],
- 'lines': [
- ['cluster_servers_connected', 'connected'],
- ['cluster_servers_missing', 'missing'],
- ]
- },
- 'cluster_clients_active': {
- 'options': [None, 'Active Clients', 'clients', 'cluster', 'rethinkdb.cluster_clients_active',
- 'line'],
- 'lines': [
- ['cluster_clients_active', 'active'],
- ]
- },
- 'cluster_queries': {
- 'options': [None, 'Queries', 'queries/s', 'cluster', 'rethinkdb.cluster_queries', 'line'],
- 'lines': [
- ['cluster_queries_per_sec', 'queries'],
- ]
- },
- 'cluster_documents': {
- 'options': [None, 'Documents', 'documents/s', 'cluster', 'rethinkdb.cluster_documents', 'line'],
- 'lines': [
- ['cluster_read_docs_per_sec', 'reads'],
- ['cluster_written_docs_per_sec', 'writes'],
- ]
- },
- }
-
-
-def server_charts(n):
- o = [
- '{0}_client_connections'.format(n),
- '{0}_clients_active'.format(n),
- '{0}_queries'.format(n),
- '{0}_documents'.format(n),
- ]
- f = 'server {0}'.format(n)
-
- c = {
- o[0]: {
- 'options': [None, 'Client Connections', 'connections', f, 'rethinkdb.client_connections', 'line'],
- 'lines': [
- ['{0}_client_connections'.format(n), 'connections'],
- ]
- },
- o[1]: {
- 'options': [None, 'Active Clients', 'clients', f, 'rethinkdb.clients_active', 'line'],
- 'lines': [
- ['{0}_clients_active'.format(n), 'active'],
- ]
- },
- o[2]: {
- 'options': [None, 'Queries', 'queries/s', f, 'rethinkdb.queries', 'line'],
- 'lines': [
- ['{0}_queries_total'.format(n), 'queries', 'incremental'],
- ]
- },
- o[3]: {
- 'options': [None, 'Documents', 'documents/s', f, 'rethinkdb.documents', 'line'],
- 'lines': [
- ['{0}_read_docs_total'.format(n), 'reads', 'incremental'],
- ['{0}_written_docs_total'.format(n), 'writes', 'incremental'],
- ]
- },
- }
-
- return o, c
-
-
-class Cluster:
- def __init__(self, raw):
- self.raw = raw
-
- def data(self):
- qe = self.raw['query_engine']
-
- return {
- 'cluster_clients_active': qe['clients_active'],
- 'cluster_queries_per_sec': qe['queries_per_sec'],
- 'cluster_read_docs_per_sec': qe['read_docs_per_sec'],
- 'cluster_written_docs_per_sec': qe['written_docs_per_sec'],
- 'cluster_servers_connected': 0,
- 'cluster_servers_missing': 0,
- }
-
-
-class Server:
- def __init__(self, raw):
- self.name = raw['server']
- self.raw = raw
-
- def error(self):
- return self.raw.get('error')
-
- def data(self):
- qe = self.raw['query_engine']
-
- d = {
- 'client_connections': qe['client_connections'],
- 'clients_active': qe['clients_active'],
- 'queries_total': qe['queries_total'],
- 'read_docs_total': qe['read_docs_total'],
- 'written_docs_total': qe['written_docs_total'],
- }
-
- return dict(('{0}_{1}'.format(self.name, k), d[k]) for k in d)
-
-
-# https://pypi.org/project/rethinkdb/2.4.0/
-# rdb.RethinkDB() can be used as a drop-in replacement for rdb.
-# https://github.com/rethinkdb/rethinkdb-python#quickstart
-def get_rethinkdb():
- if hasattr(rdb, 'RethinkDB'):
- return rdb.RethinkDB()
- return rdb
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = list(ORDER)
- self.definitions = cluster_charts()
- self.host = self.configuration.get('host', '127.0.0.1')
- self.port = self.configuration.get('port', 28015)
- self.user = self.configuration.get('user', 'admin')
- self.password = self.configuration.get('password')
- self.timeout = self.configuration.get('timeout', 2)
- self.rdb = None
- self.conn = None
- self.alive = True
-
- def check(self):
- if not HAS_RETHINKDB:
- self.error('"rethinkdb" module is needed to use rethinkdbs.py')
- return False
-
- self.debug("rethinkdb driver version {0}".format(rdb.__version__))
- self.rdb = get_rethinkdb()
-
- if not self.connect():
- return None
-
- stats = self.get_stats()
-
- if not stats:
- return None
-
- for v in stats[1:]:
- if get_id(v) == 'server':
- o, c = server_charts(v['server'])
- self.order.extend(o)
- self.definitions.update(c)
-
- return True
-
- def get_data(self):
- if not self.is_alive():
- return None
-
- stats = self.get_stats()
-
- if not stats:
- return None
-
- data = dict()
-
- # cluster
- data.update(Cluster(stats[0]).data())
-
- # servers
- for v in stats[1:]:
- if get_id(v) != 'server':
- continue
-
- s = Server(v)
-
- if s.error():
- data['cluster_servers_missing'] += 1
- else:
- data['cluster_servers_connected'] += 1
- data.update(s.data())
-
- return data
-
- def get_stats(self):
- try:
- return list(self.rdb.db('rethinkdb').table('stats').run(self.conn).items)
- except rdb.errors.ReqlError:
- self.alive = False
- return None
-
- def connect(self):
- try:
- self.conn = self.rdb.connect(
- host=self.host,
- port=self.port,
- user=self.user,
- password=self.password,
- timeout=self.timeout,
- )
- self.alive = True
- return True
- except rdb.errors.ReqlError as error:
- self.error('Connection to {0}:{1} failed: {2}'.format(self.host, self.port, error))
- return False
-
- def reconnect(self):
- # The connection is already closed after rdb.errors.ReqlError,
- # so we do not need to call conn.close()
- if self.connect():
- return True
- return False
-
- def is_alive(self):
- if not self.alive:
- return self.reconnect()
- return True
-
-
-def get_id(v):
- return v['id'][0]
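
The module's data path boils down to one query against RethinkDB's system `stats` table: the first row describes the cluster, and rows whose `id[0]` is `'server'` describe individual servers. A hedged sketch of that query with the `rethinkdb` driver (connection details are placeholders):

```python
# Sketch only: the stats query issued by the collector above.
import rethinkdb as rdb

# rethinkdb >= 2.4 exposes RethinkDB(); older drivers use the module directly.
r = rdb.RethinkDB() if hasattr(rdb, 'RethinkDB') else rdb

conn = r.connect(host='127.0.0.1', port=28015, user='admin',
                 password='', timeout=2)  # placeholder credentials

for row in r.db('rethinkdb').table('stats').run(conn):
    if row['id'][0] == 'server':
        qe = row['query_engine']
        print(row['server'], qe['queries_total'], qe['read_docs_total'])
```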
diff --git a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf b/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
deleted file mode 100644
index d671acbb0..000000000
--- a/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
+++ /dev/null
@@ -1,76 +0,0 @@
-# netdata python.d.plugin configuration for rethinkdb
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, rethinkdb also supports the following:
-#
-# host: IP or HOSTNAME # default is 'localhost'
-# port: PORT # default is 28015
-# user: USERNAME # default is 'admin'
-# password: PASSWORD # not set by default
-# timeout: TIMEOUT # default is 2
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-local:
- name: 'local'
- host: 'localhost'
diff --git a/collectors/python.d.plugin/retroshare/Makefile.inc b/collectors/python.d.plugin/retroshare/Makefile.inc
deleted file mode 100644
index 891193e6d..000000000
--- a/collectors/python.d.plugin/retroshare/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += retroshare/retroshare.chart.py
-dist_pythonconfig_DATA += retroshare/retroshare.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += retroshare/README.md retroshare/Makefile.inc
-
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
deleted file mode 120000
index 4e4c2cdb7..000000000
--- a/collectors/python.d.plugin/retroshare/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/retroshare.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/retroshare/integrations/retroshare.md b/collectors/python.d.plugin/retroshare/integrations/retroshare.md
deleted file mode 100644
index 4fc003c6f..000000000
--- a/collectors/python.d.plugin/retroshare/integrations/retroshare.md
+++ /dev/null
@@ -1,191 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/retroshare/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/retroshare/metadata.yaml"
-sidebar_label: "RetroShare"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Media Services"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# RetroShare
-
-
-<img src="https://netdata.cloud/img/retroshare.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: retroshare
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors RetroShare statistics such as application bandwidth, peers, and DHT metrics.
-
-It connects to the RetroShare web interface to gather metrics.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-The collector will attempt to connect and detect a RetroShare web interface through http://localhost:9090, even without any configuration.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per RetroShare instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| retroshare.bandwidth | Upload, Download | kilobits/s |
-| retroshare.peers | All friends, Connected friends | peers |
-| retroshare.dht | DHT nodes estimated, RS nodes estimated | peers |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ retroshare_dht_working ](https://github.com/netdata/netdata/blob/master/health/health.d/retroshare.conf) | retroshare.dht | number of DHT peers |
-
-
-## Setup
-
-### Prerequisites
-
-#### RetroShare web interface
-
-RetroShare needs to be configured to enable the RetroShare WEB Interface and allow access from the Netdata host.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/retroshare.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/retroshare.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| url | The URL to the RetroShare Web UI. | http://localhost:9090 | no |
-
-</details>
-
-#### Examples
-
-##### Local RetroShare Web UI
-
-A basic configuration for a RetroShare server running on localhost.
-
-<details><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local retroshare'
- url: 'http://localhost:9090'
-
-```
-</details>
-
-##### Remote RetroShare Web UI
-
-A basic configuration for a remote RetroShare server.
-
-<details><summary>Config</summary>
-
-```yaml
-remote:
- name: 'remote retroshare'
- url: 'http://1.2.3.4:9090'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `retroshare` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin retroshare debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/retroshare/metadata.yaml b/collectors/python.d.plugin/retroshare/metadata.yaml
deleted file mode 100644
index 0a769616b..000000000
--- a/collectors/python.d.plugin/retroshare/metadata.yaml
+++ /dev/null
@@ -1,144 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: retroshare
- monitored_instance:
- name: RetroShare
- link: "https://retroshare.cc/"
- categories:
- - data-collection.media-streaming-servers
- icon_filename: "retroshare.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - retroshare
- - p2p
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors RetroShare statistics such as application bandwidth, peers, and DHT metrics."
- method_description: "It connects to the RetroShare web interface to gather metrics."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "The collector will attempt to connect and detect a RetroShare web interface through http://localhost:9090, even without any configuration."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "RetroShare web interface"
- description: |
- RetroShare needs to be configured to enable the RetroShare WEB Interface and allow access from the Netdata host.
- configuration:
- file:
- name: python.d/retroshare.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: url
- description: The URL to the RetroShare Web UI.
- default_value: "http://localhost:9090"
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Local RetroShare Web UI
- description: A basic configuration for a RetroShare server running on localhost.
- config: |
- localhost:
- name: 'local retroshare'
- url: 'http://localhost:9090'
- - name: Remote RetroShare Web UI
- description: A basic configuration for a remote RetroShare server.
- config: |
- remote:
- name: 'remote retroshare'
- url: 'http://1.2.3.4:9090'
-
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: retroshare_dht_working
- link: https://github.com/netdata/netdata/blob/master/health/health.d/retroshare.conf
- metric: retroshare.dht
- info: number of DHT peers
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: retroshare.bandwidth
- description: RetroShare Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: Upload
- - name: Download
- - name: retroshare.peers
- description: RetroShare Peers
- unit: "peers"
- chart_type: line
- dimensions:
- - name: All friends
- - name: Connected friends
- - name: retroshare.dht
- description: Retroshare DHT
- unit: "peers"
- chart_type: line
- dimensions:
- - name: DHT nodes estimated
- - name: RS nodes estimated
diff --git a/collectors/python.d.plugin/retroshare/retroshare.chart.py b/collectors/python.d.plugin/retroshare/retroshare.chart.py
deleted file mode 100644
index 3f9593e94..000000000
--- a/collectors/python.d.plugin/retroshare/retroshare.chart.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: RetroShare netdata python.d module
-# Authors: sehraf
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = [
- 'bandwidth',
- 'peers',
- 'dht',
-]
-
-CHARTS = {
- 'bandwidth': {
- 'options': [None, 'RetroShare Bandwidth', 'kilobits/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
- 'lines': [
- ['bandwidth_up_kb', 'Upload'],
- ['bandwidth_down_kb', 'Download']
- ]
- },
- 'peers': {
- 'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
- 'lines': [
- ['peers_all', 'All friends'],
- ['peers_connected', 'Connected friends']
- ]
- },
- 'dht': {
- 'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
- 'lines': [
- ['dht_size_all', 'DHT nodes estimated'],
- ['dht_size_rs', 'RS nodes estimated']
- ]
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.baseurl = self.configuration.get('url', 'http://localhost:9090')
-
- def _get_stats(self):
- """
- Format data received from http request
- :return: dict
- """
- try:
- raw = self._get_raw_data()
- parsed = json.loads(raw)
- if str(parsed['returncode']) != 'ok':
- return None
- except (TypeError, ValueError):
- return None
-
- return parsed['data'][0]
-
- def _get_data(self):
- """
- Get data from API
- :return: dict
- """
- self.url = self.baseurl + '/api/v2/stats'
- data = self._get_stats()
- if data is None:
- return None
-
- data['bandwidth_up_kb'] = data['bandwidth_up_kb'] * -1
- if data['dht_active'] is False:
- data['dht_size_all'] = None
- data['dht_size_rs'] = None
-
- return data
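
One detail in `_get_data()` worth noting: upload bandwidth is negated so the area chart renders upload below the zero line and download above it, and the DHT dimensions are blanked while DHT is inactive. A sketch of that post-processing on an invented payload (field names mirror the module above; values are made up):

```python
# Sketch only: field names mirror the module above, values are invented.
sample = {
    'bandwidth_up_kb': 120,
    'bandwidth_down_kb': 840,
    'dht_active': False,
    'dht_size_all': 50,
    'dht_size_rs': 10,
}

sample['bandwidth_up_kb'] *= -1     # draw upload below the zero line
if sample['dht_active'] is False:   # hide DHT dims while DHT is off
    sample['dht_size_all'] = None
    sample['dht_size_rs'] = None

print(sample)
```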
diff --git a/collectors/python.d.plugin/retroshare/retroshare.conf b/collectors/python.d.plugin/retroshare/retroshare.conf
deleted file mode 100644
index 3d0af538d..000000000
--- a/collectors/python.d.plugin/retroshare/retroshare.conf
+++ /dev/null
@@ -1,72 +0,0 @@
-# netdata python.d.plugin configuration for RetroShare
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, RetroShare also supports the following:
-#
-# - url: 'url' # the URL to the WebUI
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name: 'local'
- url: 'http://localhost:9090'
diff --git a/collectors/python.d.plugin/riakkv/Makefile.inc b/collectors/python.d.plugin/riakkv/Makefile.inc
deleted file mode 100644
index 87d29f82f..000000000
--- a/collectors/python.d.plugin/riakkv/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += riakkv/riakkv.chart.py
-dist_pythonconfig_DATA += riakkv/riakkv.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += riakkv/README.md riakkv/Makefile.inc
-
diff --git a/collectors/python.d.plugin/riakkv/README.md b/collectors/python.d.plugin/riakkv/README.md
deleted file mode 120000
index f43ece09b..000000000
--- a/collectors/python.d.plugin/riakkv/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/riakkv.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/riakkv/integrations/riakkv.md b/collectors/python.d.plugin/riakkv/integrations/riakkv.md
deleted file mode 100644
index 2e8279bc3..000000000
--- a/collectors/python.d.plugin/riakkv/integrations/riakkv.md
+++ /dev/null
@@ -1,220 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/riakkv/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/riakkv/metadata.yaml"
-sidebar_label: "RiakKV"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Databases"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# RiakKV
-
-
-<img src="https://netdata.cloud/img/riak.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: riakkv
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors RiakKV metrics about throughput, latency, resources and more.
-
-
-This collector reads the database stats from the `/stats` endpoint.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If the /stats endpoint is accessible, RiakKV instances on the local host running on port 8098 will be autodetected.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per RiakKV instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| riak.kv.throughput | gets, puts | operations/s |
-| riak.dt.vnode_updates | counters, sets, maps | operations/s |
-| riak.search | queries | queries/s |
-| riak.search.documents | indexed | documents/s |
-| riak.consistent.operations | gets, puts | operations/s |
-| riak.kv.latency.get | mean, median, 95, 99, 100 | ms |
-| riak.kv.latency.put | mean, median, 95, 99, 100 | ms |
-| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms |
-| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms |
-| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms |
-| riak.search.latency.query | median, min, 95, 99, 999, max | ms |
-| riak.search.latency.index | median, min, 95, 99, 999, max | ms |
-| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms |
-| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms |
-| riak.vm | processes | total |
-| riak.vm.memory.processes | allocated, used | MB |
-| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings |
-| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB |
-| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages |
-| riak.search.index | errors | errors |
-| riak.core.protobuf_connections | active | connections |
-| riak.core.repairs | read | repairs |
-| riak.core.fsm_active | get, put, secondary index, list keys | fsms |
-| riak.core.fsm_rejected | get, put | fsms |
-| riak.search.index | bad_entry, extract_fail | writes |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ riakkv_1h_kv_get_mean_latency ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to the client over the last hour |
-| [ riakkv_kv_get_slow ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |
-| [ riakkv_1h_kv_put_mean_latency ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last hour |
-| [ riakkv_kv_put_slow ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |
-| [ riakkv_vm_high_process_count ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.vm | number of processes running in the Erlang VM |
-| [ riakkv_list_keys_active ](https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf) | riak.core.fsm_active | number of currently running list keys finite state machines |
-
-
-## Setup
-
-### Prerequisites
-
-#### Configure RiakKV to enable /stats endpoint
-
-You can follow the RiakKV configuration reference documentation for how to enable this.
-
-Source: https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/riakkv.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/riakkv.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| url | The URL of the server | no | yes |
-
-</details>
-
-#### Examples
-
-##### Basic (default)
-
-A basic example configuration per job
-
-```yaml
-local:
-  url: 'http://localhost:8098/stats'
-
-```
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details><summary>Config</summary>
-
-```yaml
-local:
- url: 'http://localhost:8098/stats'
-
-remote:
- url: 'http://192.0.2.1:8098/stats'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `riakkv` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin riakkv debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/riakkv/metadata.yaml b/collectors/python.d.plugin/riakkv/metadata.yaml
deleted file mode 100644
index 441937f89..000000000
--- a/collectors/python.d.plugin/riakkv/metadata.yaml
+++ /dev/null
@@ -1,358 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: riakkv
- monitored_instance:
- name: RiakKV
- link: "https://riak.com/products/riak-kv/index.html"
- categories:
- - data-collection.database-servers
- icon_filename: "riak.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - database
- - nosql
- - big data
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
-        This collector monitors RiakKV metrics about throughput, latency, resources and more.
- method_description: "This collector reads the database stats from the `/stats` endpoint."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "If the /stats endpoint is accessible, RiakKV instances on the local host running on port 8098 will be autodetected."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Configure RiakKV to enable /stats endpoint
- description: |
- You can follow the RiakKV configuration reference documentation for how to enable this.
-
-          Source: https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces
- configuration:
- file:
- name: "python.d/riakkv.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: url
- description: The url of the server
- default_value: no
- required: true
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic (default)
- folding:
- enabled: false
- description: A basic example configuration per job
- config: |
- local:
-                url: 'http://localhost:8098/stats'
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- local:
- url: 'http://localhost:8098/stats'
-
- remote:
- url: 'http://192.0.2.1:8098/stats'
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: riakkv_1h_kv_get_mean_latency
- link: https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf
- metric: riak.kv.latency.get
- info: average time between reception of client GET request and subsequent response to client over the last hour
- - name: riakkv_kv_get_slow
- link: https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf
- metric: riak.kv.latency.get
- info: average time between reception of client GET request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour
- - name: riakkv_1h_kv_put_mean_latency
- link: https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf
- metric: riak.kv.latency.put
- info: average time between reception of client PUT request and subsequent response to the client over the last hour
- - name: riakkv_kv_put_slow
- link: https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf
- metric: riak.kv.latency.put
- info: average time between reception of client PUT request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour
- - name: riakkv_vm_high_process_count
- link: https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf
- metric: riak.vm
- info: number of processes running in the Erlang VM
- - name: riakkv_list_keys_active
- link: https://github.com/netdata/netdata/blob/master/health/health.d/riakkv.conf
- metric: riak.core.fsm_active
- info: number of currently running list keys finite state machines
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: riak.kv.throughput
- description: Reads & writes coordinated by this node
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: gets
- - name: puts
- - name: riak.dt.vnode_updates
- description: Update operations coordinated by local vnodes by data type
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: counters
- - name: sets
- - name: maps
- - name: riak.search
- description: Search queries on the node
- unit: "queries/s"
- chart_type: line
- dimensions:
- - name: queries
- - name: riak.search.documents
- description: Documents indexed by search
- unit: "documents/s"
- chart_type: line
- dimensions:
- - name: indexed
- - name: riak.consistent.operations
- description: Consistent node operations
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: gets
- - name: puts
- - name: riak.kv.latency.get
- description: Time between reception of a client GET request and subsequent response to client
- unit: "ms"
- chart_type: line
- dimensions:
- - name: mean
- - name: median
- - name: "95"
- - name: "99"
- - name: "100"
- - name: riak.kv.latency.put
- description: Time between reception of a client PUT request and subsequent response to client
- unit: "ms"
- chart_type: line
- dimensions:
- - name: mean
- - name: median
- - name: "95"
- - name: "99"
- - name: "100"
- - name: riak.dt.latency.counter_merge
- description: Time it takes to perform an Update Counter operation
- unit: "ms"
- chart_type: line
- dimensions:
- - name: mean
- - name: median
- - name: "95"
- - name: "99"
- - name: "100"
- - name: riak.dt.latency.set_merge
- description: Time it takes to perform an Update Set operation
- unit: "ms"
- chart_type: line
- dimensions:
- - name: mean
- - name: median
- - name: "95"
- - name: "99"
- - name: "100"
- - name: riak.dt.latency.map_merge
- description: Time it takes to perform an Update Map operation
- unit: "ms"
- chart_type: line
- dimensions:
- - name: mean
- - name: median
- - name: "95"
- - name: "99"
- - name: "100"
- - name: riak.search.latency.query
- description: Search query latency
- unit: "ms"
- chart_type: line
- dimensions:
- - name: median
- - name: min
- - name: "95"
- - name: "99"
- - name: "999"
- - name: max
- - name: riak.search.latency.index
- description: Time it takes Search to index a new document
- unit: "ms"
- chart_type: line
- dimensions:
- - name: median
- - name: min
- - name: "95"
- - name: "99"
- - name: "999"
- - name: max
- - name: riak.consistent.latency.get
- description: Strongly consistent read latency
- unit: "ms"
- chart_type: line
- dimensions:
- - name: mean
- - name: median
- - name: "95"
- - name: "99"
- - name: "100"
- - name: riak.consistent.latency.put
- description: Strongly consistent write latency
- unit: "ms"
- chart_type: line
- dimensions:
- - name: mean
- - name: median
- - name: "95"
- - name: "99"
- - name: "100"
- - name: riak.vm
- description: Total processes running in the Erlang VM
- unit: "total"
- chart_type: line
- dimensions:
- - name: processes
- - name: riak.vm.memory.processes
- description: Memory allocated & used by Erlang processes
- unit: "MB"
- chart_type: line
- dimensions:
- - name: allocated
- - name: used
- - name: riak.kv.siblings_encountered.get
- description: Number of siblings encountered during GET operations by this node during the past minute
- unit: "siblings"
- chart_type: line
- dimensions:
- - name: mean
- - name: median
- - name: "95"
- - name: "99"
- - name: "100"
- - name: riak.kv.objsize.get
- description: Object size encountered by this node during the past minute
- unit: "KB"
- chart_type: line
- dimensions:
- - name: mean
- - name: median
- - name: "95"
- - name: "99"
- - name: "100"
- - name: riak.search.vnodeq_size
- description: Number of unprocessed messages in the vnode message queues of Search on this node in the past minute
- unit: "messages"
- chart_type: line
- dimensions:
- - name: mean
- - name: median
- - name: "95"
- - name: "99"
- - name: "100"
- - name: riak.search.index
- description: Number of document index errors encountered by Search
- unit: "errors"
- chart_type: line
- dimensions:
- - name: errors
- - name: riak.core.protobuf_connections
- description: Protocol buffer connections by status
- unit: "connections"
- chart_type: line
- dimensions:
- - name: active
- - name: riak.core.repairs
- description: Number of repair operations this node has coordinated
- unit: "repairs"
- chart_type: line
- dimensions:
- - name: read
- - name: riak.core.fsm_active
- description: Active finite state machines by kind
- unit: "fsms"
- chart_type: line
- dimensions:
- - name: get
- - name: put
- - name: secondary index
- - name: list keys
- - name: riak.core.fsm_rejected
- description: Finite state machines being rejected by Sidejobs overload protection
- unit: "fsms"
- chart_type: line
- dimensions:
- - name: get
- - name: put
- - name: riak.search.index
- description: Number of writes to Search failed due to bad data format by reason
- unit: "writes"
- chart_type: line
- dimensions:
- - name: bad_entry
- - name: extract_fail
diff --git a/collectors/python.d.plugin/riakkv/riakkv.chart.py b/collectors/python.d.plugin/riakkv/riakkv.chart.py
deleted file mode 100644
index c390c8bc0..000000000
--- a/collectors/python.d.plugin/riakkv/riakkv.chart.py
+++ /dev/null
@@ -1,334 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: riak netdata python.d module
-#
-# See also:
-# https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html
-
-from json import loads
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# Riak updates the metrics at the /stats endpoint every 1 second.
-# If we use `update_every = 1` here, that means we might get weird jitter in the graph,
-# so the default is set to 2 seconds to prevent it.
-update_every = 2
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = [
- # Throughput metrics
- # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#throughput-metrics
- # Collected in totals.
- "kv.node_operations", # K/V node operations.
- "dt.vnode_updates", # Data type vnode updates.
- "search.queries", # Search queries on the node.
- "search.documents", # Documents indexed by Search.
- "consistent.operations", # Consistent node operations.
-
- # Latency metrics
-    # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#latency-metrics
- # Collected for the past minute in milliseconds,
- # returned from riak in microseconds.
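-    # The 1/1000 divisor on each latency line in CHARTS below performs this
-    # microseconds-to-milliseconds conversion.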
- "kv.latency.get", # K/V GET FSM traversal latency.
- "kv.latency.put", # K/V PUT FSM traversal latency.
- "dt.latency.counter", # Update Counter Data type latency.
- "dt.latency.set", # Update Set Data type latency.
- "dt.latency.map", # Update Map Data type latency.
- "search.latency.query", # Search query latency.
- "search.latency.index", # Time it takes for search to index a new document.
- "consistent.latency.get", # Strong consistent read latency.
- "consistent.latency.put", # Strong consistent write latency.
-
- # Erlang resource usage metrics
- # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#erlang-resource-usage-metrics
- # Processes collected as a gauge,
- # memory collected as Megabytes, returned as bytes from Riak.
- "vm.processes", # Number of processes currently running in the Erlang VM.
- "vm.memory.processes", # Total amount of memory allocated & used for Erlang processes.
-
- # General Riak Load / Health metrics
- # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#general-riak-load-health-metrics
- # The following are collected by Riak over the past minute:
- "kv.siblings_encountered.get", # Siblings encountered during GET operations by this node.
- "kv.objsize.get", # Object size encountered by this node.
- "search.vnodeq_size", # Number of unprocessed messages in the vnode message queues (Search).
- # The following are calculated in total, or as gauges:
- "search.index_errors", # Errors of the search subsystem while indexing documents.
- "core.pbc", # Number of currently active protocol buffer connections.
- "core.repairs", # Total read repair operations coordinated by this node.
- "core.fsm_active", # Active finite state machines by kind.
- "core.fsm_rejected", # Rejected finite state machines by kind.
-
- # General Riak Search Load / Health metrics
- # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#general-riak-search-load-health-metrics
- # Reported as counters.
- "search.errors", # Write and read errors of the Search subsystem.
-]
-
-CHARTS = {
- # Throughput metrics
- "kv.node_operations": {
- "options": [None, "Reads & writes coordinated by this node", "operations/s", "throughput", "riak.kv.throughput",
- "line"],
- "lines": [
- ["node_gets_total", "gets", "incremental"],
- ["node_puts_total", "puts", "incremental"]
- ]
- },
- "dt.vnode_updates": {
- "options": [None, "Update operations coordinated by local vnodes by data type", "operations/s", "throughput",
- "riak.dt.vnode_updates", "line"],
- "lines": [
- ["vnode_counter_update_total", "counters", "incremental"],
- ["vnode_set_update_total", "sets", "incremental"],
- ["vnode_map_update_total", "maps", "incremental"],
- ]
- },
- "search.queries": {
- "options": [None, "Search queries on the node", "queries/s", "throughput", "riak.search", "line"],
- "lines": [
- ["search_query_throughput_count", "queries", "incremental"]
- ]
- },
- "search.documents": {
- "options": [None, "Documents indexed by search", "documents/s", "throughput", "riak.search.documents", "line"],
- "lines": [
- ["search_index_throughput_count", "indexed", "incremental"]
- ]
- },
- "consistent.operations": {
- "options": [None, "Consistent node operations", "operations/s", "throughput", "riak.consistent.operations",
- "line"],
- "lines": [
- ["consistent_gets_total", "gets", "incremental"],
- ["consistent_puts_total", "puts", "incremental"],
- ]
- },
-
- # Latency metrics
- "kv.latency.get": {
- "options": [None, "Time between reception of a client GET request and subsequent response to client", "ms",
- "latency", "riak.kv.latency.get", "line"],
- "lines": [
- ["node_get_fsm_time_mean", "mean", "absolute", 1, 1000],
- ["node_get_fsm_time_median", "median", "absolute", 1, 1000],
- ["node_get_fsm_time_95", "95", "absolute", 1, 1000],
- ["node_get_fsm_time_99", "99", "absolute", 1, 1000],
- ["node_get_fsm_time_100", "100", "absolute", 1, 1000],
- ]
- },
- "kv.latency.put": {
- "options": [None, "Time between reception of a client PUT request and subsequent response to client", "ms",
- "latency", "riak.kv.latency.put", "line"],
- "lines": [
- ["node_put_fsm_time_mean", "mean", "absolute", 1, 1000],
- ["node_put_fsm_time_median", "median", "absolute", 1, 1000],
- ["node_put_fsm_time_95", "95", "absolute", 1, 1000],
- ["node_put_fsm_time_99", "99", "absolute", 1, 1000],
- ["node_put_fsm_time_100", "100", "absolute", 1, 1000],
- ]
- },
- "dt.latency.counter": {
- "options": [None, "Time it takes to perform an Update Counter operation", "ms", "latency",
- "riak.dt.latency.counter_merge", "line"],
- "lines": [
- ["object_counter_merge_time_mean", "mean", "absolute", 1, 1000],
- ["object_counter_merge_time_median", "median", "absolute", 1, 1000],
- ["object_counter_merge_time_95", "95", "absolute", 1, 1000],
- ["object_counter_merge_time_99", "99", "absolute", 1, 1000],
- ["object_counter_merge_time_100", "100", "absolute", 1, 1000],
- ]
- },
- "dt.latency.set": {
- "options": [None, "Time it takes to perform an Update Set operation", "ms", "latency",
- "riak.dt.latency.set_merge", "line"],
- "lines": [
- ["object_set_merge_time_mean", "mean", "absolute", 1, 1000],
- ["object_set_merge_time_median", "median", "absolute", 1, 1000],
- ["object_set_merge_time_95", "95", "absolute", 1, 1000],
- ["object_set_merge_time_99", "99", "absolute", 1, 1000],
- ["object_set_merge_time_100", "100", "absolute", 1, 1000],
- ]
- },
- "dt.latency.map": {
- "options": [None, "Time it takes to perform an Update Map operation", "ms", "latency",
- "riak.dt.latency.map_merge", "line"],
- "lines": [
- ["object_map_merge_time_mean", "mean", "absolute", 1, 1000],
- ["object_map_merge_time_median", "median", "absolute", 1, 1000],
- ["object_map_merge_time_95", "95", "absolute", 1, 1000],
- ["object_map_merge_time_99", "99", "absolute", 1, 1000],
- ["object_map_merge_time_100", "100", "absolute", 1, 1000],
- ]
- },
- "search.latency.query": {
- "options": [None, "Search query latency", "ms", "latency", "riak.search.latency.query", "line"],
- "lines": [
- ["search_query_latency_median", "median", "absolute", 1, 1000],
- ["search_query_latency_min", "min", "absolute", 1, 1000],
- ["search_query_latency_95", "95", "absolute", 1, 1000],
- ["search_query_latency_99", "99", "absolute", 1, 1000],
- ["search_query_latency_999", "999", "absolute", 1, 1000],
- ["search_query_latency_max", "max", "absolute", 1, 1000],
- ]
- },
- "search.latency.index": {
- "options": [None, "Time it takes Search to index a new document", "ms", "latency", "riak.search.latency.index",
- "line"],
- "lines": [
- ["search_index_latency_median", "median", "absolute", 1, 1000],
- ["search_index_latency_min", "min", "absolute", 1, 1000],
- ["search_index_latency_95", "95", "absolute", 1, 1000],
- ["search_index_latency_99", "99", "absolute", 1, 1000],
- ["search_index_latency_999", "999", "absolute", 1, 1000],
- ["search_index_latency_max", "max", "absolute", 1, 1000],
- ]
- },
-
- # Riak Strong Consistency metrics
- "consistent.latency.get": {
- "options": [None, "Strongly consistent read latency", "ms", "latency", "riak.consistent.latency.get", "line"],
- "lines": [
- ["consistent_get_time_mean", "mean", "absolute", 1, 1000],
- ["consistent_get_time_median", "median", "absolute", 1, 1000],
- ["consistent_get_time_95", "95", "absolute", 1, 1000],
- ["consistent_get_time_99", "99", "absolute", 1, 1000],
- ["consistent_get_time_100", "100", "absolute", 1, 1000],
- ]
- },
- "consistent.latency.put": {
- "options": [None, "Strongly consistent write latency", "ms", "latency", "riak.consistent.latency.put", "line"],
- "lines": [
- ["consistent_put_time_mean", "mean", "absolute", 1, 1000],
- ["consistent_put_time_median", "median", "absolute", 1, 1000],
- ["consistent_put_time_95", "95", "absolute", 1, 1000],
- ["consistent_put_time_99", "99", "absolute", 1, 1000],
- ["consistent_put_time_100", "100", "absolute", 1, 1000],
- ]
- },
-
- # BEAM metrics
- "vm.processes": {
- "options": [None, "Total processes running in the Erlang VM", "total", "vm", "riak.vm", "line"],
- "lines": [
- ["sys_process_count", "processes", "absolute"],
- ]
- },
- "vm.memory.processes": {
- "options": [None, "Memory allocated & used by Erlang processes", "MB", "vm", "riak.vm.memory.processes",
- "line"],
- "lines": [
- ["memory_processes", "allocated", "absolute", 1, 1024 * 1024],
- ["memory_processes_used", "used", "absolute", 1, 1024 * 1024]
- ]
- },
-
- # General Riak Load/Health metrics
- "kv.siblings_encountered.get": {
- "options": [None, "Number of siblings encountered during GET operations by this node during the past minute",
- "siblings", "load", "riak.kv.siblings_encountered.get", "line"],
- "lines": [
- ["node_get_fsm_siblings_mean", "mean", "absolute"],
- ["node_get_fsm_siblings_median", "median", "absolute"],
- ["node_get_fsm_siblings_95", "95", "absolute"],
- ["node_get_fsm_siblings_99", "99", "absolute"],
- ["node_get_fsm_siblings_100", "100", "absolute"],
- ]
- },
- "kv.objsize.get": {
- "options": [None, "Object size encountered by this node during the past minute", "KB", "load",
- "riak.kv.objsize.get", "line"],
- "lines": [
- ["node_get_fsm_objsize_mean", "mean", "absolute", 1, 1024],
- ["node_get_fsm_objsize_median", "median", "absolute", 1, 1024],
- ["node_get_fsm_objsize_95", "95", "absolute", 1, 1024],
- ["node_get_fsm_objsize_99", "99", "absolute", 1, 1024],
- ["node_get_fsm_objsize_100", "100", "absolute", 1, 1024],
- ]
- },
- "search.vnodeq_size": {
- "options": [None,
- "Number of unprocessed messages in the vnode message queues of Search on this node in the past minute",
- "messages", "load", "riak.search.vnodeq_size", "line"],
- "lines": [
- ["riak_search_vnodeq_mean", "mean", "absolute"],
- ["riak_search_vnodeq_median", "median", "absolute"],
- ["riak_search_vnodeq_95", "95", "absolute"],
- ["riak_search_vnodeq_99", "99", "absolute"],
- ["riak_search_vnodeq_100", "100", "absolute"],
- ]
- },
- "search.index_errors": {
- "options": [None, "Number of document index errors encountered by Search", "errors", "load",
- "riak.search.index", "line"],
- "lines": [
- ["search_index_fail_count", "errors", "absolute"]
- ]
- },
- "core.pbc": {
- "options": [None, "Protocol buffer connections by status", "connections", "load",
- "riak.core.protobuf_connections", "line"],
- "lines": [
- ["pbc_active", "active", "absolute"],
- # ["pbc_connects", "established_pastmin", "absolute"]
- ]
- },
- "core.repairs": {
- "options": [None, "Number of repair operations this node has coordinated", "repairs", "load",
- "riak.core.repairs", "line"],
- "lines": [
- ["read_repairs", "read", "absolute"]
- ]
- },
- "core.fsm_active": {
- "options": [None, "Active finite state machines by kind", "fsms", "load", "riak.core.fsm_active", "line"],
- "lines": [
- ["node_get_fsm_active", "get", "absolute"],
- ["node_put_fsm_active", "put", "absolute"],
- ["index_fsm_active", "secondary index", "absolute"],
- ["list_fsm_active", "list keys", "absolute"]
- ]
- },
- "core.fsm_rejected": {
- # Writing "Sidejob's" here seems to cause some weird issues: it results in this chart being rendered in
- # its own context and additionally, moves the entire Riak graph all the way up to the top of the Netdata
- # dashboard for some reason.
- "options": [None, "Finite state machines being rejected by Sidejobs overload protection", "fsms", "load",
- "riak.core.fsm_rejected", "line"],
- "lines": [
- ["node_get_fsm_rejected", "get", "absolute"],
- ["node_put_fsm_rejected", "put", "absolute"]
- ]
- },
-
- # General Riak Search Load / Health metrics
- "search.errors": {
- "options": [None, "Number of writes to Search failed due to bad data format by reason", "writes", "load",
- "riak.search.index", "line"],
- "lines": [
- ["search_index_bad_entry_count", "bad_entry", "absolute"],
- ["search_index_extract_fail_count", "extract_fail", "absolute"],
- ]
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- raw = self._get_raw_data()
- if not raw:
- return None
-
- try:
- return loads(raw)
- except (TypeError, ValueError) as err:
- self.error(err)
- return None
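-
-
-# Note: _get_raw_data() is inherited from UrlService and performs the HTTP GET
-# against the configured `url` (by default http://localhost:8098/stats).
-# A hypothetical, heavily truncated /stats payload looks like:
-#
-#   {"node_gets_total": 12345, "node_puts_total": 6789,
-#    "node_get_fsm_time_mean": 1532, ...}
-#
-# The framework looks up the first element of each entry in CHARTS[...]["lines"]
-# in the dict returned here, so no further transformation is needed.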
diff --git a/collectors/python.d.plugin/riakkv/riakkv.conf b/collectors/python.d.plugin/riakkv/riakkv.conf
deleted file mode 100644
index be01c48ac..000000000
--- a/collectors/python.d.plugin/riakkv/riakkv.conf
+++ /dev/null
@@ -1,68 +0,0 @@
-# netdata python.d.plugin configuration for riak
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-local:
-  url: 'http://localhost:8098/stats'
diff --git a/collectors/python.d.plugin/samba/Makefile.inc b/collectors/python.d.plugin/samba/Makefile.inc
deleted file mode 100644
index 230a8ba43..000000000
--- a/collectors/python.d.plugin/samba/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += samba/samba.chart.py
-dist_pythonconfig_DATA += samba/samba.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += samba/README.md samba/Makefile.inc
-
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
deleted file mode 120000
index 3b63bbab6..000000000
--- a/collectors/python.d.plugin/samba/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/samba.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/samba/integrations/samba.md b/collectors/python.d.plugin/samba/integrations/samba.md
deleted file mode 100644
index 1bd1664ee..000000000
--- a/collectors/python.d.plugin/samba/integrations/samba.md
+++ /dev/null
@@ -1,221 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/samba/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/samba/metadata.yaml"
-sidebar_label: "Samba"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Samba
-
-
-<img src="https://netdata.cloud/img/samba.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: samba
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors the performance metrics of Samba file sharing.
-
-It is using the `smbstatus` command-line tool.
-
-Executed commands:
-
-- `sudo -n smbstatus -P`
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-`smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-After all the permissions are satisfied, the `smbstatus -P` command is executed.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Samba instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| syscall.rw | sendfile, recvfile | KiB/s |
-| smb2.rw | readout, writein, readin, writeout | KiB/s |
-| smb2.create_close | create, close | operations/s |
-| smb2.get_set_info | getinfo, setinfo | operations/s |
-| smb2.find | find | operations/s |
-| smb2.notify | notify | operations/s |
-| smb2.sm_counters | tcon, negprot, tdis, cancel, logoff, flush, lock, keepalive, break, sessetup | count |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Enable the samba collector
-
-The `samba` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d.conf
-```
-Change the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
-
-
-#### Permissions and programs
-
-To run the collector you need:
-
-- `smbstatus` program
-- `sudo` program
-- `smbd` must be compiled with profiling enabled
-- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`
-
-The module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.
-
-- add to your `/etc/sudoers` file:
-
- `which smbstatus` shows the full path to the binary.
-
- ```bash
- netdata ALL=(root) NOPASSWD: /path/to/smbstatus
- ```
-
-- Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)
-
-  The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not optimal, but it is the next-best solution, given that `smbstatus` cannot otherwise be executed using `sudo`.
-
-
- As the `root` user, do the following:
-
-  ```bash
- mkdir /etc/systemd/system/netdata.service.d
- echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
- systemctl daemon-reload
- systemctl restart netdata.service
- ```
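-
-Before restarting Netdata, you can confirm the `sudoers` rule is in effect. The following minimal sketch (run it as the `netdata` user) mirrors the `sudo -n -l` check the collector itself performs in `samba.chart.py`; it assumes `smbstatus` is in the `PATH`:
-
-```python
-import shutil
-import subprocess
-
-# Locate smbstatus the same way the collector does (via the PATH).
-smbstatus = shutil.which('smbstatus')
-if smbstatus is None:
-    raise SystemExit('smbstatus not found in PATH')
-
-# 'sudo -n -l CMD' prints the command if it may run without a password.
-result = subprocess.run(['sudo', '-n', '-l', smbstatus, '-P'],
-                        capture_output=True, text=True)
-if result.stdout.strip() == '{0} -P'.format(smbstatus):
-    print('OK: netdata can run smbstatus via sudo without a password')
-else:
-    print('sudoers rule missing or wrong:', result.stdout or result.stderr)
-```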
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/samba.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/samba.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-<details><summary>Config</summary>
-
-```yaml
-my_job_name:
- name: my_name
- update_every: 1
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin samba debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/samba/metadata.yaml b/collectors/python.d.plugin/samba/metadata.yaml
deleted file mode 100644
index ec31e0475..000000000
--- a/collectors/python.d.plugin/samba/metadata.yaml
+++ /dev/null
@@ -1,205 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: samba
- monitored_instance:
- name: Samba
- link: https://www.samba.org/samba/
- categories:
- - data-collection.storage-mount-points-and-filesystems
- icon_filename: "samba.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - samba
- - file sharing
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors the performance metrics of Samba file sharing."
- method_description: |
- It is using the `smbstatus` command-line tool.
-
- Executed commands:
-
- - `sudo -n smbstatus -P`
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: |
- `smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.
- default_behavior:
- auto_detection:
-          description: "After all the permissions are satisfied, the `smbstatus -P` command is executed."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Enable the samba collector
- description: |
- The `samba` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
-
- ```bash
- cd /etc/netdata # Replace this path with your Netdata config directory, if different
- sudo ./edit-config python.d.conf
- ```
- Change the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
- - title: Permissions and programs
- description: |
- To run the collector you need:
-
- - `smbstatus` program
- - `sudo` program
- - `smbd` must be compiled with profiling enabled
- - `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`
-
- The module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.
-
- - add to your `/etc/sudoers` file:
-
- `which smbstatus` shows the full path to the binary.
-
- ```bash
- netdata ALL=(root) NOPASSWD: /path/to/smbstatus
- ```
-
- - Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)
-
-            The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not optimal, but it is the next-best solution, given that `smbstatus` cannot otherwise be executed using `sudo`.
-
-
- As the `root` user, do the following:
-
-            ```bash
- mkdir /etc/systemd/system/netdata.service.d
- echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
- systemctl daemon-reload
- systemctl restart netdata.service
- ```
- configuration:
- file:
- name: python.d/samba.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic example configuration.
- config: |
- my_job_name:
- name: my_name
- update_every: 1
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: syscall.rw
- description: R/Ws
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: sendfile
- - name: recvfile
- - name: smb2.rw
- description: R/Ws
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: readout
- - name: writein
- - name: readin
- - name: writeout
- - name: smb2.create_close
- description: Create/Close
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: create
- - name: close
- - name: smb2.get_set_info
- description: Info
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: getinfo
- - name: setinfo
- - name: smb2.find
- description: Find
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: find
- - name: smb2.notify
- description: Notify
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: notify
- - name: smb2.sm_counters
- description: Lesser Ops
- unit: "count"
- chart_type: stacked
- dimensions:
- - name: tcon
- - name: negprot
- - name: tdis
- - name: cancel
- - name: logoff
- - name: flush
- - name: lock
- - name: keepalive
- - name: break
- - name: sessetup
diff --git a/collectors/python.d.plugin/samba/samba.chart.py b/collectors/python.d.plugin/samba/samba.chart.py
deleted file mode 100644
index 8eebcd60c..000000000
--- a/collectors/python.d.plugin/samba/samba.chart.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: samba netdata python.d module
-# Author: Christopher Cox <chris_cox@endlessnow.com>
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# The netdata user needs to be able to sudo the smbstatus program
-# without password:
-# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
-#
-# This makes calls to smbstatus -P
-#
-# This just looks at a couple of values out of syscall, and some from smb2.
-#
-# The Lesser Ops chart is merely a display of current counter values. They
-# didn't seem to change much to me. However, if you notice something changing
-# a lot there, bring one or more out into its own chart and make it incremental
-# (like find and notify... good examples).
-
-import re
-import os
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-from bases.collection import find_binary
-
-disabled_by_default = True
-
-update_every = 5
-
-ORDER = [
- 'syscall_rw',
- 'smb2_rw',
- 'smb2_create_close',
- 'smb2_info',
- 'smb2_find',
- 'smb2_notify',
- 'smb2_sm_count'
-]
-
-CHARTS = {
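-    # Byte counters below are scaled to KiB (divisor 1024); a multiplier of -1
-    # plots that dimension below the zero axis, the usual convention for
-    # paired in/out dimensions.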
- 'syscall_rw': {
- 'options': [None, 'R/Ws', 'KiB/s', 'syscall', 'syscall.rw', 'area'],
- 'lines': [
- ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
- ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
- ]
- },
- 'smb2_rw': {
- 'options': [None, 'R/Ws', 'KiB/s', 'smb2', 'smb2.rw', 'area'],
- 'lines': [
- ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
- ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
- ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024],
- ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024]
- ]
- },
- 'smb2_create_close': {
- 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line'],
- 'lines': [
- ['smb2_create_count', 'create', 'incremental', 1, 1],
- ['smb2_close_count', 'close', 'incremental', -1, 1]
- ]
- },
- 'smb2_info': {
- 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line'],
- 'lines': [
- ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1],
- ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1]
- ]
- },
- 'smb2_find': {
- 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line'],
- 'lines': [
- ['smb2_find_count', 'find', 'incremental', 1, 1]
- ]
- },
- 'smb2_notify': {
- 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line'],
- 'lines': [
- ['smb2_notify_count', 'notify', 'incremental', 1, 1]
- ]
- },
- 'smb2_sm_count': {
- 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked'],
- 'lines': [
- ['smb2_tcon_count', 'tcon', 'absolute', 1, 1],
- ['smb2_negprot_count', 'negprot', 'absolute', 1, 1],
- ['smb2_tdis_count', 'tdis', 'absolute', 1, 1],
- ['smb2_cancel_count', 'cancel', 'absolute', 1, 1],
- ['smb2_logoff_count', 'logoff', 'absolute', 1, 1],
- ['smb2_flush_count', 'flush', 'absolute', 1, 1],
- ['smb2_lock_count', 'lock', 'absolute', 1, 1],
- ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1],
- ['smb2_break_count', 'break', 'absolute', 1, 1],
- ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1]
- ]
- }
-}
-
-SUDO = 'sudo'
-SMBSTATUS = 'smbstatus'
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
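-        # Captures (name, value) pairs from lines such as
-        # "smb2_read_inbytes:  1234" or "syscall_sendfile_bytes:  5678"
-        # (hypothetical sample values) in the 'smbstatus -P' output.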
-
- def check(self):
- smbstatus_binary = find_binary(SMBSTATUS)
- if not smbstatus_binary:
- self.error("can't locate '{0}' binary".format(SMBSTATUS))
- return False
-
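-        # Running as root: smbstatus can be executed directly, without sudo.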
- if os.getuid() == 0:
- self.command = ' '.join([smbstatus_binary, '-P'])
- return ExecutableService.check(self)
-
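-        # Not root: use 'sudo -n -l' to verify that passwordless sudo is
-        # configured for 'smbstatus -P' before committing to the command.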
- sudo_binary = find_binary(SUDO)
- if not sudo_binary:
- self.error("can't locate '{0}' binary".format(SUDO))
- return False
- command = [sudo_binary, '-n', '-l', smbstatus_binary, '-P']
- smbstatus = '{0} -P'.format(smbstatus_binary)
- allowed = self._get_raw_data(command=command)
- if not (allowed and allowed[0].strip() == smbstatus):
- self.error("not allowed to run sudo for command '{0}'".format(smbstatus))
- return False
- self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
- return ExecutableService.check(self)
-
- def _get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- parsed = self.rgx_smb2.findall(' '.join(raw_data))
-
- return dict(parsed) or None
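-
-
-# For a hypothetical raw 'smbstatus -P' output containing
-#   "smb2_read_inbytes:  1234" and "smb2_write_inbytes:  567",
-# findall() yields [('smb2_read_inbytes', '1234'), ('smb2_write_inbytes', '567')]
-# and the returned dict maps each counter name to its (string) value.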
diff --git a/collectors/python.d.plugin/samba/samba.conf b/collectors/python.d.plugin/samba/samba.conf
deleted file mode 100644
index db15d4e9e..000000000
--- a/collectors/python.d.plugin/samba/samba.conf
+++ /dev/null
@@ -1,60 +0,0 @@
-# netdata python.d.plugin configuration for samba
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-update_every: 5
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds \ No newline at end of file
diff --git a/collectors/python.d.plugin/sensors/Makefile.inc b/collectors/python.d.plugin/sensors/Makefile.inc
deleted file mode 100644
index 5fb26e1c8..000000000
--- a/collectors/python.d.plugin/sensors/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += sensors/sensors.chart.py
-dist_pythonconfig_DATA += sensors/sensors.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += sensors/README.md sensors/Makefile.inc
-
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
deleted file mode 120000
index 4e92b0882..000000000
--- a/collectors/python.d.plugin/sensors/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/linux_sensors_lm-sensors.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md b/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md
deleted file mode 100644
index e426c8c83..000000000
--- a/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md
+++ /dev/null
@@ -1,187 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/sensors/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/sensors/metadata.yaml"
-sidebar_label: "Linux Sensors (lm-sensors)"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Hardware Devices and Sensors"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Linux Sensors (lm-sensors)
-
-
-<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: sensors
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Examine Linux Sensors metrics with Netdata for insights into hardware health and performance.
-
-Enhance your system's reliability with real-time hardware health insights.
-
-
-Reads system sensors information (temperature, voltage, electric current, power, etc.) via [lm-sensors](https://hwmon.wiki.kernel.org/lm_sensors).
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-The following types of sensors are auto-detected:
-
-- temperature
-- fan
-- voltage
-- current
-- power
-- energy
-- humidity
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per chip
-
-Metrics related to chips. Each chip provides a set of the following metrics, each having the chip name in the metric name as reported by `sensors -u`.
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| sensors.temperature | a dimension per sensor | Celsius |
-| sensors.voltage | a dimension per sensor | Volts |
-| sensors.current | a dimension per sensor | Ampere |
-| sensors.power | a dimension per sensor | Watt |
-| sensors.fan | a dimension per sensor | Rotations/min |
-| sensors.energy | a dimension per sensor | Joule |
-| sensors.humidity | a dimension per sensor | Percent |
-
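-To see which chips and sensor types exist on your host (and therefore which dimensions these charts will get), you can inspect the raw lm-sensors output. A minimal sketch, assuming the `sensors` CLI from lm-sensors is installed:
-
-```python
-import subprocess
-
-# 'sensors -u' prints one block per chip, blocks separated by blank lines.
-out = subprocess.run(['sensors', '-u'], capture_output=True, text=True, check=True)
-for block in out.stdout.strip().split('\n\n'):
-    print(block.splitlines()[0])  # chip name, e.g. "coretemp-isa-0000"
-```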
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/sensors.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/sensors.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| types | The types of sensors to collect. | temperature, fan, voltage, current, power, energy, humidity | yes |
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-
-</details>
-
-#### Examples
-
-##### Default
-
-Default configuration.
-
-```yaml
-types:
- - temperature
- - fan
- - voltage
- - current
- - power
- - energy
- - humidity
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `sensors` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin sensors debug trace
- ```
-
-### lm-sensors doesn't work on your device
-
-When `lm-sensors` doesn't work on your device (e.g. for RPi temperatures),
-use [the legacy bash collector](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/sensors/README.md).
-
-### ACPI ring buffer errors are printed
-
-There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`)
-when ACPI sensors are being accessed. We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827).
-Please join this discussion for help.
-
diff --git a/collectors/python.d.plugin/sensors/metadata.yaml b/collectors/python.d.plugin/sensors/metadata.yaml
deleted file mode 100644
index d7cb2206f..000000000
--- a/collectors/python.d.plugin/sensors/metadata.yaml
+++ /dev/null
@@ -1,184 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: sensors
- monitored_instance:
- name: Linux Sensors (lm-sensors)
- link: https://hwmon.wiki.kernel.org/lm_sensors
- categories:
- - data-collection.hardware-devices-and-sensors
- icon_filename: "microchip.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - sensors
- - temperature
- - voltage
- - current
- - power
- - fan
- - energy
- - humidity
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- Examine Linux Sensors metrics with Netdata for insights into hardware health and performance.
-
- Enhance your system's reliability with real-time hardware health insights.
- method_description: >
- Reads system sensors information (temperature, voltage, electric current, power, etc.) via [lm-sensors](https://hwmon.wiki.kernel.org/lm_sensors).
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: >
-            The following types of sensors are auto-detected:
-
- - temperature
- - fan
- - voltage
- - current
- - power
- - energy
- - humidity
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: python.d/sensors.conf
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: Config options
- enabled: true
- list:
- - name: types
- description: The types of sensors to collect.
- default_value: "temperature, fan, voltage, current, power, energy, humidity"
- required: true
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: Config
- list:
- - name: Default
- folding:
- enabled: false
- description: Default configuration.
- config: |
- types:
- - temperature
- - fan
- - voltage
- - current
- - power
- - energy
- - humidity
- troubleshooting:
- problems:
- list:
- - name: lm-sensors doesn't work on your device
- description: |
- When `lm-sensors` doesn't work on your device (e.g. for RPi temperatures),
- use [the legacy bash collector](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/sensors/README.md)
- - name: ACPI ring buffer errors are printed
- description: |
- There have been reports from users that on certain servers, ACPI ring buffer errors are printed by the kernel (`dmesg`)
- when ACPI sensors are being accessed. We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827).
- Please join this discussion for help.
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: chip
- description: >
- Metrics related to chips. Each chip provides a set of the following metrics, each having the chip name in the metric name as reported by `sensors -u`.
- labels: []
- metrics:
- - name: sensors.temperature
- description: Temperature
- unit: "Celsius"
- chart_type: line
- dimensions:
- - name: a dimension per sensor
- - name: sensors.voltage
- description: Voltage
- unit: "Volts"
- chart_type: line
- dimensions:
- - name: a dimension per sensor
- - name: sensors.current
- description: Current
- unit: "Ampere"
- chart_type: line
- dimensions:
- - name: a dimension per sensor
- - name: sensors.power
- description: Power
- unit: "Watt"
- chart_type: line
- dimensions:
- - name: a dimension per sensor
- - name: sensors.fan
- description: Fans speed
- unit: "Rotations/min"
- chart_type: line
- dimensions:
- - name: a dimension per sensor
- - name: sensors.energy
- description: Energy
- unit: "Joule"
- chart_type: line
- dimensions:
- - name: a dimension per sensor
- - name: sensors.humidity
- description: Humidity
- unit: "Percent"
- chart_type: line
- dimensions:
- - name: a dimension per sensor
diff --git a/collectors/python.d.plugin/sensors/sensors.chart.py b/collectors/python.d.plugin/sensors/sensors.chart.py
deleted file mode 100644
index 0d9de3750..000000000
--- a/collectors/python.d.plugin/sensors/sensors.chart.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: sensors netdata python.d plugin
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from collections import defaultdict
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from third_party import lm_sensors as sensors
-
-ORDER = [
- 'temperature',
- 'fan',
- 'voltage',
- 'current',
- 'power',
- 'energy',
- 'humidity',
-]
-
-# This is a prototype of chart definition which is used to dynamically create self.definitions
-CHARTS = {
- 'temperature': {
- 'options': [None, 'Temperature', 'Celsius', 'temperature', 'sensors.temperature', 'line'],
- 'lines': [
- [None, None, 'absolute', 1, 1000]
- ]
- },
- 'voltage': {
- 'options': [None, 'Voltage', 'Volts', 'voltage', 'sensors.voltage', 'line'],
- 'lines': [
- [None, None, 'absolute', 1, 1000]
- ]
- },
- 'current': {
- 'options': [None, 'Current', 'Ampere', 'current', 'sensors.current', 'line'],
- 'lines': [
- [None, None, 'absolute', 1, 1000]
- ]
- },
- 'power': {
- 'options': [None, 'Power', 'Watt', 'power', 'sensors.power', 'line'],
- 'lines': [
- [None, None, 'absolute', 1, 1000]
- ]
- },
- 'fan': {
- 'options': [None, 'Fans speed', 'Rotations/min', 'fans', 'sensors.fan', 'line'],
- 'lines': [
- [None, None, 'absolute', 1, 1000]
- ]
- },
- 'energy': {
- 'options': [None, 'Energy', 'Joule', 'energy', 'sensors.energy', 'line'],
- 'lines': [
- [None, None, 'incremental', 1, 1000]
- ]
- },
- 'humidity': {
- 'options': [None, 'Humidity', 'Percent', 'humidity', 'sensors.humidity', 'line'],
- 'lines': [
- [None, None, 'absolute', 1, 1000]
- ]
- }
-}
-
-LIMITS = {
- 'temperature': [-127, 1000],
- 'voltage': [-400, 400],
- 'current': [-127, 127],
- 'fan': [0, 65535]
-}
-
-TYPE_MAP = {
- 0: 'voltage',
- 1: 'fan',
- 2: 'temperature',
- 3: 'power',
- 4: 'energy',
- 5: 'current',
- 6: 'humidity',
- # 7: 'max_main',
- # 16: 'vid',
- # 17: 'intrusion',
- # 18: 'max_other',
- # 24: 'beep_enable'
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = list()
- self.definitions = dict()
- self.chips = configuration.get('chips')
- self.priority = 60000
-
- def get_data(self):
- seen, data = dict(), dict()
- try:
- for chip in sensors.ChipIterator():
- chip_name = sensors.chip_snprintf_name(chip)
- seen[chip_name] = defaultdict(list)
-
- for feat in sensors.FeatureIterator(chip):
- if feat.type not in TYPE_MAP:
- continue
-
- feat_type = TYPE_MAP[feat.type]
- feat_name = str(feat.name.decode())
- feat_label = sensors.get_label(chip, feat)
- feat_limits = LIMITS.get(feat_type)
- sub_feat = next(sensors.SubFeatureIterator(chip, feat)) # current value
-
- if not sub_feat:
- continue
-
- try:
- v = sensors.get_value(chip, sub_feat.number)
- except sensors.SensorsError:
- continue
-
- if v is None:
- continue
-
- seen[chip_name][feat_type].append((feat_name, feat_label))
-
- if feat_limits and (v < feat_limits[0] or v > feat_limits[1]):
- continue
-
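- # store values scaled by 1000; the chart definitions use a divisor of 1000, preserving three decimals of precision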
- data[chip_name + '_' + feat_name] = int(v * 1000)
-
- except sensors.SensorsError as error:
- self.error(error)
- return None
-
- self.update_sensors_charts(seen)
-
- return data or None
-
- def update_sensors_charts(self, seen):
- for chip_name, feat in seen.items():
- if self.chips and not any([chip_name.startswith(ex) for ex in self.chips]):
- continue
-
- for feat_type, sub_feat in feat.items():
- if feat_type not in ORDER or feat_type not in CHARTS:
- continue
-
- chart_id = '{}_{}'.format(chip_name, feat_type)
- if chart_id in self.charts:
- continue
-
- params = [chart_id] + list(CHARTS[feat_type]['options'])
- new_chart = self.charts.add_chart(params)
- new_chart.params['priority'] = self.get_chart_priority(feat_type)
-
- for name, label in sub_feat:
- lines = list(CHARTS[feat_type]['lines'][0])
- lines[0] = chip_name + '_' + name
- lines[1] = label
- new_chart.add_dimension(lines)
-
- def check(self):
- try:
- sensors.init()
- except sensors.SensorsError as error:
- self.error(error)
- return False
-
- self.priority = self.charts.priority
-
- return bool(self.get_data() and self.charts)
-
- def get_chart_priority(self, feat_type):
- for i, v in enumerate(ORDER):
- if v == feat_type:
- return self.priority + i
- return self.priority
diff --git a/collectors/python.d.plugin/sensors/sensors.conf b/collectors/python.d.plugin/sensors/sensors.conf
deleted file mode 100644
index d3369ba66..000000000
--- a/collectors/python.d.plugin/sensors/sensors.conf
+++ /dev/null
@@ -1,61 +0,0 @@
-# netdata python.d.plugin configuration for sensors
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# Limit the sensor types to collect.
-# Comment the ones you want to disable.
-# Also, re-arranging this list controls the order of the charts at the
-# netdata dashboard.
-
-types:
- - temperature
- - fan
- - voltage
- - current
- - power
- - energy
- - humidity
-
-# ----------------------------------------------------------------------
-# Limit the sensor chips to collect.
-# Uncomment the first line (chips:) and add chip names below it.
-# Chip names are matched by prefix (anything that starts like the given name).
-# You can find the chip names using the `sensors` command.
-
-#chips:
-# - i8k
-# - coretemp
-#
-# chip names can be found using the sensors shell command
-# the prefix is matched (anything that starts like that)
-#
-#----------------------------------------------------------------------
-
diff --git a/collectors/python.d.plugin/smartd_log/Makefile.inc b/collectors/python.d.plugin/smartd_log/Makefile.inc
deleted file mode 100644
index dc1d0f3fb..000000000
--- a/collectors/python.d.plugin/smartd_log/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += smartd_log/smartd_log.chart.py
-dist_pythonconfig_DATA += smartd_log/smartd_log.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += smartd_log/README.md smartd_log/Makefile.inc
-
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
deleted file mode 120000
index 63aad6c85..000000000
--- a/collectors/python.d.plugin/smartd_log/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/s.m.a.r.t..md \ No newline at end of file
diff --git a/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md b/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md
deleted file mode 100644
index 5c5b569e9..000000000
--- a/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md
+++ /dev/null
@@ -1,223 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/smartd_log/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/smartd_log/metadata.yaml"
-sidebar_label: "S.M.A.R.T."
-learn_status: "Published"
-learn_rel_path: "Data Collection/Hardware Devices and Sensors"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# S.M.A.R.T.
-
-
-<img src="https://netdata.cloud/img/smart.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: smartd_log
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors HDD/SSD S.M.A.R.T. metrics about drive health and performance.
-
-
-It reads `smartd` log files to collect the metrics.
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-Upon satisfying the prerequisites, the collector will auto-detect metrics if the `smartd` log files are written to either `/var/log/smartd/` or `/var/lib/smartmontools/`.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-The metrics listed below are split by their availability on device type, SCSI or ATA.
-
-### Per S.M.A.R.T. instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit | SCSI | ATA |
-|:------|:----------|:----|:---:|:---:|
-| smartd_log.read_error_rate | a dimension per device | value | | • |
-| smartd_log.seek_error_rate | a dimension per device | value | | • |
-| smartd_log.soft_read_error_rate | a dimension per device | errors | | • |
-| smartd_log.write_error_rate | a dimension per device | value | | • |
-| smartd_log.read_total_err_corrected | a dimension per device | errors | • | |
-| smartd_log.read_total_unc_errors | a dimension per device | errors | • | |
-| smartd_log.write_total_err_corrected | a dimension per device | errors | • | |
-| smartd_log.write_total_unc_errors | a dimension per device | errors | • | |
-| smartd_log.verify_total_err_corrected | a dimension per device | errors | • | |
-| smartd_log.verify_total_unc_errors | a dimension per device | errors | • | |
-| smartd_log.sata_interface_downshift | a dimension per device | events | | • |
-| smartd_log.udma_crc_error_count | a dimension per device | errors | | • |
-| smartd_log.throughput_performance | a dimension per device | value | | • |
-| smartd_log.seek_time_performance | a dimension per device | value | | • |
-| smartd_log.start_stop_count | a dimension per device | events | | • |
-| smartd_log.power_on_hours_count | a dimension per device | hours | | • |
-| smartd_log.power_cycle_count | a dimension per device | events | | • |
-| smartd_log.unexpected_power_loss | a dimension per device | events | | • |
-| smartd_log.spin_up_time | a dimension per device | ms | | • |
-| smartd_log.spin_up_retries | a dimension per device | retries | | • |
-| smartd_log.calibration_retries | a dimension per device | retries | | • |
-| smartd_log.airflow_temperature_celsius | a dimension per device | celsius | | • |
-| smartd_log.temperature_celsius | a dimension per device | celsius | • | • |
-| smartd_log.reallocated_sectors_count | a dimension per device | sectors | | • |
-| smartd_log.reserved_block_count | a dimension per device | percentage | | • |
-| smartd_log.program_fail_count | a dimension per device | errors | | • |
-| smartd_log.erase_fail_count | a dimension per device | failures | | • |
-| smartd_log.wear_leveller_worst_case_erase_count | a dimension per device | erases | | • |
-| smartd_log.unused_reserved_nand_blocks | a dimension per device | blocks | | • |
-| smartd_log.reallocation_event_count | a dimension per device | events | | • |
-| smartd_log.current_pending_sector_count | a dimension per device | sectors | | • |
-| smartd_log.offline_uncorrectable_sector_count | a dimension per device | sectors | | • |
-| smartd_log.percent_lifetime_used | a dimension per device | percentage | | • |
-| smartd_log.media_wearout_indicator | a dimension per device | percentage | | • |
-| smartd_log.nand_writes_1gib | a dimension per device | GiB | | • |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Configure `smartd` to write attribute information to files.
-
-`smartd` must be running with the `-A` option to write `smartd` attribute information to files.
-
-For this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`:
-
-```
-# dump smartd attrs info every 600 seconds
-smartd_opts="-A /var/log/smartd/ -i 600"
-```
-
-You may need to create the smartd directory before `smartd` can write to it:
-
-```sh
-mkdir -p /var/log/smartd
-```
-
-Otherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (default location). See also <https://linux.die.net/man/8/smartd> for more info on the `-A --attributelog=PREFIX` option.
-
-`smartd` appends logs at every run. It's strongly recommended to use `logrotate` for smartd files.
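-
-As a sketch, a `logrotate` rule for these files (assuming the `/var/log/smartd/` location configured above) could look like this:
-
-```
-/var/log/smartd/*.csv {
-    weekly
-    rotate 4
-    compress
-    missingok
-    notifempty
-}
-```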
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/smartd_log.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/smartd_log.conf
-```
-#### Options
-
-This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| log_path | path to smartd log files. | /var/log/smartd | yes |
-| exclude_disks | Space-separated patterns. If the pattern is in the drive name, the module will not collect data for it. | | no |
-| age | Time in minutes since the last dump to file. Log files not updated within this window are ignored. | 30 | no |
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic configuration example.
-
-```yaml
-custom:
- name: smartd_log
- log_path: '/var/log/smartd/'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `smartd_log` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin smartd_log debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/smartd_log/metadata.yaml b/collectors/python.d.plugin/smartd_log/metadata.yaml
deleted file mode 100644
index d11949691..000000000
--- a/collectors/python.d.plugin/smartd_log/metadata.yaml
+++ /dev/null
@@ -1,429 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: smartd_log
- monitored_instance:
- name: S.M.A.R.T.
- link: "https://linux.die.net/man/8/smartd"
- categories:
- - data-collection.hardware-devices-and-sensors
- icon_filename: "smart.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - smart
- - S.M.A.R.T.
- - SCSI devices
- - ATA devices
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors HDD/SSD S.M.A.R.T. metrics about drive health and performance.
- method_description: |
- It reads `smartd` log files to collect the metrics.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: Upon satisfying the prerequisites, the collector will auto-detect metrics if the `smartd` log files are written to either `/var/log/smartd/` or `/var/lib/smartmontools/`.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Configure `smartd` to write attribute information to files.
- description: |
- `smartd` must be running with the `-A` option to write `smartd` attribute information to files.
-
- For this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`:
-
- ```
- # dump smartd attrs info every 600 seconds
- smartd_opts="-A /var/log/smartd/ -i 600"
- ```
-
- You may need to create the smartd directory before `smartd` can write to it:
-
- ```sh
- mkdir -p /var/log/smartd
- ```
-
- Otherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (default location). See also <https://linux.die.net/man/8/smartd> for more info on the `-A --attributelog=PREFIX` option.
-
- `smartd` appends logs at every run. It's strongly recommended to use `logrotate` for smartd files.
- configuration:
- file:
- name: "python.d/smartd_log.conf"
- options:
- description: |
- This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
-
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: log_path
- description: path to smartd log files.
- default_value: /var/log/smartd
- required: true
- - name: exclude_disks
- description: Space-separated patterns. If the pattern is in the drive name, the module will not collect data for it.
- default_value: ""
- required: false
- - name: age
- description: Time in minutes since the last dump to file. Log files not updated within this window are ignored.
- default_value: 30
- required: false
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: >
- Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic configuration example.
- folding:
- enabled: false
- config: |
- custom:
- name: smartd_log
- log_path: '/var/log/smartd/'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: "The metrics listed below are split in terms of availability on device type, SCSI or ATA."
- availability:
- - "SCSI"
- - "ATA"
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: smartd_log.read_error_rate
- description: Read Error Rate
- availability:
- - ATA
- unit: "value"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.seek_error_rate
- description: Seek Error Rate
- availability:
- - ATA
- unit: "value"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.soft_read_error_rate
- description: Soft Read Error Rate
- availability:
- - ATA
- unit: "errors"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.write_error_rate
- description: Write Error Rate
- availability:
- - ATA
- unit: "value"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.read_total_err_corrected
- description: Read Error Corrected
- availability:
- - SCSI
- unit: "errors"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.read_total_unc_errors
- description: Read Error Uncorrected
- availability:
- - SCSI
- unit: "errors"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.write_total_err_corrected
- description: Write Error Corrected
- availability:
- - SCSI
- unit: "errors"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.write_total_unc_errors
- description: Write Error Uncorrected
- availability:
- - SCSI
- unit: "errors"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.verify_total_err_corrected
- description: Verify Error Corrected
- availability:
- - SCSI
- unit: "errors"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.verify_total_unc_errors
- description: Verify Error Uncorrected
- availability:
- - SCSI
- unit: "errors"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.sata_interface_downshift
- description: SATA Interface Downshift
- availability:
- - ATA
- unit: "events"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.udma_crc_error_count
- description: UDMA CRC Error Count
- availability:
- - ATA
- unit: "errors"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.throughput_performance
- description: Throughput Performance
- availability:
- - ATA
- unit: "value"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.seek_time_performance
- description: Seek Time Performance
- availability:
- - ATA
- unit: "value"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.start_stop_count
- description: Start/Stop Count
- availability:
- - ATA
- unit: "events"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.power_on_hours_count
- description: Power-On Hours Count
- availability:
- - ATA
- unit: "hours"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.power_cycle_count
- description: Power Cycle Count
- availability:
- - ATA
- unit: "events"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.unexpected_power_loss
- description: Unexpected Power Loss
- availability:
- - ATA
- unit: "events"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.spin_up_time
- description: Spin-Up Time
- availability:
- - ATA
- unit: "ms"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.spin_up_retries
- description: Spin-up Retries
- availability:
- - ATA
- unit: "retries"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.calibration_retries
- description: Calibration Retries
- availability:
- - ATA
- unit: "retries"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.airflow_temperature_celsius
- description: Airflow Temperature Celsius
- availability:
- - ATA
- unit: "celsius"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.temperature_celsius
- description: Temperature
- availability:
- - SCSI
- - ATA
- unit: "celsius"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.reallocated_sectors_count
- description: Reallocated Sectors Count
- availability:
- - ATA
- unit: "sectors"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.reserved_block_count
- description: Reserved Block Count
- availability:
- - ATA
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.program_fail_count
- description: Program Fail Count
- availability:
- - ATA
- unit: "errors"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.erase_fail_count
- description: Erase Fail Count
- availability:
- - ATA
- unit: "failures"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.wear_leveller_worst_case_erase_count
- description: Wear Leveller Worst Case Erase Count
- availability:
- - ATA
- unit: "erases"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.unused_reserved_nand_blocks
- description: Unused Reserved NAND Blocks
- availability:
- - ATA
- unit: "blocks"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.reallocation_event_count
- description: Reallocation Event Count
- availability:
- - ATA
- unit: "events"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.current_pending_sector_count
- description: Current Pending Sector Count
- availability:
- - ATA
- unit: "sectors"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.offline_uncorrectable_sector_count
- description: Offline Uncorrectable Sector Count
- availability:
- - ATA
- unit: "sectors"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.percent_lifetime_used
- description: Percent Lifetime Used
- availability:
- - ATA
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.media_wearout_indicator
- description: Media Wearout Indicator
- availability:
- - ATA
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: a dimension per device
- - name: smartd_log.nand_writes_1gib
- description: NAND Writes
- availability:
- - ATA
- unit: "GiB"
- chart_type: line
- dimensions:
- - name: a dimension per device
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
deleted file mode 100644
index a896164df..000000000
--- a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
+++ /dev/null
@@ -1,790 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: smart netdata python.d module
-# Author: ilyam8, vorph1
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-import re
-from copy import deepcopy
-from time import time
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from bases.collection import read_last_line
-
-INCREMENTAL = 'incremental'
-ABSOLUTE = 'absolute'
-
-ATA = 'ata'
-SCSI = 'scsi'
-CSV = '.csv'
-
-DEF_RESCAN_INTERVAL = 60
-DEF_AGE = 30
-DEF_PATH = '/var/log/smartd'
-
-ATTR1 = '1'
-ATTR2 = '2'
-ATTR3 = '3'
-ATTR4 = '4'
-ATTR5 = '5'
-ATTR7 = '7'
-ATTR8 = '8'
-ATTR9 = '9'
-ATTR10 = '10'
-ATTR11 = '11'
-ATTR12 = '12'
-ATTR13 = '13'
-ATTR170 = '170'
-ATTR171 = '171'
-ATTR172 = '172'
-ATTR173 = '173'
-ATTR174 = '174'
-ATTR177 = '177'
-ATTR180 = '180'
-ATTR183 = '183'
-ATTR190 = '190'
-ATTR194 = '194'
-ATTR196 = '196'
-ATTR197 = '197'
-ATTR198 = '198'
-ATTR199 = '199'
-ATTR202 = '202'
-ATTR206 = '206'
-ATTR233 = '233'
-ATTR241 = '241'
-ATTR242 = '242'
-ATTR249 = '249'
-ATTR_READ_ERR_COR = 'read-total-err-corrected'
-ATTR_READ_ERR_UNC = 'read-total-unc-errors'
-ATTR_WRITE_ERR_COR = 'write-total-err-corrected'
-ATTR_WRITE_ERR_UNC = 'write-total-unc-errors'
-ATTR_VERIFY_ERR_COR = 'verify-total-err-corrected'
-ATTR_VERIFY_ERR_UNC = 'verify-total-unc-errors'
-ATTR_TEMPERATURE = 'temperature'
-
-RE_ATA = re.compile(
- r'(\d+);' # attribute
- r'(\d+);' # normalized value
- r'(\d+)', # raw value
- re.X
-)
-
-RE_SCSI = re.compile(
- r'([a-z-]+);' # attribute
- r'([0-9.]+)', # raw value
- re.X
-)
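-
-# Illustrative inputs (hypothetical fragments of a smartd attrlog line):
-#   RE_ATA.findall('1;100;0;') -> [('1', '100', '0')]
-#   RE_SCSI.findall('read-total-err-corrected;0;') -> [('read-total-err-corrected', '0')]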
-
-ORDER = [
- # errors
- 'read_error_rate',
- 'seek_error_rate',
- 'soft_read_error_rate',
- 'write_error_rate',
- 'read_total_err_corrected',
- 'read_total_unc_errors',
- 'write_total_err_corrected',
- 'write_total_unc_errors',
- 'verify_total_err_corrected',
- 'verify_total_unc_errors',
- # external failure
- 'sata_interface_downshift',
- 'udma_crc_error_count',
- # performance
- 'throughput_performance',
- 'seek_time_performance',
- # power
- 'start_stop_count',
- 'power_on_hours_count',
- 'power_cycle_count',
- 'unexpected_power_loss',
- # spin
- 'spin_up_time',
- 'spin_up_retries',
- 'calibration_retries',
- # temperature
- 'airflow_temperature_celsius',
- 'temperature_celsius',
- # wear
- 'reallocated_sectors_count',
- 'reserved_block_count',
- 'program_fail_count',
- 'erase_fail_count',
- 'wear_leveller_worst_case_erase_count',
- 'unused_reserved_nand_blocks',
- 'reallocation_event_count',
- 'current_pending_sector_count',
- 'offline_uncorrectable_sector_count',
- 'percent_lifetime_used',
- 'media_wearout_indicator',
- 'total_lbas_written',
- 'total_lbas_read',
-]
-
-CHARTS = {
- 'read_error_rate': {
- 'options': [None, 'Read Error Rate', 'value', 'errors', 'smartd_log.read_error_rate', 'line'],
- 'lines': [],
- 'attrs': [ATTR1],
- 'algo': ABSOLUTE,
- },
- 'seek_error_rate': {
- 'options': [None, 'Seek Error Rate', 'value', 'errors', 'smartd_log.seek_error_rate', 'line'],
- 'lines': [],
- 'attrs': [ATTR7],
- 'algo': ABSOLUTE,
- },
- 'soft_read_error_rate': {
- 'options': [None, 'Soft Read Error Rate', 'errors', 'errors', 'smartd_log.soft_read_error_rate', 'line'],
- 'lines': [],
- 'attrs': [ATTR13],
- 'algo': INCREMENTAL,
- },
- 'write_error_rate': {
- 'options': [None, 'Write Error Rate', 'value', 'errors', 'smartd_log.write_error_rate', 'line'],
- 'lines': [],
- 'attrs': [ATTR206],
- 'algo': ABSOLUTE,
- },
- 'read_total_err_corrected': {
- 'options': [None, 'Read Error Corrected', 'errors', 'errors', 'smartd_log.read_total_err_corrected', 'line'],
- 'lines': [],
- 'attrs': [ATTR_READ_ERR_COR],
- 'algo': INCREMENTAL,
- },
- 'read_total_unc_errors': {
- 'options': [None, 'Read Error Uncorrected', 'errors', 'errors', 'smartd_log.read_total_unc_errors', 'line'],
- 'lines': [],
- 'attrs': [ATTR_READ_ERR_UNC],
- 'algo': INCREMENTAL,
- },
- 'write_total_err_corrected': {
- 'options': [None, 'Write Error Corrected', 'errors', 'errors', 'smartd_log.write_total_err_corrected', 'line'],
- 'lines': [],
- 'attrs': [ATTR_WRITE_ERR_COR],
- 'algo': INCREMENTAL,
- },
- 'write_total_unc_errors': {
- 'options': [None, 'Write Error Uncorrected', 'errors', 'errors', 'smartd_log.write_total_unc_errors', 'line'],
- 'lines': [],
- 'attrs': [ATTR_WRITE_ERR_UNC],
- 'algo': INCREMENTAL,
- },
- 'verify_total_err_corrected': {
- 'options': [None, 'Verify Error Corrected', 'errors', 'errors', 'smartd_log.verify_total_err_corrected',
- 'line'],
- 'lines': [],
- 'attrs': [ATTR_VERIFY_ERR_COR],
- 'algo': INCREMENTAL,
- },
- 'verify_total_unc_errors': {
- 'options': [None, 'Verify Error Uncorrected', 'errors', 'errors', 'smartd_log.verify_total_unc_errors', 'line'],
- 'lines': [],
- 'attrs': [ATTR_VERIFY_ERR_UNC],
- 'algo': INCREMENTAL,
- },
- 'sata_interface_downshift': {
- 'options': [None, 'SATA Interface Downshift', 'events', 'external failure',
- 'smartd_log.sata_interface_downshift', 'line'],
- 'lines': [],
- 'attrs': [ATTR183],
- 'algo': INCREMENTAL,
- },
- 'udma_crc_error_count': {
- 'options': [None, 'UDMA CRC Error Count', 'errors', 'external failure', 'smartd_log.udma_crc_error_count',
- 'line'],
- 'lines': [],
- 'attrs': [ATTR199],
- 'algo': INCREMENTAL,
- },
- 'throughput_performance': {
- 'options': [None, 'Throughput Performance', 'value', 'performance', 'smartd_log.throughput_performance',
- 'line'],
- 'lines': [],
- 'attrs': [ATTR2],
- 'algo': ABSOLUTE,
- },
- 'seek_time_performance': {
- 'options': [None, 'Seek Time Performance', 'value', 'performance', 'smartd_log.seek_time_performance', 'line'],
- 'lines': [],
- 'attrs': [ATTR8],
- 'algo': ABSOLUTE,
- },
- 'start_stop_count': {
- 'options': [None, 'Start/Stop Count', 'events', 'power', 'smartd_log.start_stop_count', 'line'],
- 'lines': [],
- 'attrs': [ATTR4],
- 'algo': ABSOLUTE,
- },
- 'power_on_hours_count': {
- 'options': [None, 'Power-On Hours Count', 'hours', 'power', 'smartd_log.power_on_hours_count', 'line'],
- 'lines': [],
- 'attrs': [ATTR9],
- 'algo': ABSOLUTE,
- },
- 'power_cycle_count': {
- 'options': [None, 'Power Cycle Count', 'events', 'power', 'smartd_log.power_cycle_count', 'line'],
- 'lines': [],
- 'attrs': [ATTR12],
- 'algo': ABSOLUTE,
- },
- 'unexpected_power_loss': {
- 'options': [None, 'Unexpected Power Loss', 'events', 'power', 'smartd_log.unexpected_power_loss', 'line'],
- 'lines': [],
- 'attrs': [ATTR174],
- 'algo': ABSOLUTE,
- },
- 'spin_up_time': {
- 'options': [None, 'Spin-Up Time', 'ms', 'spin', 'smartd_log.spin_up_time', 'line'],
- 'lines': [],
- 'attrs': [ATTR3],
- 'algo': ABSOLUTE,
- },
- 'spin_up_retries': {
- 'options': [None, 'Spin-up Retries', 'retries', 'spin', 'smartd_log.spin_up_retries', 'line'],
- 'lines': [],
- 'attrs': [ATTR10],
- 'algo': INCREMENTAL,
- },
- 'calibration_retries': {
- 'options': [None, 'Calibration Retries', 'retries', 'spin', 'smartd_log.calibration_retries', 'line'],
- 'lines': [],
- 'attrs': [ATTR11],
- 'algo': INCREMENTAL,
- },
- 'airflow_temperature_celsius': {
- 'options': [None, 'Airflow Temperature Celsius', 'celsius', 'temperature',
- 'smartd_log.airflow_temperature_celsius', 'line'],
- 'lines': [],
- 'attrs': [ATTR190],
- 'algo': ABSOLUTE,
- },
- 'temperature_celsius': {
- 'options': [None, 'Temperature', 'celsius', 'temperature', 'smartd_log.temperature_celsius', 'line'],
- 'lines': [],
- 'attrs': [ATTR194, ATTR_TEMPERATURE],
- 'algo': ABSOLUTE,
- },
- 'reallocated_sectors_count': {
- 'options': [None, 'Reallocated Sectors Count', 'sectors', 'wear', 'smartd_log.reallocated_sectors_count',
- 'line'],
- 'lines': [],
- 'attrs': [ATTR5],
- 'algo': ABSOLUTE,
- },
- 'reserved_block_count': {
- 'options': [None, 'Reserved Block Count', 'percentage', 'wear', 'smartd_log.reserved_block_count', 'line'],
- 'lines': [],
- 'attrs': [ATTR170],
- 'algo': ABSOLUTE,
- },
- 'program_fail_count': {
- 'options': [None, 'Program Fail Count', 'errors', 'wear', 'smartd_log.program_fail_count', 'line'],
- 'lines': [],
- 'attrs': [ATTR171],
- 'algo': INCREMENTAL,
- },
- 'erase_fail_count': {
- 'options': [None, 'Erase Fail Count', 'failures', 'wear', 'smartd_log.erase_fail_count', 'line'],
- 'lines': [],
- 'attrs': [ATTR172],
- 'algo': INCREMENTAL,
- },
- 'wear_leveller_worst_case_erase_count': {
- 'options': [None, 'Wear Leveller Worst Case Erase Count', 'erases', 'wear',
- 'smartd_log.wear_leveller_worst_case_erase_count', 'line'],
- 'lines': [],
- 'attrs': [ATTR173],
- 'algo': ABSOLUTE,
- },
- 'unused_reserved_nand_blocks': {
- 'options': [None, 'Unused Reserved NAND Blocks', 'blocks', 'wear', 'smartd_log.unused_reserved_nand_blocks',
- 'line'],
- 'lines': [],
- 'attrs': [ATTR180],
- 'algo': ABSOLUTE,
- },
- 'reallocation_event_count': {
- 'options': [None, 'Reallocation Event Count', 'events', 'wear', 'smartd_log.reallocation_event_count', 'line'],
- 'lines': [],
- 'attrs': [ATTR196],
- 'algo': INCREMENTAL,
- },
- 'current_pending_sector_count': {
- 'options': [None, 'Current Pending Sector Count', 'sectors', 'wear', 'smartd_log.current_pending_sector_count',
- 'line'],
- 'lines': [],
- 'attrs': [ATTR197],
- 'algo': ABSOLUTE,
- },
- 'offline_uncorrectable_sector_count': {
- 'options': [None, 'Offline Uncorrectable Sector Count', 'sectors', 'wear',
- 'smartd_log.offline_uncorrectable_sector_count', 'line'],
- 'lines': [],
- 'attrs': [ATTR198],
- 'algo': ABSOLUTE,
-
- },
- 'percent_lifetime_used': {
- 'options': [None, 'Percent Lifetime Used', 'percentage', 'wear', 'smartd_log.percent_lifetime_used', 'line'],
- 'lines': [],
- 'attrs': [ATTR202],
- 'algo': ABSOLUTE,
- },
- 'media_wearout_indicator': {
- 'options': [None, 'Media Wearout Indicator', 'percentage', 'wear', 'smartd_log.media_wearout_indicator', 'line'],
- 'lines': [],
- 'attrs': [ATTR233, ATTR177],
- 'algo': ABSOLUTE,
- },
- 'nand_writes_1gib': {
- 'options': [None, 'NAND Writes', 'GiB', 'wear', 'smartd_log.nand_writes_1gib', 'line'],
- 'lines': [],
- 'attrs': [ATTR249],
- 'algo': ABSOLUTE,
- },
- 'total_lbas_written': {
- 'options': [None, 'Total LBAs Written', 'sectors', 'wear', 'smartd_log.total_lbas_written', 'line'],
- 'lines': [],
- 'attrs': [ATTR241],
- 'algo': ABSOLUTE,
- },
- 'total_lbas_read': {
- 'options': [None, 'Total LBAs Read', 'sectors', 'wear', 'smartd_log.total_lbas_read', 'line'],
- 'lines': [],
- 'attrs': [ATTR242],
- 'algo': ABSOLUTE,
- },
-}
-
-# NOTE: 'parse_temp' decodes ATA 194 raw value. Not heavily tested. Written by @Ferroin
-# C code:
-# https://github.com/smartmontools/smartmontools/blob/master/smartmontools/atacmds.cpp#L2051
-#
-# Calling 'parse_temp' on the raw value will return a 4-tuple, containing
-# * temperature
-# * minimum
-# * maximum
-# * over-temperature count
-# substituting None for values it can't decode.
-#
-# Example:
-# >>> parse_temp(42952491042)
-# >>> (34, 10, 43, None)
-#
-#
-# def check_temp_word(i):
-# if i <= 0x7F:
-# return 0x11
-# elif i <= 0xFF:
-# return 0x01
-# elif 0xFF80 <= i:
-# return 0x10
-# return 0x00
-#
-#
-# def check_temp_range(t, b0, b1):
-# if b0 > b1:
-# t0, t1 = b1, b0
-# else:
-# t0, t1 = b0, b1
-#
-# if all([
-# -60 <= t0,
-# t0 <= t,
-# t <= t1,
-# t1 <= 120,
-# not (t0 == -1 and t1 <= 0)
-# ]):
-# return t0, t1
-# return None, None
-#
-#
-# def parse_temp(raw):
-# byte = list()
-# word = list()
-# for i in range(0, 6):
-# byte.append(0xFF & (raw >> (i * 8)))
-# for i in range(0, 3):
-# word.append(0xFFFF & (raw >> (i * 16)))
-#
-# ctwd = check_temp_word(word[0])
-#
-# if not word[2]:
-# if ctwd and not word[1]:
-# # byte[0] is temp, no other data
-# return byte[0], None, None, None
-#
-# if ctwd and all(check_temp_range(byte[0], byte[2], byte[3])):
-# # byte[0] is temp, byte[2] is max or min, byte[3] is min or max
-# trange = check_temp_range(byte[0], byte[2], byte[3])
-# return byte[0], trange[0], trange[1], None
-#
-# if ctwd and all(check_temp_range(byte[0], byte[1], byte[2])):
-# # byte[0] is temp, byte[1] is max or min, byte[2] is min or max
-# trange = check_temp_range(byte[0], byte[1], byte[2])
-# return byte[0], trange[0], trange[1], None
-#
-# return None, None, None, None
-#
-# if ctwd:
-# if all(
-# [
-# ctwd & check_temp_word(word[1]) & check_temp_word(word[2]) != 0x00,
-# all(check_temp_range(byte[0], byte[2], byte[4])),
-# ]
-# ):
-# # byte[0] is temp, byte[2] is max or min, byte[4] is min or max
-# trange = check_temp_range(byte[0], byte[2], byte[4])
-# return byte[0], trange[0], trange[1], None
-# else:
-# trange = check_temp_range(byte[0], byte[2], byte[3])
-# if word[2] < 0x7FFF and all(trange) and trange[1] >= 40:
-# # byte[0] is temp, byte[2] is max or min, byte[3] is min or max, word[2] is overtemp count
-# return byte[0], trange[0], trange[1], word[2]
-# # no data
-# return None, None, None, None
-
-
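-# Map each charted attribute id/name to its chart, e.g. CHARTED_ATTRS['194'] == 'temperature_celsius'.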
-CHARTED_ATTRS = dict((attr, k) for k, v in CHARTS.items() for attr in v['attrs'])
-
-
-class BaseAtaSmartAttribute:
- def __init__(self, name, normalized_value, raw_value):
- self.name = name
- self.normalized_value = normalized_value
- self.raw_value = raw_value
-
- def value(self):
- raise NotImplementedError
-
-
-class AtaRaw(BaseAtaSmartAttribute):
- def value(self):
- return self.raw_value
-
-
-class AtaNormalized(BaseAtaSmartAttribute):
- def value(self):
- return self.normalized_value
-
-
-class Ata3(BaseAtaSmartAttribute):
- def value(self):
- value = int(self.raw_value)
- # https://github.com/netdata/netdata/issues/5919
- #
- # 3;151;38684000679;
- # 423 (Average 447)
- # 38684000679 & 0xFFF -> 423
- # (38684000679 & 0xFFF0000) >> 16 -> 447
- if value > 1e6:
- return value & 0xFFF
- return value
-
-
-class Ata9(BaseAtaSmartAttribute):
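- # Power-On Hours (attribute 9): some firmwares pack extra data into the upper bytes; keep only the low 16 bits.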
- def value(self):
- value = int(self.raw_value)
- if value > 1e6:
- return value & 0xFFFF
- return value
-
-
-class Ata190(BaseAtaSmartAttribute):
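- # Airflow Temperature (attribute 190): the normalized value is commonly reported as 100 minus the temperature in Celsius.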
- def value(self):
- return 100 - int(self.normalized_value)
-
-
-class Ata194(BaseAtaSmartAttribute):
- # https://github.com/netdata/netdata/issues/3041
- # https://github.com/netdata/netdata/issues/5919
- #
- # The low byte is the current temperature, the third lowest is the maximum, and the fifth lowest is the minimum
- def value(self):
- value = int(self.raw_value)
- if value > 1e6:
- return value & 0xFF
- return min(int(self.normalized_value), int(self.raw_value))
-
-
-class BaseSCSISmartAttribute:
- def __init__(self, name, raw_value):
- self.name = name
- self.raw_value = raw_value
-
- def value(self):
- raise NotImplementedError
-
-
-class SCSIRaw(BaseSCSISmartAttribute):
- def value(self):
- return self.raw_value
-
-
-def ata_attribute_factory(value):
- name = value[0]
-
- if name == ATTR3:
- return Ata3(*value)
- elif name == ATTR9:
- return Ata9(*value)
- elif name == ATTR190:
- return Ata190(*value)
- elif name == ATTR194:
- return Ata194(*value)
- elif name in [
- ATTR1,
- ATTR7,
- ATTR177,
- ATTR202,
- ATTR206,
- ATTR233,
- ]:
- return AtaNormalized(*value)
-
- return AtaRaw(*value)
-
-
-def scsi_attribute_factory(value):
- return SCSIRaw(*value)
-
-
-def attribute_factory(value):
- name = value[0]
- if name.isdigit():
- return ata_attribute_factory(value)
- return scsi_attribute_factory(value)
-
-
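-# Decorator factory: call the wrapped method and return None instead of raising any of the given exception types.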
-def handle_error(*errors):
- def on_method(method):
- def on_call(*args):
- try:
- return method(*args)
- except errors:
- return None
-
- return on_call
-
- return on_method
-
-
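-# Wraps a single smartd attrlog file; a change in file size signals that new samples were appended.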
-class DiskLogFile:
- def __init__(self, full_path):
- self.path = full_path
- self.size = os.path.getsize(full_path)
-
- @handle_error(OSError)
- def is_changed(self):
- return self.size != os.path.getsize(self.path)
-
- @handle_error(OSError)
- def is_active(self, current_time, limit):
- return (current_time - os.path.getmtime(self.path)) / 60 < limit
-
- @handle_error(OSError)
- def read(self):
- self.size = os.path.getsize(self.path)
- return read_last_line(self.path)
-
-
-class BaseDisk:
- def __init__(self, name, log_file):
- self.raw_name = name
- self.name = re.sub(r'_+', '_', name)
- self.log_file = log_file
- self.attrs = list()
- self.alive = True
- self.charted = False
-
- def __eq__(self, other):
- if isinstance(other, BaseDisk):
- return self.raw_name == other.raw_name
- return self.raw_name == other
-
- def __ne__(self, other):
- return not self == other
-
- def __hash__(self):
- return hash(repr(self))
-
- def parser(self, data):
- raise NotImplementedError
-
- @handle_error(TypeError)
- def populate_attrs(self):
- self.attrs = list()
- line = self.log_file.read()
- for value in self.parser(line):
- self.attrs.append(attribute_factory(value))
-
- return len(self.attrs)
-
- def data(self):
- data = dict()
- for attr in self.attrs:
- data['{0}_{1}'.format(self.name, attr.name)] = attr.value()
- return data
-
-
-class ATADisk(BaseDisk):
- def parser(self, data):
- return RE_ATA.findall(data)
-
-
-class SCSIDisk(BaseDisk):
- def parser(self, data):
- return RE_SCSI.findall(data)
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = deepcopy(CHARTS)
- self.log_path = configuration.get('log_path', DEF_PATH)
- self.age = configuration.get('age', DEF_AGE)
- self.exclude = configuration.get('exclude_disks', str()).split()
- self.disks = list()
- self.runs = 0
- self.do_force_rescan = False
-
- def check(self):
- return self.scan() > 0
-
- def get_data(self):
- self.runs += 1
-
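- # every DEF_RESCAN_INTERVAL runs (or when forced), drop stale disks and rescan the log directory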
- if self.do_force_rescan or self.runs % DEF_RESCAN_INTERVAL == 0:
- self.cleanup()
- self.scan()
- self.do_force_rescan = False
-
- data = dict()
-
- for disk in self.disks:
- if not disk.alive:
- continue
-
- if not disk.charted:
- self.add_disk_to_charts(disk)
-
- changed = disk.log_file.is_changed()
-
- if changed is None:
- disk.alive = False
- self.do_force_rescan = True
- continue
-
- if changed and disk.populate_attrs() is None:
- disk.alive = False
- self.do_force_rescan = True
- continue
-
- data.update(disk.data())
-
- return data
-
- def cleanup(self):
- current_time = time()
- for disk in self.disks[:]:
- if any(
- [
- not disk.alive,
- not disk.log_file.is_active(current_time, self.age),
- ]
- ):
- self.disks.remove(disk.raw_name)
- self.remove_disk_from_charts(disk)
-
- def scan(self):
- self.debug('scanning {0}'.format(self.log_path))
- current_time = time()
-
- for full_name in os.listdir(self.log_path):
- disk = self.create_disk_from_file(full_name, current_time)
- if not disk:
- continue
- self.disks.append(disk)
-
- return len(self.disks)
-
- def create_disk_from_file(self, full_name, current_time):
- if not full_name.endswith(CSV):
- self.debug('skipping {0}: not a csv file'.format(full_name))
- return None
-
- name = os.path.basename(full_name).split('.')[-3]
- path = os.path.join(self.log_path, full_name)
-
- if name in self.disks:
- self.debug('skipping {0}: already in disks'.format(full_name))
- return None
-
- if [p for p in self.exclude if p in name]:
- self.debug('skipping {0}: filtered by `exclude` option'.format(full_name))
- return None
-
- if not os.access(path, os.R_OK):
- self.debug('skipping {0}: not readable'.format(full_name))
- return None
-
- if os.path.getsize(path) == 0:
- self.debug('skipping {0}: zero size'.format(full_name))
- return None
-
- if (current_time - os.path.getmtime(path)) / 60 > self.age:
- self.debug('skipping {0}: has not been updated in the last {1} minutes'.format(full_name, self.age))
- return None
-
- if ATA in full_name:
- disk = ATADisk(name, DiskLogFile(path))
- elif SCSI in full_name:
- disk = SCSIDisk(name, DiskLogFile(path))
- else:
- self.debug('skipping {0}: unknown type'.format(full_name))
- return None
-
- disk.populate_attrs()
- if not disk.attrs:
- self.error('skipping {0}: parsing failed'.format(full_name))
- return None
-
- self.debug('added {0}'.format(full_name))
- return disk
-
- def add_disk_to_charts(self, disk):
- if len(self.charts) == 0 or disk.charted:
- return
- disk.charted = True
-
- for attr in disk.attrs:
- chart_id = CHARTED_ATTRS.get(attr.name)
-
- if not chart_id or chart_id not in self.charts:
- continue
-
- chart = self.charts[chart_id]
- dim = [
- '{0}_{1}'.format(disk.name, attr.name),
- disk.name,
- CHARTS[chart_id]['algo'],
- ]
-
- if dim[0] in self.charts[chart_id].dimensions:
- chart.hide_dimension(dim[0], reverse=True)
- else:
- chart.add_dimension(dim)
-
- def remove_disk_from_charts(self, disk):
- if len(self.charts) == 0 or not disk.charted:
- return
-
- for attr in disk.attrs:
- chart_id = CHARTED_ATTRS.get(attr.name)
-
- if not chart_id or chart_id not in self.charts:
- continue
-
- self.charts[chart_id].del_dimension('{0}_{1}'.format(disk.name, attr.name))
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf
deleted file mode 100644
index 3e81317f1..000000000
--- a/collectors/python.d.plugin/smartd_log/smartd_log.conf
+++ /dev/null
@@ -1,76 +0,0 @@
-# netdata python.d.plugin configuration for smartd log
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, smartd_log also supports the following:
-#
-# log_path: '/path/to/smartd_logs' # path to smartd log files. Default is /var/log/smartd
-# exclude_disks: 'PATTERN1 PATTERN2' # space separated patterns. If the pattern is in the drive name, the module will not collect data for it.
-# age: 30 # time in minutes since the last dump to file. If smartd has not dumped data within this time the job exits.
-#
-# ----------------------------------------------------------------------
-
-custom:
- name: smartd_log
- log_path: '/var/log/smartd/'
-
-debian:
- name: smartd_log
- log_path: '/var/lib/smartmontools/'
diff --git a/collectors/python.d.plugin/spigotmc/Makefile.inc b/collectors/python.d.plugin/spigotmc/Makefile.inc
deleted file mode 100644
index f9fa8b6b0..000000000
--- a/collectors/python.d.plugin/spigotmc/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += spigotmc/spigotmc.chart.py
-dist_pythonconfig_DATA += spigotmc/spigotmc.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += spigotmc/README.md spigotmc/Makefile.inc
-
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
deleted file mode 120000
index 66e5c9c47..000000000
--- a/collectors/python.d.plugin/spigotmc/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/spigotmc.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md b/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
deleted file mode 100644
index 55ec8fa22..000000000
--- a/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
+++ /dev/null
@@ -1,216 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/spigotmc/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/spigotmc/metadata.yaml"
-sidebar_label: "SpigotMC"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Gaming"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# SpigotMC
-
-
-<img src="https://netdata.cloud/img/spigot.jfif" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: spigotmc
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users.
-
-
-It sends the `tps`, `list` and `online` commands to the server, and gathers the metrics from the responses.
-
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per SpigotMC instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| spigotmc.tps | 1 Minute Average, 5 Minute Average, 15 Minute Average | ticks |
-| spigotmc.users | Users | users |
-| spigotmc.mem | used, allocated, max | MiB |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Enable the Remote Console Protocol
-
-In your SpigotMC server's `server.properties` configuration file, set `enable-rcon` to `true`.
-
-This allows the server to listen for and respond to queries over the RCON protocol.
-
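-For illustration, here is a minimal sketch of the RCON exchange this collector performs. The host, port, and password below are placeholders, and the packet framing follows the Source RCON protocol used by Minecraft servers:
-
-```python
-import socket
-import struct
-
-HOST, PORT, PASSWORD = 'localhost', 25575, 'hypothetical-password'
-
-
-def rcon_packet(req_id, ptype, payload):
-    # Frame: int32 length, int32 request id, int32 type, payload, two NUL terminators.
-    body = struct.pack('<ii', req_id, ptype) + payload.encode() + b'\x00\x00'
-    return struct.pack('<i', len(body)) + body
-
-
-with socket.create_connection((HOST, PORT), timeout=5) as sock:
-    sock.sendall(rcon_packet(1, 3, PASSWORD))  # type 3: login
-    sock.recv(4096)                            # discard the login response
-    sock.sendall(rcon_packet(2, 2, 'tps'))     # type 2: run a command
-    length = struct.unpack('<i', sock.recv(4))[0]
-    sock.recv(8)                               # skip request id and packet type
-    print(sock.recv(length - 8).decode(errors='replace'))
-```
-
-If the server answers with a `TPS from last 1m, 5m, 15m: ...` line, RCON is configured correctly.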
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/spigotmc.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/spigotmc.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| host | The IP address or hostname of the host to connect to. | localhost | yes |
-| port | The port the remote console is listening on. | 25575 | yes |
-| password | Remote console password if any. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic configuration example.
-
-```yaml
-local:
-  name: local_server
-  host: 127.0.0.1
-  port: 25575
-
-```
-##### Basic Authentication
-
-An example using basic password for authentication with the remote console.
-
-<details><summary>Config</summary>
-
-```yaml
-local:
-  name: local_server_pass
-  host: 127.0.0.1
-  port: 25575
-  password: 'foobar'
-
-```
-</details>
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details><summary>Config</summary>
-
-```yaml
-local_server:
-  name: my_local_server
-  host: 127.0.0.1
-  port: 25575
-
-remote_server:
-  name: another_remote_server
-  host: 192.0.2.1
-  port: 25575
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin spigotmc debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/spigotmc/metadata.yaml b/collectors/python.d.plugin/spigotmc/metadata.yaml
deleted file mode 100644
index 5dea9f0c8..000000000
--- a/collectors/python.d.plugin/spigotmc/metadata.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: spigotmc
- monitored_instance:
- name: SpigotMC
- link: ""
- categories:
- - data-collection.gaming
- icon_filename: "spigot.jfif"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - minecraft server
- - spigotmc server
- - spigot
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
-          This collector monitors SpigotMC server performance, in the form of the ticks-per-second average, memory utilization, and active users.
-        method_description: |
-          It sends the `tps`, `list`, and `online` commands to the server and gathers the metrics from the responses.
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: By default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Enable the Remote Console Protocol
- description: |
-              In your SpigotMC server's `server.properties` configuration file, set `enable-rcon` to `true`.
-
-              This allows the server to listen for and respond to queries over the RCON protocol.
- configuration:
- file:
- name: "python.d/spigotmc.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: >
- Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed
- running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: host
-              description: The IP address or hostname of the host to connect to.
- default_value: localhost
- required: true
- - name: port
- description: The port the remote console is listening on.
- default_value: 25575
- required: true
- - name: password
- description: Remote console password if any.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic configuration example.
- folding:
- enabled: false
- config: |
-                local:
-                  name: local_server
-                  host: 127.0.0.1
-                  port: 25575
- - name: Basic Authentication
- description: An example using basic password for authentication with the remote console.
- config: |
-                local:
-                  name: local_server_pass
-                  host: 127.0.0.1
-                  port: 25575
-                  password: 'foobar'
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
-                local_server:
-                  name: my_local_server
-                  host: 127.0.0.1
-                  port: 25575
-
-                remote_server:
-                  name: another_remote_server
-                  host: 192.0.2.1
-                  port: 25575
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: spigotmc.tps
- description: Spigot Ticks Per Second
- unit: "ticks"
- chart_type: line
- dimensions:
- - name: 1 Minute Average
- - name: 5 Minute Average
- - name: 15 Minute Average
- - name: spigotmc.users
- description: Minecraft Users
- unit: "users"
- chart_type: area
- dimensions:
- - name: Users
- - name: spigotmc.mem
- description: Minecraft Memory Usage
- unit: "MiB"
- chart_type: line
- dimensions:
- - name: used
- - name: allocated
- - name: max
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
deleted file mode 100644
index 81370fb4c..000000000
--- a/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: spigotmc netdata python.d module
-# Author: Austin S. Hemmelgarn (Ferroin)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import platform
-import re
-import socket
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from third_party import mcrcon
-
-# Update only every 5 seconds because collection takes in excess of
-# 100ms sometimes, and most people won't care about second-by-second data.
-update_every = 5
-
-PRECISION = 100
-
-COMMAND_TPS = 'tps'
-COMMAND_LIST = 'list'
-COMMAND_ONLINE = 'online'
-
-ORDER = [
- 'tps',
- 'mem',
- 'users',
-]
-
-CHARTS = {
- 'tps': {
- 'options': [None, 'Spigot Ticks Per Second', 'ticks', 'spigotmc', 'spigotmc.tps', 'line'],
- 'lines': [
- ['tps1', '1 Minute Average', 'absolute', 1, PRECISION],
- ['tps5', '5 Minute Average', 'absolute', 1, PRECISION],
- ['tps15', '15 Minute Average', 'absolute', 1, PRECISION]
- ]
- },
- 'users': {
- 'options': [None, 'Minecraft Users', 'users', 'spigotmc', 'spigotmc.users', 'area'],
- 'lines': [
- ['users', 'Users', 'absolute', 1, 1]
- ]
- },
- 'mem': {
- 'options': [None, 'Minecraft Memory Usage', 'MiB', 'spigotmc', 'spigotmc.mem', 'line'],
- 'lines': [
- ['mem_used', 'used', 'absolute', 1, 1],
- ['mem_alloc', 'allocated', 'absolute', 1, 1],
- ['mem_max', 'max', 'absolute', 1, 1]
- ]
- }
-}
-
-_TPS_REGEX = re.compile(
- # Examples:
- # §6TPS from last 1m, 5m, 15m: §a*20.0, §a*20.0, §a*20.0
- # §6Current Memory Usage: §a936/65536 mb (Max: 65536 mb)
- r'^.*: .*?' # Message lead-in
-    r'(\d{1,2}\.\d+), .*?' # 1-minute TPS value
-    r'(\d{1,2}\.\d+), .*?' # 5-minute TPS value
- r'(\d{1,2}\.\d+).*?' # 15-minute TPS value
- r'(\s.*?(\d+)\/(\d+).*?: (\d+).*)?', # Current Memory Usage / Total Memory (Max Memory)
- re.MULTILINE
-)
-_LIST_REGEX = re.compile(
- # Examples:
- # There are 4 of a max 50 players online: player1, player2, player3, player4
- # §6There are §c4§6 out of maximum §c50§6 players online.
- # §6There are §c3§6/§c1§6 out of maximum §c50§6 players online.
- # §6当前有 §c4§6 个玩家在线,最大在线人数为 §c50§6 个玩家.
- # §c4§6 人のプレイヤーが接続中です。最大接続可能人数\:§c 50
- r'[^§](\d+)(?:.*?(?=/).*?[^§](\d+))?', # Current user count.
- re.X
-)
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 25575)
- self.password = self.configuration.get('password', '')
- self.console = mcrcon.MCRcon()
- self.alive = True
-
- def check(self):
- if platform.system() != 'Linux':
- self.error('Only supported on Linux.')
- return False
- try:
- self.connect()
- except (mcrcon.MCRconException, socket.error) as err:
- self.error('Error connecting.')
- self.error(repr(err))
- return False
-
- return self._get_data()
-
- def connect(self):
- self.console.connect(self.host, self.port, self.password)
-
- def reconnect(self):
-        self.error('attempting to reconnect.')
- try:
- try:
- self.console.disconnect()
- except mcrcon.MCRconException:
- pass
- self.console.connect(self.host, self.port, self.password)
- self.alive = True
- except (mcrcon.MCRconException, socket.error) as err:
- self.error('Error connecting.')
- self.error(repr(err))
- return False
- return True
-
- def is_alive(self):
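-        # getsockopt(TCP_INFO) returns struct tcp_info; read as an integer, a value of 1
-        # corresponds to tcpi_state == TCP_ESTABLISHED with nothing pending (Linux-specific check).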
- if any(
- [
- not self.alive,
- self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1
- ]
- ):
- return self.reconnect()
- return True
-
- def _get_data(self):
- if not self.is_alive():
- return None
-
- data = {}
-
- try:
- raw = self.console.command(COMMAND_TPS)
- match = _TPS_REGEX.match(raw)
- if match:
- data['tps1'] = int(float(match.group(1)) * PRECISION)
- data['tps5'] = int(float(match.group(2)) * PRECISION)
- data['tps15'] = int(float(match.group(3)) * PRECISION)
- if match.group(4):
- data['mem_used'] = int(match.group(5))
- data['mem_alloc'] = int(match.group(6))
- data['mem_max'] = int(match.group(7))
- else:
- self.error('Unable to process TPS values.')
- if not raw:
- self.error(
-                    "'{0}' command returned no value, make sure you set the correct password".format(COMMAND_TPS))
- except mcrcon.MCRconException:
- self.error('Unable to fetch TPS values.')
- except socket.error:
- self.error('Connection is dead.')
- self.alive = False
- return None
-
- try:
- raw = self.console.command(COMMAND_LIST)
- match = _LIST_REGEX.search(raw)
- if not match:
- raw = self.console.command(COMMAND_ONLINE)
- match = _LIST_REGEX.search(raw)
- if match:
- users = int(match.group(1))
- hidden_users = match.group(2)
- if hidden_users:
- hidden_users = int(hidden_users)
- else:
- hidden_users = 0
- data['users'] = users + hidden_users
- else:
- if not raw:
-                self.error("'{0}' and '{1}' commands returned no value, make sure you set the correct password".format(
- COMMAND_LIST, COMMAND_ONLINE))
- self.error('Unable to process user counts.')
- except mcrcon.MCRconException:
- self.error('Unable to fetch user counts.')
- except socket.error:
- self.error('Connection is dead.')
- self.alive = False
- return None
-
- return data
diff --git a/collectors/python.d.plugin/spigotmc/spigotmc.conf b/collectors/python.d.plugin/spigotmc/spigotmc.conf
deleted file mode 100644
index f0064ea2f..000000000
--- a/collectors/python.d.plugin/spigotmc/spigotmc.conf
+++ /dev/null
@@ -1,66 +0,0 @@
-# netdata python.d.plugin configuration for spigotmc
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, spigotmc supports the following:
-#
-# host: localhost # The host to connect to. Defaults to the local system.
-# port: 25575 # The port the remote console is listening on.
-# password: ''     # The remote console password. Must be set correctly.
diff --git a/collectors/python.d.plugin/squid/Makefile.inc b/collectors/python.d.plugin/squid/Makefile.inc
deleted file mode 100644
index 76ecff81e..000000000
--- a/collectors/python.d.plugin/squid/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += squid/squid.chart.py
-dist_pythonconfig_DATA += squid/squid.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += squid/README.md squid/Makefile.inc
-
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
deleted file mode 120000
index c4e5a03d7..000000000
--- a/collectors/python.d.plugin/squid/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/squid.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/squid/integrations/squid.md b/collectors/python.d.plugin/squid/integrations/squid.md
deleted file mode 100644
index 6599826da..000000000
--- a/collectors/python.d.plugin/squid/integrations/squid.md
+++ /dev/null
@@ -1,199 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/squid/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/squid/metadata.yaml"
-sidebar_label: "Squid"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Web Servers and Web Proxies"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Squid
-
-
-<img src="https://netdata.cloud/img/squid.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: squid
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors statistics about Squid clients and servers, such as bandwidth and requests.
-
-
-It collects metrics from the endpoint where Squid exposes its `counters` data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, this collector will try to autodetect where Squid presents its `counters` data by probing several common configurations.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Squid instance
-
-These metrics refer to each monitored Squid instance.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| squid.clients_net | in, out, hits | kilobits/s |
-| squid.clients_requests | requests, hits, errors | requests/s |
-| squid.servers_net | in, out | kilobits/s |
-| squid.servers_requests | requests, errors | requests/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Configure Squid's Cache Manager
-
-Take a look at [Squid's official documentation](https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager) on how to configure access to the Cache Manager.
-
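-Once access is configured, you can verify that the counters are reachable. A minimal sketch, assuming a Squid instance on `localhost:3128` that serves the newer `/squid-internal-mgr/` URL form (older releases use `cache_object://` URLs instead):
-
-```python
-from urllib.request import urlopen
-
-# Fetch the raw counters page from Squid's cache manager.
-with urlopen('http://localhost:3128/squid-internal-mgr/counters', timeout=5) as resp:
-    for line in resp.read().decode().splitlines():
-        # The collector charts only the client_http.* and server.all.* counters.
-        if line.startswith(('client_http', 'server.all')):
-            key, _, value = line.partition('=')
-            print(key.strip(), value.strip())
-```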
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/squid.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/squid.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |
-| host | The host to connect to. | | yes |
-| port | The port to connect to. | | yes |
-| request | The URL to request from Squid. | | yes |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic configuration example.
-
-```yaml
-example_job_name:
- name: 'local'
- host: 'localhost'
- port: 3128
- request: 'cache_object://localhost:3128/counters'
-
-```
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details><summary>Config</summary>
-
-```yaml
-local_job:
- name: 'local'
- host: '127.0.0.1'
- port: 3128
- request: 'cache_object://127.0.0.1:3128/counters'
-
-remote_job:
- name: 'remote'
- host: '192.0.2.1'
- port: 3128
- request: 'cache_object://192.0.2.1:3128/counters'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `squid` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin squid debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/squid/metadata.yaml b/collectors/python.d.plugin/squid/metadata.yaml
deleted file mode 100644
index d0c5b3ecc..000000000
--- a/collectors/python.d.plugin/squid/metadata.yaml
+++ /dev/null
@@ -1,174 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: squid
- monitored_instance:
- name: Squid
- link: "http://www.squid-cache.org/"
- categories:
- - data-collection.web-servers-and-web-proxies
- icon_filename: "squid.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - squid
- - web delivery
- - squid caching proxy
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
-          This collector monitors statistics about Squid clients and servers, such as bandwidth and requests.
- method_description: "It collects metrics from the endpoint where Squid exposes its `counters` data."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
-          description: "By default, this collector will try to autodetect where Squid presents its `counters` data by probing several common configurations."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Configure Squid's Cache Manager
- description: |
- Take a look at [Squid's official documentation](https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager) on how to configure access to the Cache Manager.
- configuration:
- file:
- name: "python.d/squid.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: >
- Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: "local"
- required: false
- - name: host
- description: The host to connect to.
- default_value: ""
- required: true
- - name: port
- description: The port to connect to.
- default_value: ""
- required: true
- - name: request
- description: The URL to request from Squid.
- default_value: ""
- required: true
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic configuration example.
- folding:
- enabled: false
- config: |
- example_job_name:
- name: 'local'
- host: 'localhost'
- port: 3128
- request: 'cache_object://localhost:3128/counters'
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- local_job:
- name: 'local'
- host: '127.0.0.1'
- port: 3128
- request: 'cache_object://127.0.0.1:3128/counters'
-
- remote_job:
- name: 'remote'
- host: '192.0.2.1'
- port: 3128
- request: 'cache_object://192.0.2.1:3128/counters'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: Squid instance
- description: "These metrics refer to each monitored Squid instance."
- labels: []
- metrics:
- - name: squid.clients_net
- description: Squid Client Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: in
- - name: out
- - name: hits
- - name: squid.clients_requests
- description: Squid Client Requests
- unit: "requests/s"
- chart_type: line
- dimensions:
- - name: requests
- - name: hits
- - name: errors
- - name: squid.servers_net
- description: Squid Server Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: in
- - name: out
- - name: squid.servers_requests
- description: Squid Server Requests
- unit: "requests/s"
- chart_type: line
- dimensions:
- - name: requests
- - name: errors
diff --git a/collectors/python.d.plugin/squid/squid.chart.py b/collectors/python.d.plugin/squid/squid.chart.py
deleted file mode 100644
index bcae2d892..000000000
--- a/collectors/python.d.plugin/squid/squid.chart.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: squid netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.SocketService import SocketService
-
-ORDER = [
- 'clients_net',
- 'clients_requests',
- 'servers_net',
- 'servers_requests',
-]
-
-CHARTS = {
- 'clients_net': {
- 'options': [None, 'Squid Client Bandwidth', 'kilobits/s', 'clients', 'squid.clients_net', 'area'],
- 'lines': [
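-            # The x8 multiplier converts Squid's kbytes counters to kilobits for the
-            # kilobits/s chart; 'out' and 'hits' are negated to mirror them below the axis.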
- ['client_http_kbytes_in', 'in', 'incremental', 8, 1],
- ['client_http_kbytes_out', 'out', 'incremental', -8, 1],
- ['client_http_hit_kbytes_out', 'hits', 'incremental', -8, 1]
- ]
- },
- 'clients_requests': {
- 'options': [None, 'Squid Client Requests', 'requests/s', 'clients', 'squid.clients_requests', 'line'],
- 'lines': [
- ['client_http_requests', 'requests', 'incremental'],
- ['client_http_hits', 'hits', 'incremental'],
- ['client_http_errors', 'errors', 'incremental', -1, 1]
- ]
- },
- 'servers_net': {
- 'options': [None, 'Squid Server Bandwidth', 'kilobits/s', 'servers', 'squid.servers_net', 'area'],
- 'lines': [
- ['server_all_kbytes_in', 'in', 'incremental', 8, 1],
- ['server_all_kbytes_out', 'out', 'incremental', -8, 1]
- ]
- },
- 'servers_requests': {
- 'options': [None, 'Squid Server Requests', 'requests/s', 'servers', 'squid.servers_requests', 'line'],
- 'lines': [
- ['server_all_requests', 'requests', 'incremental'],
- ['server_all_errors', 'errors', 'incremental', -1, 1]
- ]
- }
-}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self._keep_alive = True
- self.request = ''
- self.host = 'localhost'
- self.port = 3128
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Get data via http request
- :return: dict
- """
- response = self._get_raw_data()
-
- data = dict()
- try:
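-            # The counters body begins with a 'sample_time' line; locate that chunk
-            # within the (possibly chunked) HTTP response.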
- raw = ''
- for tmp in response.split('\r\n'):
- if tmp.startswith('sample_time'):
- raw = tmp
- break
-
- if raw.startswith('<'):
- self.error('invalid data received')
- return None
-
- for row in raw.split('\n'):
- if row.startswith(('client', 'server.all')):
- tmp = row.split('=')
- data[tmp[0].replace('.', '_').strip(' ')] = int(tmp[1])
-
- except (ValueError, AttributeError, TypeError):
- self.error('invalid data received')
- return None
-
- if not data:
- self.error('no data received')
- return None
- return data
-
- def _check_raw_data(self, data):
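-        # Decide whether the full HTTP response has arrived: a chunked HTTP/1.1
-        # response is complete once the zero-length terminating chunk has been received.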
- header = data[:1024].lower()
-
- if 'connection: keep-alive' in header:
- self._keep_alive = True
- else:
- self._keep_alive = False
-
- if data[-7:] == '\r\n0\r\n\r\n' and 'transfer-encoding: chunked' in header: # HTTP/1.1 response
- self.debug('received full response from squid')
- return True
-
- self.debug('waiting more data from squid')
- return False
-
- def check(self):
- """
- Parse essential configuration, autodetect squid configuration (if needed), and check if data is available
- :return: boolean
- """
- self._parse_config()
- # format request
- req = self.request.decode()
- if not req.startswith('GET'):
- req = 'GET ' + req
- if not req.endswith(' HTTP/1.1\r\n\r\n'):
- req += ' HTTP/1.1\r\n\r\n'
- self.request = req.encode()
- if self._get_data() is not None:
- return True
- else:
- return False
diff --git a/collectors/python.d.plugin/squid/squid.conf b/collectors/python.d.plugin/squid/squid.conf
deleted file mode 100644
index b90a52c0c..000000000
--- a/collectors/python.d.plugin/squid/squid.conf
+++ /dev/null
@@ -1,167 +0,0 @@
-# netdata python.d.plugin configuration for squid
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, squid also supports the following:
-#
-# host : 'IP or HOSTNAME' # the host to connect to
-# port : PORT # the port to connect to
-# request: 'URL' # the URL to request from squid
-#
-
-# ----------------------------------------------------------------------
-# SQUID CONFIGURATION
-#
-# See:
-# http://wiki.squid-cache.org/Features/CacheManager
-#
-# In short, add to your squid configuration these:
-#
-# http_access allow localhost manager
-# http_access deny manager
-#
-# To remotely monitor a squid:
-#
-# acl managerAdmin src 192.0.2.1
-# http_access allow localhost manager
-# http_access allow managerAdmin manager
-# http_access deny manager
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-tcp3128old:
- name : 'local'
- host : 'localhost'
- port : 3128
- request : 'cache_object://localhost:3128/counters'
-
-tcp8080old:
- name : 'local'
- host : 'localhost'
- port : 8080
- request : 'cache_object://localhost:3128/counters'
-
-tcp3128new:
- name : 'local'
- host : 'localhost'
- port : 3128
- request : '/squid-internal-mgr/counters'
-
-tcp8080new:
- name : 'local'
- host : 'localhost'
- port : 8080
- request : '/squid-internal-mgr/counters'
-
-# IPv4
-
-tcp3128oldipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 3128
- request : 'cache_object://127.0.0.1:3128/counters'
-
-tcp8080oldipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 8080
- request : 'cache_object://127.0.0.1:3128/counters'
-
-tcp3128newipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 3128
- request : '/squid-internal-mgr/counters'
-
-tcp8080newipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 8080
- request : '/squid-internal-mgr/counters'
-
-# IPv6
-
-tcp3128oldipv6:
- name : 'local'
- host : '::1'
- port : 3128
- request : 'cache_object://[::1]:3128/counters'
-
-tcp8080oldipv6:
- name : 'local'
- host : '::1'
- port : 8080
- request : 'cache_object://[::1]:3128/counters'
-
-tcp3128newipv6:
- name : 'local'
- host : '::1'
- port : 3128
- request : '/squid-internal-mgr/counters'
-
-tcp8080newipv6:
- name : 'local'
- host : '::1'
- port : 8080
- request : '/squid-internal-mgr/counters'
-
diff --git a/collectors/python.d.plugin/tomcat/Makefile.inc b/collectors/python.d.plugin/tomcat/Makefile.inc
deleted file mode 100644
index 940a7835e..000000000
--- a/collectors/python.d.plugin/tomcat/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += tomcat/tomcat.chart.py
-dist_pythonconfig_DATA += tomcat/tomcat.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += tomcat/README.md tomcat/Makefile.inc
-
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
deleted file mode 120000
index 997090c35..000000000
--- a/collectors/python.d.plugin/tomcat/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/tomcat.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/tomcat/integrations/tomcat.md b/collectors/python.d.plugin/tomcat/integrations/tomcat.md
deleted file mode 100644
index 883f29dd3..000000000
--- a/collectors/python.d.plugin/tomcat/integrations/tomcat.md
+++ /dev/null
@@ -1,203 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tomcat/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tomcat/metadata.yaml"
-sidebar_label: "Tomcat"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Web Servers and Web Proxies"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Tomcat
-
-
-<img src="https://netdata.cloud/img/tomcat.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: tomcat
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Tomcat metrics about bandwidth, processing time, threads and more.
-
-
-It parses the XML data provided by the `/manager/status` HTTP endpoint.
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-You need to provide a username and password to access the web server's status page. Create a separate user with read-only rights for this particular endpoint.
-
-### Default Behavior
-
-#### Auto-Detection
-
-If the Netdata Agent and the Tomcat web server run on the same host, the module, without configuration, attempts to connect to http://localhost:8080/manager/status?XML=true without any credentials, so it will probably fail.
-
-#### Limits
-
-This module does not support SSL communication. Do not monitor a Tomcat deployment over a public network (the public internet): credentials are transmitted in plain text.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Tomcat instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| tomcat.accesses | accesses, errors | requests/s |
-| tomcat.bandwidth | sent, received | KiB/s |
-| tomcat.processing_time | processing time | seconds |
-| tomcat.threads | current, busy | current threads |
-| tomcat.jvm | free, eden, survivor, tenured, code cache, compressed, metaspace | MiB |
-| tomcat.jvm_eden | used, committed, max | MiB |
-| tomcat.jvm_survivor | used, committed, max | MiB |
-| tomcat.jvm_tenured | used, committed, max | MiB |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Create a read-only `netdata` user to monitor the `/status` endpoint
-
-This is necessary for configuring the collector. In Tomcat, the `manager-status` role typically grants the read-only access needed for the `/manager/status` page.
-
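-To verify that the user can reach the endpoint, you can fetch and parse the status page yourself. A minimal sketch with placeholder credentials, assuming the page is protected by basic auth:
-
-```python
-import urllib.request
-import xml.etree.ElementTree as ET
-
-URL = 'http://localhost:8080/manager/status?XML=true'
-
-password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
-password_mgr.add_password(None, URL, 'netdata', 'hypothetical-password')
-opener = urllib.request.build_opener(urllib.request.HTTPBasicAuthHandler(password_mgr))
-
-# Parse the XML status page and print a few of the attributes the collector charts.
-status = ET.fromstring(opener.open(URL, timeout=5).read())
-print('free JVM memory (bytes):', status.find('jvm/memory').get('free'))
-for connector in status.findall('connector'):
-    print(connector.get('name'), connector.find('requestInfo').get('requestCount'))
-```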
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/tomcat.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/tomcat.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options per job</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| url | The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true. | no | yes |
-| user | A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected. | no | no |
-| pass | A valid password for the user in question. Required if the endpoint is password protected. | no | no |
-| connector_name | The connector component that communicates with a web connector via the AJP protocol, e.g. `ajp-bio-8009`. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://localhost:8080/manager/status?XML=true'
-
-```
-##### Using an IPv4 endpoint
-
-A typical configuration using an IPv4 endpoint
-
-<details><summary>Config</summary>
-
-```yaml
-local_ipv4:
- name : 'local'
- url : 'http://127.0.0.1:8080/manager/status?XML=true'
-
-```
-</details>
-
-##### Using an IPv6 endpoint
-
-A typical configuration using an IPv6 endpoint
-
-<details><summary>Config</summary>
-
-```yaml
-local_ipv6:
- name : 'local'
- url : 'http://[::1]:8080/manager/status?XML=true'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `tomcat` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin tomcat debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/tomcat/metadata.yaml b/collectors/python.d.plugin/tomcat/metadata.yaml
deleted file mode 100644
index e68526073..000000000
--- a/collectors/python.d.plugin/tomcat/metadata.yaml
+++ /dev/null
@@ -1,200 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: tomcat
- monitored_instance:
- name: Tomcat
- link: "https://tomcat.apache.org/"
- categories:
- - data-collection.web-servers-and-web-proxies
- icon_filename: "tomcat.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - apache
- - tomcat
- - webserver
- - websocket
- - jakarta
- - javaEE
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors Tomcat metrics about bandwidth, processing time, threads and more.
- method_description: |
-          It parses the XML data provided by the `/manager/status` HTTP endpoint.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
-        description: "You need to provide a username and password to access the web server's status page. Create a separate user with read-only rights for this particular endpoint."
- default_behavior:
- auto_detection:
-          description: "If the Netdata Agent and the Tomcat web server run on the same host, the module, without configuration, attempts to connect to http://localhost:8080/manager/status?XML=true without any credentials, so it will probably fail."
- limits:
-          description: "This module does not support SSL communication. Do not monitor a Tomcat deployment over a public network (the public internet): credentials are transmitted in plain text."
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
-          - title: Create a read-only `netdata` user to monitor the `/status` endpoint
-            description: This is necessary for configuring the collector. In Tomcat, the `manager-status` role typically grants the read-only access needed for the `/manager/status` page.
- configuration:
- file:
- name: "python.d/tomcat.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
-            The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-            Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options per job"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: url
- description: The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true.
- default_value: no
- required: true
- - name: user
-              description: A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected.
- default_value: no
- required: false
- - name: pass
-              description: A valid password for the user in question. Required if the endpoint is password protected.
- default_value: no
- required: false
- - name: connector_name
-              description: The connector component that communicates with a web connector via the AJP protocol, e.g. `ajp-bio-8009`.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- folding:
- enabled: false
- description: A basic example configuration
- config: |
- localhost:
- name : 'local'
- url : 'http://localhost:8080/manager/status?XML=true'
- - name: Using an IPv4 endpoint
- description: A typical configuration using an IPv4 endpoint
- config: |
- local_ipv4:
- name : 'local'
- url : 'http://127.0.0.1:8080/manager/status?XML=true'
- - name: Using an IPv6 endpoint
- description: A typical configuration using an IPv6 endpoint
- config: |
- local_ipv6:
- name : 'local'
- url : 'http://[::1]:8080/manager/status?XML=true'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: tomcat.accesses
- description: Requests
- unit: "requests/s"
- chart_type: area
- dimensions:
- - name: accesses
- - name: errors
- - name: tomcat.bandwidth
- description: Bandwidth
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: sent
- - name: received
- - name: tomcat.processing_time
- description: processing time
- unit: "seconds"
- chart_type: area
- dimensions:
- - name: processing time
- - name: tomcat.threads
- description: Threads
- unit: "current threads"
- chart_type: area
- dimensions:
- - name: current
- - name: busy
- - name: tomcat.jvm
- description: JVM Memory Pool Usage
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: eden
- - name: survivor
- - name: tenured
- - name: code cache
- - name: compressed
- - name: metaspace
- - name: tomcat.jvm_eden
- description: Eden Memory Usage
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: used
- - name: committed
- - name: max
- - name: tomcat.jvm_survivor
- description: Survivor Memory Usage
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: used
- - name: committed
- - name: max
- - name: tomcat.jvm_tenured
- description: Tenured Memory Usage
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: used
- - name: committed
- - name: max
diff --git a/collectors/python.d.plugin/tomcat/tomcat.chart.py b/collectors/python.d.plugin/tomcat/tomcat.chart.py
deleted file mode 100644
index 90315f8c7..000000000
--- a/collectors/python.d.plugin/tomcat/tomcat.chart.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: tomcat netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# Author: Wei He (Wing924)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import re
-import xml.etree.ElementTree as ET
-
-from bases.FrameworkServices.UrlService import UrlService
-
-MiB = 1 << 20
-
-# Regex fix for Tomcat single quote XML attributes
-# affecting Tomcat < 8.5.24 & 9.0.2 running with Java > 9
-# cf. https://bz.apache.org/bugzilla/show_bug.cgi?id=61603
-single_quote_regex = re.compile(r"='([^']+)'([^']+)''")
-
-ORDER = [
- 'accesses',
- 'bandwidth',
- 'processing_time',
- 'threads',
- 'jvm',
- 'jvm_eden',
- 'jvm_survivor',
- 'jvm_tenured',
-]
-
-CHARTS = {
- 'accesses': {
- 'options': [None, 'Requests', 'requests/s', 'statistics', 'tomcat.accesses', 'area'],
- 'lines': [
- ['requestCount', 'accesses', 'incremental'],
- ['errorCount', 'errors', 'incremental'],
- ]
- },
- 'bandwidth': {
- 'options': [None, 'Bandwidth', 'KiB/s', 'statistics', 'tomcat.bandwidth', 'area'],
- 'lines': [
- ['bytesSent', 'sent', 'incremental', 1, 1024],
- ['bytesReceived', 'received', 'incremental', 1, 1024],
- ]
- },
- 'processing_time': {
- 'options': [None, 'processing time', 'seconds', 'statistics', 'tomcat.processing_time', 'area'],
- 'lines': [
- ['processingTime', 'processing time', 'incremental', 1, 1000]
- ]
- },
- 'threads': {
- 'options': [None, 'Threads', 'current threads', 'statistics', 'tomcat.threads', 'area'],
- 'lines': [
- ['currentThreadCount', 'current', 'absolute'],
- ['currentThreadsBusy', 'busy', 'absolute']
- ]
- },
- 'jvm': {
- 'options': [None, 'JVM Memory Pool Usage', 'MiB', 'memory', 'tomcat.jvm', 'stacked'],
- 'lines': [
- ['free', 'free', 'absolute', 1, MiB],
- ['eden_used', 'eden', 'absolute', 1, MiB],
- ['survivor_used', 'survivor', 'absolute', 1, MiB],
- ['tenured_used', 'tenured', 'absolute', 1, MiB],
- ['code_cache_used', 'code cache', 'absolute', 1, MiB],
- ['compressed_used', 'compressed', 'absolute', 1, MiB],
- ['metaspace_used', 'metaspace', 'absolute', 1, MiB],
- ]
- },
- 'jvm_eden': {
- 'options': [None, 'Eden Memory Usage', 'MiB', 'memory', 'tomcat.jvm_eden', 'area'],
- 'lines': [
- ['eden_used', 'used', 'absolute', 1, MiB],
- ['eden_committed', 'committed', 'absolute', 1, MiB],
- ['eden_max', 'max', 'absolute', 1, MiB]
- ]
- },
- 'jvm_survivor': {
- 'options': [None, 'Survivor Memory Usage', 'MiB', 'memory', 'tomcat.jvm_survivor', 'area'],
- 'lines': [
- ['survivor_used', 'used', 'absolute', 1, MiB],
- ['survivor_committed', 'committed', 'absolute', 1, MiB],
- ['survivor_max', 'max', 'absolute', 1, MiB],
- ]
- },
- 'jvm_tenured': {
- 'options': [None, 'Tenured Memory Usage', 'MiB', 'memory', 'tomcat.jvm_tenured', 'area'],
- 'lines': [
- ['tenured_used', 'used', 'absolute', 1, MiB],
- ['tenured_committed', 'committed', 'absolute', 1, MiB],
- ['tenured_max', 'max', 'absolute', 1, MiB]
- ]
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
- self.connector_name = self.configuration.get('connector_name', None)
- self.parse = self.xml_parse
-
- def xml_parse(self, data):
- try:
- return ET.fromstring(data)
- except ET.ParseError:
-            self.debug('%s is not a valid XML page. Please add "?XML=true" to the Tomcat status page URL.' % self.url)
- return None
-
- def xml_single_quote_fix_parse(self, data):
- data = single_quote_regex.sub(r"='\g<1>\g<2>'", data)
- return self.xml_parse(data)
-
- def check(self):
- self._manager = self._build_manager()
-
- raw_data = self._get_raw_data()
- if not raw_data:
- return False
-
- if single_quote_regex.search(raw_data):
- self.warning('Tomcat status page is returning invalid single quote XML, please consider upgrading '
- 'your Tomcat installation. See https://bz.apache.org/bugzilla/show_bug.cgi?id=61603')
- self.parse = self.xml_single_quote_fix_parse
-
- return self.parse(raw_data) is not None
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- data = None
- raw_data = self._get_raw_data()
- if raw_data:
- xml = self.parse(raw_data)
- if xml is None:
- return None
-
- data = {}
-
- jvm = xml.find('jvm')
-
- connector = None
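-            # When connector_name is set, use the first connector whose name contains it;
-            # otherwise fall back to the first connector in the status page.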
- if self.connector_name:
- for conn in xml.findall('connector'):
- if self.connector_name in conn.get('name'):
- connector = conn
- break
- else:
- connector = xml.find('connector')
-
- memory = jvm.find('memory')
- data['free'] = memory.get('free')
- data['total'] = memory.get('total')
-
- for pool in jvm.findall('memorypool'):
- name = pool.get('name')
- if 'Eden Space' in name:
- data['eden_used'] = pool.get('usageUsed')
- data['eden_committed'] = pool.get('usageCommitted')
- data['eden_max'] = pool.get('usageMax')
- elif 'Survivor Space' in name:
- data['survivor_used'] = pool.get('usageUsed')
- data['survivor_committed'] = pool.get('usageCommitted')
- data['survivor_max'] = pool.get('usageMax')
- elif 'Tenured Gen' in name or 'Old Gen' in name:
- data['tenured_used'] = pool.get('usageUsed')
- data['tenured_committed'] = pool.get('usageCommitted')
- data['tenured_max'] = pool.get('usageMax')
- elif name == 'Code Cache':
- data['code_cache_used'] = pool.get('usageUsed')
- data['code_cache_committed'] = pool.get('usageCommitted')
- data['code_cache_max'] = pool.get('usageMax')
- elif name == 'Compressed':
- data['compressed_used'] = pool.get('usageUsed')
- data['compressed_committed'] = pool.get('usageCommitted')
- data['compressed_max'] = pool.get('usageMax')
- elif name == 'Metaspace':
- data['metaspace_used'] = pool.get('usageUsed')
- data['metaspace_committed'] = pool.get('usageCommitted')
- data['metaspace_max'] = pool.get('usageMax')
-
- if connector is not None:
- thread_info = connector.find('threadInfo')
- data['currentThreadsBusy'] = thread_info.get('currentThreadsBusy')
- data['currentThreadCount'] = thread_info.get('currentThreadCount')
-
- request_info = connector.find('requestInfo')
- data['processingTime'] = request_info.get('processingTime')
- data['requestCount'] = request_info.get('requestCount')
- data['errorCount'] = request_info.get('errorCount')
- data['bytesReceived'] = request_info.get('bytesReceived')
- data['bytesSent'] = request_info.get('bytesSent')
-
- return data or None
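-
-
-# For reference, an abridged and purely illustrative sketch of the manager
-# status XML this parser expects (attribute names match the get() calls above;
-# the connector name is hypothetical):
-#
-#   <status>
-#     <jvm>
-#       <memory free="..." total="..."/>
-#       <memorypool name="Eden Space" usageUsed="..." usageCommitted="..." usageMax="..."/>
-#     </jvm>
-#     <connector name="http-nio-8080">
-#       <threadInfo currentThreadsBusy="..." currentThreadCount="..."/>
-#       <requestInfo processingTime="..." requestCount="..." errorCount="..."
-#                    bytesReceived="..." bytesSent="..."/>
-#     </connector>
-#   </status>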
diff --git a/collectors/python.d.plugin/tomcat/tomcat.conf b/collectors/python.d.plugin/tomcat/tomcat.conf
deleted file mode 100644
index 009591bdf..000000000
--- a/collectors/python.d.plugin/tomcat/tomcat.conf
+++ /dev/null
@@ -1,89 +0,0 @@
-# netdata python.d.plugin configuration for tomcat
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, the tomcat module also supports the following:
-#
-# url: 'URL' # the URL to fetch tomcat's status stats
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-# if you have multiple connectors, the following are supported:
-#
-# connector_name: 'ajp-bio-8009' # default is null, which uses the first connector in the status XML
-#
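-# for example (a hypothetical job; adjust connector_name to match your status XML):
-#
-# tomcat_ajp:
-#   name : 'ajp'
-#   url : 'http://127.0.0.1:8080/manager/status?XML=true'
-#   connector_name : 'ajp-bio-8009'
-#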
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- url : 'http://localhost:8080/manager/status?XML=true'
-
-localipv4:
- name : 'local'
- url : 'http://127.0.0.1:8080/manager/status?XML=true'
-
-localipv6:
- name : 'local'
- url : 'http://[::1]:8080/manager/status?XML=true'
diff --git a/collectors/python.d.plugin/tor/Makefile.inc b/collectors/python.d.plugin/tor/Makefile.inc
deleted file mode 100644
index 5a45f9b79..000000000
--- a/collectors/python.d.plugin/tor/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += tor/tor.chart.py
-dist_pythonconfig_DATA += tor/tor.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += tor/README.md tor/Makefile.inc
-
diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md
deleted file mode 120000
index 7c20cd40a..000000000
--- a/collectors/python.d.plugin/tor/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/tor.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/tor/integrations/tor.md b/collectors/python.d.plugin/tor/integrations/tor.md
deleted file mode 100644
index 0e57fa793..000000000
--- a/collectors/python.d.plugin/tor/integrations/tor.md
+++ /dev/null
@@ -1,197 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tor/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tor/metadata.yaml"
-sidebar_label: "Tor"
-learn_status: "Published"
-learn_rel_path: "Data Collection/VPNs"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Tor
-
-
-<img src="https://netdata.cloud/img/tor.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: tor
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Tor bandwidth traffic.
-
-It connects to the Tor control port to collect traffic statistics.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If no configuration is provided, the collector will try to connect to 127.0.0.1:9051 to detect a running Tor instance.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Tor instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| tor.traffic | read, write | KiB/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Required python module
-
-The `stem` python library needs to be installed.
-
-
-#### Required Tor configuration
-
-Add to /etc/tor/torrc:
-
-ControlPort 9051
-
-For more options please read the manual.
-
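-To verify connectivity by hand, here is a minimal sketch using `stem` (the same
-library the collector uses):
-
-```python
-import stem.control
-
-# connect to the control port configured above; authenticate() with no
-# arguments works for cookie authentication, otherwise pass password='...'
-with stem.control.Controller.from_port(address='127.0.0.1', port=9051) as controller:
-    controller.authenticate()
-    print(controller.get_info('traffic/read'))     # total bytes read
-    print(controller.get_info('traffic/written'))  # total bytes written
-```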
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/tor.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/tor.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| control_addr | Tor control IP address | 127.0.0.1 | no |
-| control_port | Tor control port. Can be either a tcp port, or a path to a socket file. | 9051 | no |
-| password | Tor control password | | no |
-
-</details>
-
-#### Examples
-
-##### Local TCP
-
-A basic TCP configuration. `control_addr` is omitted and will default to `127.0.0.1`.
-
-<details><summary>Config</summary>
-
-```yaml
-local_tcp:
- name: 'local'
- control_port: 9051
- password: <password> # if required
-
-```
-</details>
-
-##### Local socket
-
-A basic local socket configuration.
-
-<details><summary>Config</summary>
-
-```yaml
-local_socket:
- name: 'local'
- control_port: '/var/run/tor/control'
- password: <password> # if required
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `tor` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin tor debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/tor/metadata.yaml b/collectors/python.d.plugin/tor/metadata.yaml
deleted file mode 100644
index 8647eca23..000000000
--- a/collectors/python.d.plugin/tor/metadata.yaml
+++ /dev/null
@@ -1,143 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: tor
- monitored_instance:
- name: Tor
- link: 'https://www.torproject.org/'
- categories:
- - data-collection.vpns
- icon_filename: 'tor.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - tor
- - traffic
- - vpn
- most_popular: false
- overview:
- data_collection:
-      metrics_description: 'This collector monitors Tor bandwidth traffic.'
- method_description: 'It connects to the Tor control port to collect traffic statistics.'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
-          description: 'If no configuration is provided, the collector will try to connect to 127.0.0.1:9051 to detect a running Tor instance.'
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'Required python module'
- description: |
- The `stem` python library needs to be installed.
- - title: 'Required Tor configuration'
- description: |
- Add to /etc/tor/torrc:
-
- ControlPort 9051
-
- For more options please read the manual.
- configuration:
- file:
- name: python.d/tor.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- - name: control_addr
- description: Tor control IP address
- default_value: 127.0.0.1
- required: false
- - name: control_port
- description: Tor control port. Can be either a tcp port, or a path to a socket file.
- default_value: 9051
- required: false
- - name: password
- description: Tor control password
- default_value: ''
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Local TCP
-          description: A basic TCP configuration. `control_addr` is omitted and will default to `127.0.0.1`.
- config: |
- local_tcp:
- name: 'local'
- control_port: 9051
- password: <password> # if required
- - name: Local socket
-          description: A basic local socket configuration.
- config: |
- local_socket:
- name: 'local'
- control_port: '/var/run/tor/control'
- password: <password> # if required
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: tor.traffic
- description: Tor Traffic
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
diff --git a/collectors/python.d.plugin/tor/tor.chart.py b/collectors/python.d.plugin/tor/tor.chart.py
deleted file mode 100644
index f7bc2d79b..000000000
--- a/collectors/python.d.plugin/tor/tor.chart.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: tor netdata python.d module
-# Author: Federico Ceratto <federico.ceratto@gmail.com>
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-try:
- import stem
- import stem.connection
- import stem.control
-
- STEM_AVAILABLE = True
-except ImportError:
- STEM_AVAILABLE = False
-
-DEF_PORT = 'default'
-DEF_ADDR = '127.0.0.1'
-
-ORDER = [
- 'traffic',
-]
-
-CHARTS = {
- 'traffic': {
- 'options': [None, 'Tor Traffic', 'KiB/s', 'traffic', 'tor.traffic', 'area'],
- 'lines': [
- ['read', 'read', 'incremental', 1, 1024],
- ['write', 'write', 'incremental', 1, -1024],
- ]
- }
-}
-
-
-class Service(SimpleService):
- """Provide netdata service for Tor"""
-
- def __init__(self, configuration=None, name=None):
- super(Service, self).__init__(configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.port = self.configuration.get('control_port', DEF_PORT)
- self.addr = self.configuration.get('control_addr', DEF_ADDR)
- self.password = self.configuration.get('password')
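-        # a non-numeric string port is treated as a path to a unix control socket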
- self.use_socket = isinstance(self.port, str) and self.port != DEF_PORT and not self.port.isdigit()
- self.conn = None
- self.alive = False
-
- def check(self):
- if not STEM_AVAILABLE:
- self.error('the stem library is missing')
- return False
-
- return self.connect()
-
- def get_data(self):
- if not self.alive and not self.reconnect():
- return None
-
- data = dict()
-
- try:
- data['read'] = self.conn.get_info('traffic/read')
- data['write'] = self.conn.get_info('traffic/written')
- except stem.ControllerError as error:
- self.debug(error)
- self.alive = False
-
- return data or None
-
- def authenticate(self):
- try:
- self.conn.authenticate(password=self.password)
- except stem.connection.AuthenticationFailure as error:
- self.error('authentication error: {0}'.format(error))
- return False
- return True
-
- def connect_via_port(self):
- try:
- self.conn = stem.control.Controller.from_port(address=self.addr, port=self.port)
- except (stem.SocketError, ValueError) as error:
- self.error(error)
-
- def connect_via_socket(self):
- try:
- self.conn = stem.control.Controller.from_socket_file(path=self.port)
- except (stem.SocketError, ValueError) as error:
- self.error(error)
-
- def connect(self):
- if self.conn:
- self.conn.close()
- self.conn = None
-
- if self.use_socket:
- self.connect_via_socket()
- else:
- self.connect_via_port()
-
- if self.conn and self.authenticate():
- self.alive = True
-
- return self.alive
-
- def reconnect(self):
- return self.connect()
diff --git a/collectors/python.d.plugin/tor/tor.conf b/collectors/python.d.plugin/tor/tor.conf
deleted file mode 100644
index c7c98dc0b..000000000
--- a/collectors/python.d.plugin/tor/tor.conf
+++ /dev/null
@@ -1,81 +0,0 @@
-# netdata python.d.plugin configuration for tor
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, the tor module also supports the following:
-#
-# control_addr: 'address' # tor control IP address (defaults to '127.0.0.1')
-# control_port: 'port' # tor control port
-# password: 'password' # tor control password
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-# local_tcp:
-# name: 'local'
-# control_port: 9051
-# control_addr: 127.0.0.1
-# password: <password>
-#
-# local_socket:
-# name: 'local'
-# control_port: '/var/run/tor/control'
-# password: <password>
diff --git a/collectors/python.d.plugin/traefik/Makefile.inc b/collectors/python.d.plugin/traefik/Makefile.inc
deleted file mode 100644
index 926d56dda..000000000
--- a/collectors/python.d.plugin/traefik/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += traefik/traefik.chart.py
-dist_pythonconfig_DATA += traefik/traefik.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += traefik/README.md traefik/Makefile.inc
-
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
deleted file mode 100644
index 40ed24f04..000000000
--- a/collectors/python.d.plugin/traefik/README.md
+++ /dev/null
@@ -1,98 +0,0 @@
-<!--
-title: "Traefik monitoring with Netdata"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/traefik/README.md"
-sidebar_label: "traefik-python.d.plugin"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/Webapps"
--->
-
-# Traefik collector
-
-Uses the `health` API to provide statistics.
-
-It produces:
-
-1. **Responses** by statuses
-
- - success (1xx, 2xx, 304)
- - error (5xx)
- - redirect (3xx except 304)
- - bad (4xx)
- - other (all other responses)
-
-2. **Responses** by codes
-
- - 2xx (successful)
- - 5xx (internal server errors)
- - 3xx (redirect)
- - 4xx (bad)
- - 1xx (informational)
-    - other (non-standard responses)
-
-3. **Detailed Response Codes** requests/s (number of responses for each response code individually)
-
-4. **Requests**/s
-
- - request statistics
-
-5. **Total response time**
-
-    - sum of all response times
-
-6. **Average response time**
-
-7. **Average response time per iteration**
-
-8. **Uptime**
-
- - Traefik server uptime
-
-## Configuration
-
-Edit the `python.d/traefik.conf` configuration file using `edit-config` from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically
-at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/traefik.conf
-```
-
-It needs only the `url` of the server's `health` endpoint.
-
-Here is an example for a local server:
-
-```yaml
-update_every: 1
-priority: 60000
-
-local:
- url: 'http://localhost:8080/health'
-```
-
-Without configuration, the module attempts to connect to `http://localhost:8080/health`.
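-
-For reference, here is a minimal standalone sketch (not part of the module) of how
-the module buckets the health data; it assumes the `/health` JSON contains a
-`total_status_code_count` map, as the module expects:
-
-```python
-import json
-from urllib.request import urlopen
-
-raw = json.load(urlopen('http://localhost:8080/health'))
-
-# same classification the module uses: 1xx/2xx and 304 count as success,
-# the rest of 3xx as redirects, 4xx as bad requests, 5xx as server errors
-buckets = {'success': 0, 'redirect': 0, 'bad': 0, 'error': 0, 'other': 0}
-for code, count in raw['total_status_code_count'].items():
-    if code[0] in ('1', '2') or code == '304':
-        buckets['success'] += count
-    elif code[0] == '3':
-        buckets['redirect'] += count
-    elif code[0] == '4':
-        buckets['bad'] += count
-    elif code[0] == '5':
-        buckets['error'] += count
-    else:
-        buckets['other'] += count
-
-print(buckets)
-```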
-
-
-
-
-### Troubleshooting
-
-To troubleshoot issues with the `traefik` module, run the `python.d.plugin` with the debug option enabled. The
-output will show the data collection job's results or error messages explaining why the collector isn't working.
-
-First, navigate to your plugins directory, usually located under `/usr/libexec/netdata/plugins.d/`. If that's
-not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the
-plugins directory, switch to the `netdata` user.
-
-```bash
-cd /usr/libexec/netdata/plugins.d/
-sudo su -s /bin/bash netdata
-```
-
-Now you can manually run the `traefik` module in debug mode:
-
-```bash
-./python.d.plugin traefik debug trace
-```
-
diff --git a/collectors/python.d.plugin/traefik/metadata.yaml b/collectors/python.d.plugin/traefik/metadata.yaml
deleted file mode 100644
index dcfb098a0..000000000
--- a/collectors/python.d.plugin/traefik/metadata.yaml
+++ /dev/null
@@ -1,125 +0,0 @@
-# This collector will not appear in documentation, as the go version is preferred,
-# https://github.com/netdata/go.d.plugin/blob/master/modules/traefik/README.md
-#
-# meta:
-# plugin_name: python.d.plugin
-# module_name: traefik
-# monitored_instance:
-# name: python.d traefik
-# link: ''
-# categories: []
-# icon_filename: ''
-# related_resources:
-# integrations:
-# list: []
-# info_provided_to_referring_integrations:
-# description: ''
-# keywords: []
-# most_popular: false
-# overview:
-# data_collection:
-# metrics_description: ''
-# method_description: ''
-# supported_platforms:
-# include: []
-# exclude: []
-# multi_instance: true
-# additional_permissions:
-# description: ''
-# default_behavior:
-# auto_detection:
-# description: ''
-# limits:
-# description: ''
-# performance_impact:
-# description: ''
-# setup:
-# prerequisites:
-# list: []
-# configuration:
-# file:
-# name: ''
-# description: ''
-# options:
-# description: ''
-# folding:
-# title: ''
-# enabled: true
-# list: []
-# examples:
-# folding:
-# enabled: true
-# title: ''
-# list: []
-# troubleshooting:
-# problems:
-# list: []
-# alerts: []
-# metrics:
-# folding:
-# title: Metrics
-# enabled: false
-# description: ""
-# availability: []
-# scopes:
-# - name: global
-# description: ""
-# labels: []
-# metrics:
-# - name: traefik.response_statuses
-# description: Response statuses
-# unit: "requests/s"
-# chart_type: stacked
-# dimensions:
-# - name: success
-# - name: error
-# - name: redirect
-# - name: bad
-# - name: other
-# - name: traefik.response_codes
-# description: Responses by codes
-# unit: "requests/s"
-# chart_type: stacked
-# dimensions:
-# - name: 2xx
-# - name: 5xx
-# - name: 3xx
-# - name: 4xx
-# - name: 1xx
-# - name: other
-# - name: traefik.detailed_response_codes
-# description: Detailed response codes
-# unit: "requests/s"
-# chart_type: stacked
-# dimensions:
-# - name: a dimension for each response code family
-# - name: traefik.requests
-# description: Requests
-# unit: "requests/s"
-# chart_type: line
-# dimensions:
-# - name: requests
-# - name: traefik.total_response_time
-# description: Total response time
-# unit: "seconds"
-# chart_type: line
-# dimensions:
-# - name: response
-# - name: traefik.average_response_time
-# description: Average response time
-# unit: "milliseconds"
-# chart_type: line
-# dimensions:
-# - name: response
-# - name: traefik.average_response_time_per_iteration
-# description: Average response time per iteration
-# unit: "milliseconds"
-# chart_type: line
-# dimensions:
-# - name: response
-# - name: traefik.uptime
-# description: Uptime
-# unit: "seconds"
-# chart_type: line
-# dimensions:
-# - name: uptime
diff --git a/collectors/python.d.plugin/traefik/traefik.chart.py b/collectors/python.d.plugin/traefik/traefik.chart.py
deleted file mode 100644
index 5a498467f..000000000
--- a/collectors/python.d.plugin/traefik/traefik.chart.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: traefik netdata python.d module
-# Author: Alexandre Menezes (@ale_menezes)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from collections import defaultdict
-from json import loads
-
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = [
- 'response_statuses',
- 'response_codes',
- 'detailed_response_codes',
- 'requests',
- 'total_response_time',
- 'average_response_time',
- 'average_response_time_per_iteration',
- 'uptime'
-]
-
-CHARTS = {
- 'response_statuses': {
- 'options': [None, 'Response statuses', 'requests/s', 'responses', 'traefik.response_statuses', 'stacked'],
- 'lines': [
- ['successful_requests', 'success', 'incremental'],
- ['server_errors', 'error', 'incremental'],
- ['redirects', 'redirect', 'incremental'],
- ['bad_requests', 'bad', 'incremental'],
- ['other_requests', 'other', 'incremental']
- ]
- },
- 'response_codes': {
- 'options': [None, 'Responses by codes', 'requests/s', 'responses', 'traefik.response_codes', 'stacked'],
- 'lines': [
- ['2xx', None, 'incremental'],
- ['5xx', None, 'incremental'],
- ['3xx', None, 'incremental'],
- ['4xx', None, 'incremental'],
- ['1xx', None, 'incremental'],
- ['other', None, 'incremental']
- ]
- },
- 'detailed_response_codes': {
- 'options': [None, 'Detailed response codes', 'requests/s', 'responses', 'traefik.detailed_response_codes',
- 'stacked'],
- 'lines': []
- },
- 'requests': {
- 'options': [None, 'Requests', 'requests/s', 'requests', 'traefik.requests', 'line'],
- 'lines': [
- ['total_count', 'requests', 'incremental']
- ]
- },
- 'total_response_time': {
- 'options': [None, 'Total response time', 'seconds', 'timings', 'traefik.total_response_time', 'line'],
- 'lines': [
- ['total_response_time_sec', 'response', 'absolute', 1, 10000]
- ]
- },
- 'average_response_time': {
- 'options': [None, 'Average response time', 'milliseconds', 'timings', 'traefik.average_response_time', 'line'],
- 'lines': [
- ['average_response_time_sec', 'response', 'absolute', 1, 1000]
- ]
- },
- 'average_response_time_per_iteration': {
- 'options': [None, 'Average response time per iteration', 'milliseconds', 'timings',
- 'traefik.average_response_time_per_iteration', 'line'],
- 'lines': [
- ['average_response_time_per_iteration_sec', 'response', 'incremental', 1, 10000]
- ]
- },
- 'uptime': {
- 'options': [None, 'Uptime', 'seconds', 'uptime', 'traefik.uptime', 'line'],
- 'lines': [
- ['uptime_sec', 'uptime', 'absolute']
- ]
- }
-}
-
-HEALTH_STATS = [
- 'uptime_sec',
- 'average_response_time_sec',
- 'total_response_time_sec',
- 'total_count',
- 'total_status_code_count'
-]
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', 'http://localhost:8080/health')
- self.order = ORDER
- self.definitions = CHARTS
- self.last_total_response_time = 0
- self.last_total_count = 0
- self.data = {
- 'successful_requests': 0,
- 'redirects': 0,
- 'bad_requests': 0,
- 'server_errors': 0,
- 'other_requests': 0,
- '1xx': 0,
- '2xx': 0,
- '3xx': 0,
- '4xx': 0,
- '5xx': 0,
- 'other': 0,
- 'average_response_time_per_iteration_sec': 0,
- }
-
- def _get_data(self):
- data = self._get_raw_data()
-
- if not data:
- return None
-
- data = loads(data)
-
- self.get_data_per_code_status(raw_data=data)
-
- self.get_data_per_code_family(raw_data=data)
-
- self.get_data_per_code(raw_data=data)
-
- self.data.update(fetch_data_(raw_data=data, metrics=HEALTH_STATS))
-
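-        # the *_sec values arrive as float seconds; scale them to integers here
-        # and let the chart divisors (1000 and 10000) render ms and seconds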
- self.data['average_response_time_sec'] *= 1000000
- self.data['total_response_time_sec'] *= 10000
- if data['total_count'] != self.last_total_count:
- self.data['average_response_time_per_iteration_sec'] = \
- (data['total_response_time_sec'] - self.last_total_response_time) * \
- 1000000 / (data['total_count'] - self.last_total_count)
- else:
- self.data['average_response_time_per_iteration_sec'] = 0
- self.last_total_response_time = data['total_response_time_sec']
- self.last_total_count = data['total_count']
-
- return self.data or None
-
- def get_data_per_code_status(self, raw_data):
- data = defaultdict(int)
- for code, value in raw_data['total_status_code_count'].items():
- code_prefix = code[0]
- if code_prefix == '1' or code_prefix == '2' or code == '304':
- data['successful_requests'] += value
- elif code_prefix == '3':
- data['redirects'] += value
- elif code_prefix == '4':
- data['bad_requests'] += value
- elif code_prefix == '5':
- data['server_errors'] += value
- else:
- data['other_requests'] += value
- self.data.update(data)
-
- def get_data_per_code_family(self, raw_data):
- data = defaultdict(int)
- for code, value in raw_data['total_status_code_count'].items():
- code_prefix = code[0]
- if code_prefix == '1':
- data['1xx'] += value
- elif code_prefix == '2':
- data['2xx'] += value
- elif code_prefix == '3':
- data['3xx'] += value
- elif code_prefix == '4':
- data['4xx'] += value
- elif code_prefix == '5':
- data['5xx'] += value
- else:
- data['other'] += value
- self.data.update(data)
-
- def get_data_per_code(self, raw_data):
- for code, value in raw_data['total_status_code_count'].items():
- if self.charts:
- if code not in self.data:
- self.charts['detailed_response_codes'].add_dimension([code, code, 'incremental'])
- self.data[code] = value
-
-
-def fetch_data_(raw_data, metrics):
- data = dict()
-
- for metric in metrics:
- value = raw_data
- metrics_list = metric.split('.')
- try:
- for m in metrics_list:
- value = value[m]
- except KeyError:
- continue
- data['_'.join(metrics_list)] = value
-
- return data
diff --git a/collectors/python.d.plugin/traefik/traefik.conf b/collectors/python.d.plugin/traefik/traefik.conf
deleted file mode 100644
index e3f182d32..000000000
--- a/collectors/python.d.plugin/traefik/traefik.conf
+++ /dev/null
@@ -1,77 +0,0 @@
-# netdata python.d.plugin configuration for traefik health data API
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, the traefik module also supports the following:
-#
-# url: '<scheme>://<host>:<port>/<health_page_api>'
-# # http://localhost:8080/health
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-local:
- url: 'http://localhost:8080/health'
diff --git a/collectors/python.d.plugin/uwsgi/Makefile.inc b/collectors/python.d.plugin/uwsgi/Makefile.inc
deleted file mode 100644
index 75d96de0e..000000000
--- a/collectors/python.d.plugin/uwsgi/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += uwsgi/uwsgi.chart.py
-dist_pythonconfig_DATA += uwsgi/uwsgi.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += uwsgi/README.md uwsgi/Makefile.inc
-
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
deleted file mode 120000
index 44b855949..000000000
--- a/collectors/python.d.plugin/uwsgi/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/uwsgi.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md b/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md
deleted file mode 100644
index af58608bd..000000000
--- a/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md
+++ /dev/null
@@ -1,219 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/uwsgi/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/uwsgi/metadata.yaml"
-sidebar_label: "uWSGI"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Web Servers and Web Proxies"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# uWSGI
-
-
-<img src="https://netdata.cloud/img/uwsgi.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: uwsgi
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors uWSGI metrics about requests, workers, memory and more.
-
-It collects every metric exposed by uWSGI's stats server, either from the `stats.socket` or from a TCP/IP socket.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This collector will auto-detect uWSGI instances deployed on the local host, running on port 1717, or exposing stats on the socket `/tmp/stats.socket`.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per uWSGI instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| uwsgi.requests | a dimension per worker | requests/s |
-| uwsgi.tx | a dimension per worker | KiB/s |
-| uwsgi.avg_rt | a dimension per worker | milliseconds |
-| uwsgi.memory_rss | a dimension per worker | MiB |
-| uwsgi.memory_vsz | a dimension per worker | MiB |
-| uwsgi.exceptions | exceptions | exceptions |
-| uwsgi.harakiris | harakiris | harakiris |
-| uwsgi.respawns | respawns | respawns |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Enable the uWSGI Stats server
-
-Make sure that your uWSGI instance exposes its metrics via a Stats server.
-
-Source: https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html
-
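-As a sketch (see the link above for details), the stats server can be enabled
-with a single option in your uWSGI configuration:
-
-```ini
-# sketch of a uWSGI ini snippet enabling the stats server on a TCP port
-[uwsgi]
-stats = 127.0.0.1:1717
-
-# or expose it on a unix socket instead:
-# stats = /tmp/stats.socket
-```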
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/uwsgi.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/uwsgi.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | no |
-| socket | The path to the uWSGI stats socket, e.g. '/tmp/stats.socket' | no | no |
-| host | The host to connect to | no | no |
-| port | The port to connect to | no | no |
-
-</details>
-
-#### Examples
-
-##### Basic (default out-of-the-box)
-
-A basic example configuration. The auto-detection mechanism uses it by default. As all JOBs share the same name, only one of them can run at a time.
-
-<details><summary>Config</summary>
-
-```yaml
-socket:
- name : 'local'
- socket : '/tmp/stats.socket'
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 1717
-
-localipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 1717
-
-localipv6:
- name : 'local'
- host : '::1'
- port : 1717
-
-```
-</details>
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details><summary>Config</summary>
-
-```yaml
-local:
- name : 'local'
- host : 'localhost'
- port : 1717
-
-remote:
- name : 'remote'
- host : '192.0.2.1'
- port : 1717
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `uwsgi` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin uwsgi debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/uwsgi/metadata.yaml b/collectors/python.d.plugin/uwsgi/metadata.yaml
deleted file mode 100644
index cdb090ac1..000000000
--- a/collectors/python.d.plugin/uwsgi/metadata.yaml
+++ /dev/null
@@ -1,201 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: uwsgi
- monitored_instance:
- name: uWSGI
- link: "https://github.com/unbit/uwsgi/tree/2.0.21"
- categories:
- - data-collection.web-servers-and-web-proxies
- icon_filename: "uwsgi.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - application server
- - python
- - web applications
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors uWSGI metrics about requests, workers, memory and more."
- method_description: "It collects every metric exposed from the stats server of uWSGI, either from the `stats.socket` or from the web server's TCP/IP socket."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "This collector will auto-detect uWSGI instances deployed on the local host, running on port 1717, or exposing stats on socket `tmp/stats.socket`."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Enable the uWSGI Stats server
- description: |
-            Make sure that your uWSGI instance exposes its metrics via a Stats server.
-
- Source: https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html
- configuration:
- file:
- name: "python.d/uwsgi.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: The JOB's name as it will appear at the dashboard (by default is the job_name)
- default_value: job_name
- required: false
- - name: socket
-              description: The path to the uWSGI stats socket, e.g. '/tmp/stats.socket'
- default_value: no
- required: false
- - name: host
- description: The host to connect to
- default_value: no
- required: false
- - name: port
- description: The port to connect to
- default_value: no
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic (default out-of-the-box)
-          description: A basic example configuration. The auto-detection mechanism uses it by default. As all JOBs share the same name, only one of them can run at a time.
- config: |
- socket:
- name : 'local'
- socket : '/tmp/stats.socket'
-
- localhost:
- name : 'local'
- host : 'localhost'
- port : 1717
-
- localipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 1717
-
- localipv6:
- name : 'local'
- host : '::1'
- port : 1717
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- local:
- name : 'local'
- host : 'localhost'
- port : 1717
-
- remote:
- name : 'remote'
- host : '192.0.2.1'
- port : 1717
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: uwsgi.requests
- description: Requests
- unit: "requests/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per worker
- - name: uwsgi.tx
- description: Transmitted data
- unit: "KiB/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per worker
- - name: uwsgi.avg_rt
- description: Average request time
- unit: "milliseconds"
- chart_type: line
- dimensions:
- - name: a dimension per worker
- - name: uwsgi.memory_rss
- description: RSS (Resident Set Size)
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: a dimension per worker
- - name: uwsgi.memory_vsz
- description: VSZ (Virtual Memory Size)
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: a dimension per worker
- - name: uwsgi.exceptions
- description: Exceptions
- unit: "exceptions"
- chart_type: line
- dimensions:
- - name: exceptions
- - name: uwsgi.harakiris
- description: Harakiris
- unit: "harakiris"
- chart_type: line
- dimensions:
- - name: harakiris
- - name: uwsgi.respawns
- description: Respawns
- unit: "respawns"
- chart_type: line
- dimensions:
- - name: respawns
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
deleted file mode 100644
index e4d900005..000000000
--- a/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: uwsgi netdata python.d module
-# Author: Robbert Segeren (robbert-ef)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import json
-from copy import deepcopy
-
-from bases.FrameworkServices.SocketService import SocketService
-
-ORDER = [
- 'requests',
- 'tx',
- 'avg_rt',
- 'memory_rss',
- 'memory_vsz',
- 'exceptions',
- 'harakiri',
- 'respawn',
-]
-
-DYNAMIC_CHARTS = [
- 'requests',
- 'tx',
- 'avg_rt',
- 'memory_rss',
- 'memory_vsz',
-]
-
-# NOTE: lines are created dynamically in `check()` method
-CHARTS = {
- 'requests': {
- 'options': [None, 'Requests', 'requests/s', 'requests', 'uwsgi.requests', 'stacked'],
- 'lines': [
- ['requests', 'requests', 'incremental']
- ]
- },
- 'tx': {
- 'options': [None, 'Transmitted data', 'KiB/s', 'requests', 'uwsgi.tx', 'stacked'],
- 'lines': [
- ['tx', 'tx', 'incremental']
- ]
- },
- 'avg_rt': {
- 'options': [None, 'Average request time', 'milliseconds', 'requests', 'uwsgi.avg_rt', 'line'],
- 'lines': [
- ['avg_rt', 'avg_rt', 'absolute']
- ]
- },
- 'memory_rss': {
- 'options': [None, 'RSS (Resident Set Size)', 'MiB', 'memory', 'uwsgi.memory_rss', 'stacked'],
- 'lines': [
- ['memory_rss', 'memory_rss', 'absolute', 1, 1 << 20]
- ]
- },
- 'memory_vsz': {
- 'options': [None, 'VSZ (Virtual Memory Size)', 'MiB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
- 'lines': [
- ['memory_vsz', 'memory_vsz', 'absolute', 1, 1 << 20]
- ]
- },
- 'exceptions': {
- 'options': [None, 'Exceptions', 'exceptions', 'exceptions', 'uwsgi.exceptions', 'line'],
- 'lines': [
- ['exceptions', 'exceptions', 'incremental']
- ]
- },
- 'harakiri': {
- 'options': [None, 'Harakiris', 'harakiris', 'harakiris', 'uwsgi.harakiris', 'line'],
- 'lines': [
- ['harakiri_count', 'harakiris', 'incremental']
- ]
- },
- 'respawn': {
- 'options': [None, 'Respawns', 'respawns', 'respawns', 'uwsgi.respawns', 'line'],
- 'lines': [
- ['respawn_count', 'respawns', 'incremental']
- ]
- },
-}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- super(Service, self).__init__(configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = deepcopy(CHARTS)
- self.url = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 1717)
- # Clear dynamic dimensions, these are added during `_get_data()` to allow adding workers at run-time
- for chart in DYNAMIC_CHARTS:
- self.definitions[chart]['lines'] = []
- self.last_result = {}
- self.workers = []
-
- def read_data(self):
- """
- Read data from socket and parse as JSON.
- :return: (dict) stats
- """
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
- try:
- return json.loads(raw_data)
- except ValueError as err:
- self.error(err)
- return None
-
- def check(self):
- """
- Parse configuration and check if we can read data.
- :return: boolean
- """
- self._parse_config()
- return bool(self.read_data())
-
- def add_worker_dimensions(self, key):
- """
- Helper to add dimensions for a worker.
- :param key: (int or str) worker identifier
- :return:
- """
- for chart in DYNAMIC_CHARTS:
- for line in CHARTS[chart]['lines']:
- dimension_id = '{}_{}'.format(line[0], key)
- dimension_name = str(key)
-
- dimension = [dimension_id, dimension_name] + line[2:]
- self.charts[chart].add_dimension(dimension)
-
- @staticmethod
- def _check_raw_data(data):
- # The server will close the connection when it's done sending
- # data, so just keep looping until that happens.
- return False
-
- def _get_data(self):
- """
- Read data from socket
- :return: dict
- """
- stats = self.read_data()
- if not stats:
- return None
-
- result = {
- 'exceptions': 0,
- 'harakiri_count': 0,
- 'respawn_count': 0,
- }
-
- for worker in stats['workers']:
- key = worker['pid']
-
- # Add dimensions for new workers
- if key not in self.workers:
- self.add_worker_dimensions(key)
- self.workers.append(key)
-
- result['requests_{}'.format(key)] = worker['requests']
- result['tx_{}'.format(key)] = worker['tx']
- result['avg_rt_{}'.format(key)] = worker['avg_rt']
-
- # avg_rt is not reset by uwsgi, so reset here
- if self.last_result.get('requests_{}'.format(key)) == worker['requests']:
- result['avg_rt_{}'.format(key)] = 0
-
- result['memory_rss_{}'.format(key)] = worker['rss']
- result['memory_vsz_{}'.format(key)] = worker['vsz']
-
- result['exceptions'] += worker['exceptions']
- result['harakiri_count'] += worker['harakiri_count']
- result['respawn_count'] += worker['respawn_count']
-
- self.last_result = result
- return result
diff --git a/collectors/python.d.plugin/uwsgi/uwsgi.conf b/collectors/python.d.plugin/uwsgi/uwsgi.conf
deleted file mode 100644
index 7d09e7330..000000000
--- a/collectors/python.d.plugin/uwsgi/uwsgi.conf
+++ /dev/null
@@ -1,92 +0,0 @@
-# netdata python.d.plugin configuration for uwsgi
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, the uwsgi module also supports the following:
-#
-# socket: 'path/to/uwsgistats.sock'
-#
-# or
-# host: 'IP or HOSTNAME' # the host to connect to
-# port: PORT # the port to connect to
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-
-socket:
- name : 'local'
- socket : '/tmp/stats.socket'
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 1717
-
-localipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 1717
-
-localipv6:
- name : 'local'
- host : '::1'
- port : 1717
diff --git a/collectors/python.d.plugin/varnish/Makefile.inc b/collectors/python.d.plugin/varnish/Makefile.inc
deleted file mode 100644
index 2469b0592..000000000
--- a/collectors/python.d.plugin/varnish/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += varnish/varnish.chart.py
-dist_pythonconfig_DATA += varnish/varnish.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += varnish/README.md varnish/Makefile.inc
-
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
deleted file mode 120000
index 194be2335..000000000
--- a/collectors/python.d.plugin/varnish/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/varnish.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/varnish/integrations/varnish.md b/collectors/python.d.plugin/varnish/integrations/varnish.md
deleted file mode 100644
index da74dcf8f..000000000
--- a/collectors/python.d.plugin/varnish/integrations/varnish.md
+++ /dev/null
@@ -1,213 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/varnish/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/varnish/metadata.yaml"
-sidebar_label: "Varnish"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Web Servers and Web Proxies"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Varnish
-
-
-<img src="https://netdata.cloud/img/varnish.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: varnish
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Varnish metrics: global HTTP accelerator statistics, plus Backend (VBE) and Storage (SMF, SMA, MSE) statistics.
-
-Note that both Varnish-Cache (free and open source) and Varnish-Plus (the commercial/enterprise version) are supported.
-
-
-It uses the `varnishstat` tool to collect the metrics.
-
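-For illustration, a minimal sketch (not the collector's actual code) of the kind of one-shot invocation it performs, assuming `varnishstat` is installed and readable by the current user:
-
-```python
-import subprocess
-
-# dump all counters once in plain-text form, the way the collector consumes them
-output = subprocess.check_output(['varnishstat', '-1'], text=True)
-print('\n'.join(output.splitlines()[:5]))
-```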
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-The `netdata` user must be a member of the `varnish` group.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Varnish instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| varnish.session_connection | accepted, dropped | connections/s |
-| varnish.client_requests | received | requests/s |
-| varnish.all_time_hit_rate | hit, miss, hitpass | percentage |
-| varnish.current_poll_hit_rate | hit, miss, hitpass | percentage |
-| varnish.cached_objects_expired | objects | expired/s |
-| varnish.cached_objects_nuked | objects | nuked/s |
-| varnish.threads_total | None | number |
-| varnish.threads_statistics | created, failed, limited | threads/s |
-| varnish.threads_queue_len | in queue | requests |
-| varnish.backend_connections | successful, unhealthy, reused, closed, recycled, failed | connections/s |
-| varnish.backend_requests | sent | requests/s |
-| varnish.esi_statistics | errors, warnings | problems/s |
-| varnish.memory_usage | free, allocated | MiB |
-| varnish.uptime | uptime | seconds |
-
-### Per Backend
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| varnish.backend | header, body | kilobits/s |
-
-### Per Storage
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| varnish.storage_usage | free, allocated | KiB |
-| varnish.storage_alloc_objs | allocated | objects |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Provide the necessary permissions
-
-For the collector to work, you need to add the `netdata` user to the `varnish` group so that it can execute the `varnishstat` tool:
-
-```
-usermod -aG varnish netdata
-```
-
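-After changing the group membership, restart the Netdata Agent (for example, `sudo systemctl restart netdata`) so the change takes effect.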
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/varnish.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/varnish.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| instance_name | the name of the varnishd instance to get stats from. If not specified, the local host name is used. | | yes |
-| update_every | Sets the default data collection frequency. | 10 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will override the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-An example configuration.
-
-```yaml
-job_name:
- instance_name: '<name-of-varnishd-instance>'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin varnish debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/varnish/metadata.yaml b/collectors/python.d.plugin/varnish/metadata.yaml
deleted file mode 100644
index d31c1cf6f..000000000
--- a/collectors/python.d.plugin/varnish/metadata.yaml
+++ /dev/null
@@ -1,253 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: varnish
- monitored_instance:
- name: Varnish
- link: https://varnish-cache.org/
- categories:
- - data-collection.web-servers-and-web-proxies
- icon_filename: 'varnish.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - varnish
- - varnishstat
- - varnishd
- - cache
- - web server
- - web cache
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors Varnish metrics: global HTTP accelerator statistics, plus Backend (VBE) and Storage (SMF, SMA, MSE) statistics.
-
- Note that both Varnish-Cache (free and open source) and Varnish-Plus (the commercial/enterprise version) are supported.
- method_description: |
- It uses the `varnishstat` tool to collect the metrics.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: |
- The `netdata` user must be a member of the `varnish` group.
- default_behavior:
- auto_detection:
- description: By default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: Provide the necessary permissions
- description: |
- For the collector to work, you need to add the `netdata` user to the `varnish` group so that it can execute the `varnishstat` tool:
-
- ```
- usermod -aG varnish netdata
- ```
- configuration:
- file:
- name: python.d/varnish.conf
- description: ''
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: Config options
- enabled: true
- list:
- - name: instance_name
- description: the name of the varnishd instance to get stats from. If not specified, the local host name is used.
- default_value: ""
- required: true
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 10
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will override the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- examples:
- folding:
- enabled: true
- title: 'Config'
- list:
- - name: Basic
- description: An example configuration.
- folding:
- enabled: false
- config: |
- job_name:
- instance_name: '<name-of-varnishd-instance>'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: varnish.session_connection
- description: Connections Statistics
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: accepted
- - name: dropped
- - name: varnish.client_requests
- description: Client Requests
- unit: "requests/s"
- chart_type: line
- dimensions:
- - name: received
- - name: varnish.all_time_hit_rate
- description: All History Hit Rate Ratio
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hit
- - name: miss
- - name: hitpass
- - name: varnish.current_poll_hit_rate
- description: Current Poll Hit Rate Ratio
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hit
- - name: miss
- - name: hitpass
- - name: varnish.cached_objects_expired
- description: Expired Objects
- unit: "expired/s"
- chart_type: line
- dimensions:
- - name: objects
- - name: varnish.cached_objects_nuked
- description: Least Recently Used Nuked Objects
- unit: "nuked/s"
- chart_type: line
- dimensions:
- - name: objects
- - name: varnish.threads_total
- description: Number Of Threads In All Pools
- unit: "number"
- chart_type: line
- dimensions:
- - name: None
- - name: varnish.threads_statistics
- description: Threads Statistics
- unit: "threads/s"
- chart_type: line
- dimensions:
- - name: created
- - name: failed
- - name: limited
- - name: varnish.threads_queue_len
- description: Current Queue Length
- unit: "requests"
- chart_type: line
- dimensions:
- - name: in queue
- - name: varnish.backend_connections
- description: Backend Connections Statistics
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: successful
- - name: unhealthy
- - name: reused
- - name: closed
- - name: recycled
- - name: failed
- - name: varnish.backend_requests
- description: Requests To The Backend
- unit: "requests/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: varnish.esi_statistics
- description: ESI Statistics
- unit: "problems/s"
- chart_type: line
- dimensions:
- - name: errors
- - name: warnings
- - name: varnish.memory_usage
- description: Memory Usage
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: allocated
- - name: varnish.uptime
- description: Uptime
- unit: "seconds"
- chart_type: line
- dimensions:
- - name: uptime
- - name: Backend
- description: ""
- labels: []
- metrics:
- - name: varnish.backend
- description: Backend {backend_name}
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: header
- - name: body
- - name: Storage
- description: ""
- labels: []
- metrics:
- - name: varnish.storage_usage
- description: Storage {storage_name} Usage
- unit: "KiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: allocated
- - name: varnish.storage_alloc_objs
- description: Storage {storage_name} Allocated Objects
- unit: "objects"
- chart_type: line
- dimensions:
- - name: allocated
diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
deleted file mode 100644
index 506ad026a..000000000
--- a/collectors/python.d.plugin/varnish/varnish.chart.py
+++ /dev/null
@@ -1,385 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: varnish netdata python.d module
-# Author: ilyam8
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import re
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-from bases.collection import find_binary
-
-ORDER = [
- 'session_connections',
- 'client_requests',
- 'all_time_hit_rate',
- 'current_poll_hit_rate',
- 'cached_objects_expired',
- 'cached_objects_nuked',
- 'threads_total',
- 'threads_statistics',
- 'threads_queue_len',
- 'backend_connections',
- 'backend_requests',
- 'esi_statistics',
- 'memory_usage',
- 'uptime'
-]
-
-CHARTS = {
- 'session_connections': {
- 'options': [None, 'Connections Statistics', 'connections/s',
- 'client metrics', 'varnish.session_connection', 'line'],
- 'lines': [
- ['sess_conn', 'accepted', 'incremental'],
- ['sess_dropped', 'dropped', 'incremental']
- ]
- },
- 'client_requests': {
- 'options': [None, 'Client Requests', 'requests/s',
- 'client metrics', 'varnish.client_requests', 'line'],
- 'lines': [
- ['client_req', 'received', 'incremental']
- ]
- },
- 'all_time_hit_rate': {
- 'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance',
- 'varnish.all_time_hit_rate', 'stacked'],
- 'lines': [
- ['cache_hit', 'hit', 'percentage-of-absolute-row'],
- ['cache_miss', 'miss', 'percentage-of-absolute-row'],
- ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']]
- },
- 'current_poll_hit_rate': {
- 'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance',
- 'varnish.current_poll_hit_rate', 'stacked'],
- 'lines': [
- ['cache_hit', 'hit', 'percentage-of-incremental-row'],
- ['cache_miss', 'miss', 'percentage-of-incremental-row'],
- ['cache_hitpass', 'hitpass', 'percentage-of-incremental-row']
- ]
- },
- 'cached_objects_expired': {
- 'options': [None, 'Expired Objects', 'expired/s', 'cache performance',
- 'varnish.cached_objects_expired', 'line'],
- 'lines': [
- ['n_expired', 'objects', 'incremental']
- ]
- },
- 'cached_objects_nuked': {
- 'options': [None, 'Least Recently Used Nuked Objects', 'nuked/s', 'cache performance',
- 'varnish.cached_objects_nuked', 'line'],
- 'lines': [
- ['n_lru_nuked', 'objects', 'incremental']
- ]
- },
- 'threads_total': {
- 'options': [None, 'Number Of Threads In All Pools', 'number', 'thread related metrics',
- 'varnish.threads_total', 'line'],
- 'lines': [
- ['threads', None, 'absolute']
- ]
- },
- 'threads_statistics': {
- 'options': [None, 'Threads Statistics', 'threads/s', 'thread related metrics',
- 'varnish.threads_statistics', 'line'],
- 'lines': [
- ['threads_created', 'created', 'incremental'],
- ['threads_failed', 'failed', 'incremental'],
- ['threads_limited', 'limited', 'incremental']
- ]
- },
- 'threads_queue_len': {
- 'options': [None, 'Current Queue Length', 'requests', 'thread related metrics',
- 'varnish.threads_queue_len', 'line'],
- 'lines': [
- ['thread_queue_len', 'in queue']
- ]
- },
- 'backend_connections': {
- 'options': [None, 'Backend Connections Statistics', 'connections/s', 'backend metrics',
- 'varnish.backend_connections', 'line'],
- 'lines': [
- ['backend_conn', 'successful', 'incremental'],
- ['backend_unhealthy', 'unhealthy', 'incremental'],
- ['backend_reuse', 'reused', 'incremental'],
- ['backend_toolate', 'closed', 'incremental'],
- ['backend_recycle', 'recycled', 'incremental'],
- ['backend_fail', 'failed', 'incremental']
- ]
- },
- 'backend_requests': {
- 'options': [None, 'Requests To The Backend', 'requests/s', 'backend metrics',
- 'varnish.backend_requests', 'line'],
- 'lines': [
- ['backend_req', 'sent', 'incremental']
- ]
- },
- 'esi_statistics': {
- 'options': [None, 'ESI Statistics', 'problems/s', 'esi related metrics', 'varnish.esi_statistics', 'line'],
- 'lines': [
- ['esi_errors', 'errors', 'incremental'],
- ['esi_warnings', 'warnings', 'incremental']
- ]
- },
- 'memory_usage': {
- 'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 'stacked'],
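- # divisor 1 << 20 converts the byte values below to MiB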
- 'lines': [
- ['memory_free', 'free', 'absolute', 1, 1 << 20],
- ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
- },
- 'uptime': {
- 'lines': [
- ['uptime', None, 'absolute']
- ],
- 'options': [None, 'Uptime', 'seconds', 'uptime', 'varnish.uptime', 'line']
- }
-}
-
-
-def backend_charts_template(name):
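- # beresp hdrbytes/bodybytes are byte counters; multiplier 8 with divisor
- # 1000 renders them as kilobits/s, and the negative multiplier on 'body'
- # mirrors that dimension below the axis of the area chart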
- order = [
- '{0}_response_statistics'.format(name),
- ]
-
- charts = {
- order[0]: {
- 'options': [None, 'Backend "{0}"'.format(name), 'kilobits/s', 'backend response statistics',
- 'varnish.backend', 'area'],
- 'lines': [
- ['{0}_beresp_hdrbytes'.format(name), 'header', 'incremental', 8, 1000],
- ['{0}_beresp_bodybytes'.format(name), 'body', 'incremental', -8, 1000]
- ]
- },
- }
-
- return order, charts
-
-
-def storage_charts_template(name):
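- # g_space/g_bytes are byte gauges; divisor 1 << 10 renders them as KiB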
- order = [
- 'storage_{0}_usage'.format(name),
- 'storage_{0}_alloc_objs'.format(name)
- ]
-
- charts = {
- order[0]: {
- 'options': [None, 'Storage "{0}" Usage'.format(name), 'KiB', 'storage usage', 'varnish.storage_usage', 'stacked'],
- 'lines': [
- ['{0}.g_space'.format(name), 'free', 'absolute', 1, 1 << 10],
- ['{0}.g_bytes'.format(name), 'allocated', 'absolute', 1, 1 << 10]
- ]
- },
- order[1]: {
- 'options': [None, 'Storage "{0}" Allocated Objects'.format(name), 'objects', 'storage usage', 'varnish.storage_alloc_objs', 'line'],
- 'lines': [
- ['{0}.g_alloc'.format(name), 'allocated', 'absolute']
- ]
- }
- }
-
- return order, charts
-
-
-VARNISHSTAT = 'varnishstat'
-
-re_version = re.compile(r'varnish-(?:plus-)?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')
-
-
-class VarnishVersion:
- def __init__(self, major, minor, patch):
- self.major = major
- self.minor = minor
- self.patch = patch
-
- def __str__(self):
- return '{0}.{1}.{2}'.format(self.major, self.minor, self.patch)
-
-
-class Parser:
- _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
- _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_-]+).(beresp[\w_]+)\s+(\d+)')
- _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
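-
- # Illustrative varnishstat -1 lines each pattern above is meant to match:
- # VBE.boot.default(127.0.0.1,,8080).beresp_hdrbytes 1234 (new VBE format)
- # VBE.1a2b3c.default.beresp_hdrbytes 1234 (old VBE format)
- # MAIN.cache_hit 5678 (default)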
-
- def __init__(self):
- self.re_default = None
- self.re_backend = None
-
- def init(self, data):
- data = ''.join(data)
- parsed_main = Parser._default.findall(data)
- if parsed_main:
- self.re_default = Parser._default
-
- parsed_backend = Parser._backend_new.findall(data)
- if parsed_backend:
- self.re_backend = Parser._backend_new
- else:
- parsed_backend = Parser._backend_old.findall(data)
- if parsed_backend:
- self.re_backend = Parser._backend_old
-
- def server_stats(self, data):
- return self.re_default.findall(''.join(data))
-
- def backend_stats(self, data):
- return self.re_backend.findall(''.join(data))
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.instance_name = configuration.get('instance_name')
- self.parser = Parser()
- self.command = None
- self.collected_vbe = set()
- self.collected_storages = set()
-
- def create_command(self):
- varnishstat = find_binary(VARNISHSTAT)
-
- if not varnishstat:
- self.error("can't locate '{0}' binary or binary is not executable by user netdata".format(VARNISHSTAT))
- return False
-
- command = [varnishstat, '-V']
- reply = self._get_raw_data(stderr=True, command=command)
- if not reply:
- self.error(
- "no output from '{0}'. Is varnish running? Not enough privileges?".format(' '.join(self.command)))
- return False
-
- ver = parse_varnish_version(reply)
- if not ver:
- self.error("failed to parse reply from '{0}', used regex :'{1}', reply : {2}".format(
- ' '.join(command), re_version.pattern, reply))
- return False
-
- if self.instance_name:
- self.command = [varnishstat, '-1', '-n', self.instance_name]
- else:
- self.command = [varnishstat, '-1']
-
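- # for varnish 5+ add '-t 1' so varnishstat waits at most 1s for the
- # stats to become available instead of blocking indefinitely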
- if ver.major > 4:
- self.command.extend(['-t', '1'])
-
- self.info("varnish version: {0}, will use command: '{1}'".format(ver, ' '.join(self.command)))
-
- return True
-
- def check(self):
- if not self.create_command():
- return False
-
- # STDOUT is not empty
- reply = self._get_raw_data()
- if not reply:
- self.error("no output from '{0}'. Is it running? Not enough privileges?".format(' '.join(self.command)))
- return False
-
- self.parser.init(reply)
-
- # Output is parsable
- if not self.parser.re_default:
- self.error("can't parse the output...")
- return False
-
- return True
-
- def get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- raw = self._get_raw_data()
- if not raw:
- return None
-
- data = dict()
- server_stats = self.parser.server_stats(raw)
- if not server_stats:
- return None
-
- stats = dict((param, value) for _, param, value in server_stats)
- data.update(stats)
-
- self.get_vbe_backends(data, raw)
- self.get_storages(server_stats)
-
- # varnish 5 uses default.g_bytes and default.g_space
- data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
- data['memory_free'] = data.get('s0.g_space') or data.get('default.g_space')
-
- return data
-
- def get_vbe_backends(self, data, raw):
- if not self.parser.re_backend:
- return
- stats = self.parser.backend_stats(raw)
- if not stats:
- return
-
- for (name, param, value) in stats:
- data['_'.join([name, param])] = value
- if name in self.collected_vbe:
- continue
- self.collected_vbe.add(name)
- self.add_backend_charts(name)
-
- def get_storages(self, server_stats):
- # Storage types:
- # - SMF: File Storage
- # - SMA: Malloc Storage
- # - MSE: Massive Storage Engine (Varnish-Plus only)
- #
- # Stats example:
- # [('SMF.', 'ssdStorage.c_req', '47686'),
- # ('SMF.', 'ssdStorage.c_fail', '0'),
- # ('SMF.', 'ssdStorage.c_bytes', '668102656'),
- # ('SMF.', 'ssdStorage.c_freed', '140980224'),
- # ('SMF.', 'ssdStorage.g_alloc', '39753'),
- # ('SMF.', 'ssdStorage.g_bytes', '527122432'),
- # ('SMF.', 'ssdStorage.g_space', '53159968768'),
- # ('SMF.', 'ssdStorage.g_smf', '40130'),
- # ('SMF.', 'ssdStorage.g_smf_frag', '311'),
- # ('SMF.', 'ssdStorage.g_smf_large', '66')]
- storages = [name for typ, name, _ in server_stats if typ.startswith(('SMF', 'SMA', 'MSE')) and name.endswith('g_space')]
- if not storages:
- return
- for storage in storages:
- storage = storage.split('.')[0]
- if storage in self.collected_storages:
- continue
- self.collected_storages.add(storage)
- self.add_storage_charts(storage)
-
- def add_backend_charts(self, backend_name):
- self.add_charts(backend_name, backend_charts_template)
-
- def add_storage_charts(self, storage_name):
- self.add_charts(storage_name, storage_charts_template)
-
- def add_charts(self, name, charts_template):
- order, charts = charts_template(name)
-
- for chart_name in order:
- params = [chart_name] + charts[chart_name]['options']
- dimensions = charts[chart_name]['lines']
-
- new_chart = self.charts.add_chart(params)
- for dimension in dimensions:
- new_chart.add_dimension(dimension)
-
-
-def parse_varnish_version(lines):
- m = re_version.search(lines[0])
- if not m:
- return None
-
- m = m.groupdict()
- return VarnishVersion(
- int(m['major']),
- int(m['minor']),
- int(m['patch']),
- )
diff --git a/collectors/python.d.plugin/varnish/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf
deleted file mode 100644
index 54bfe4dee..000000000
--- a/collectors/python.d.plugin/varnish/varnish.conf
+++ /dev/null
@@ -1,66 +0,0 @@
-# netdata python.d.plugin configuration for varnish
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, varnish also supports the following:
-#
-# instance_name: 'name' # the name of the varnishd instance to get stats from. If not specified, the host name is used.
-#
-# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/w1sensor/Makefile.inc b/collectors/python.d.plugin/w1sensor/Makefile.inc
deleted file mode 100644
index bddf146f5..000000000
--- a/collectors/python.d.plugin/w1sensor/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += w1sensor/w1sensor.chart.py
-dist_pythonconfig_DATA += w1sensor/w1sensor.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += w1sensor/README.md w1sensor/Makefile.inc
-
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
deleted file mode 120000
index c0fa9cd1b..000000000
--- a/collectors/python.d.plugin/w1sensor/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/1-wire_sensors.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md b/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
deleted file mode 100644
index fe3c05ba6..000000000
--- a/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
+++ /dev/null
@@ -1,167 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/w1sensor/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/w1sensor/metadata.yaml"
-sidebar_label: "1-Wire Sensors"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Hardware Devices and Sensors"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# 1-Wire Sensors
-
-
-<img src="https://netdata.cloud/img/1-wire.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: w1sensor
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.
-
-The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently, temperature sensors are supported and automatically detected.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-The collector will try to auto-detect available 1-Wire devices.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per 1-Wire Sensors instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| w1sensor.temp | a dimension per sensor | Celsius |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Required Linux kernel modules
-
-Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/w1sensor.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/w1sensor.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will override the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| name_<1-Wire id> | This allows associating a human-readable name with a sensor's 1-Wire identifier. | | no |
-
-</details>
-
-#### Examples
-
-##### Provide human-readable names
-
-Associate two 1-Wire identifiers with human-readable names.
-
-```yaml
-sensors:
- name_00000022276e: 'Machine room'
- name_00000022298f: 'Rack 12'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin w1sensor debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/w1sensor/metadata.yaml b/collectors/python.d.plugin/w1sensor/metadata.yaml
deleted file mode 100644
index 7b0768237..000000000
--- a/collectors/python.d.plugin/w1sensor/metadata.yaml
+++ /dev/null
@@ -1,119 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: w1sensor
- monitored_instance:
- name: 1-Wire Sensors
- link: "https://www.analog.com/en/product-category/1wire-temperature-sensors.html"
- categories:
- - data-collection.hardware-devices-and-sensors
- icon_filename: "1-wire.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - temperature
- - sensor
- - 1-wire
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts."
- method_description: "The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "The collector will try to auto detect available 1-Wire devices."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Required Linux kernel modules"
- description: "Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded."
- configuration:
- file:
- name: python.d/w1sensor.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will override the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: name_<1-Wire id>
- description: This allows associating a human-readable name with a sensor's 1-Wire identifier.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Provide human-readable names
- description: Associate two 1-Wire identifiers with human-readable names.
- config: |
- sensors:
- name_00000022276e: 'Machine room'
- name_00000022298f: 'Rack 12'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: w1sensor.temp
- description: 1-Wire Temperature Sensor
- unit: "Celsius"
- chart_type: line
- dimensions:
- - name: a dimension per sensor
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
deleted file mode 100644
index 66797ced3..000000000
--- a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: 1-wire temperature monitor netdata python.d module
-# Author: Diomidis Spinellis <http://www.spinellis.gr>
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-import re
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values (can be overridden per job in `config`)
-update_every = 5
-
-# Location where 1-Wire devices can be found
-W1_DIR = '/sys/bus/w1/devices/'
-
-# Lines matching the following regular expression contain a temperature value
-RE_TEMP = re.compile(r' t=(-?\d+)')
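-
-# Illustrative w1_slave payload from a DS18B20 (the second line carries the
-# milli-degree value that RE_TEMP extracts):
-# 72 01 4b 46 7f ff 0e 10 57 : crc=57 YES
-# 72 01 4b 46 7f ff 0e 10 57 t=23125
-# here 23125 becomes 23.1 Celsius, stored as 231 (the chart divides by 10)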
-
-ORDER = [
- 'temp',
-]
-
-CHARTS = {
- 'temp': {
- 'options': [None, '1-Wire Temperature Sensor', 'Celsius', 'Temperature', 'w1sensor.temp', 'line'],
- 'lines': []
- }
-}
-
-# Known and supported family members
-# Based on linux/drivers/w1/w1_family.h and w1/slaves/w1_therm.c
-THERM_FAMILY = {
- '10': 'W1_THERM_DS18S20',
- '22': 'W1_THERM_DS1822',
- '28': 'W1_THERM_DS18B20',
- '3b': 'W1_THERM_DS1825',
- '42': 'W1_THERM_DS28EA00',
-}
-
-
-class Service(SimpleService):
- """Provide netdata service for 1-Wire sensors"""
-
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.probes = []
-
- def check(self):
- """Auto-detect available 1-Wire sensors, setting line definitions
- and probes to be monitored."""
- try:
- file_names = os.listdir(W1_DIR)
- except OSError as err:
- self.error(err)
- return False
-
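- # 1-Wire device directories are named '<family>-<serial>', e.g.
- # '28-00000abc1234' (hypothetical serial) for a DS18B20; keep only
- # families handled by w1_therm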
- lines = []
- for file_name in file_names:
- if file_name[2] != '-':
- continue
- if file_name[0:2] not in THERM_FAMILY:
- continue
-
- self.probes.append(file_name)
- identifier = file_name[3:]
- name = identifier
- config_name = self.configuration.get('name_' + identifier)
- if config_name:
- name = config_name
- lines.append(['w1sensor_temp_' + identifier, name, 'absolute',
- 1, 10])
- self.definitions['temp']['lines'] = lines
- return len(self.probes) > 0
-
- def get_data(self):
- """Return data read from sensors."""
- data = dict()
-
- for file_name in self.probes:
- file_path = W1_DIR + file_name + '/w1_slave'
- identifier = file_name[3:]
- try:
- with open(file_path, 'r') as device_file:
- for line in device_file:
- matched = RE_TEMP.search(line)
- if matched:
- # Round to one decimal digit to filter-out noise
- value = round(int(matched.group(1)) / 1000., 1)
- value = int(value * 10)
- data['w1sensor_temp_' + identifier] = value
- except (OSError, IOError) as err:
- self.error(err)
- continue
- return data or None
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.conf b/collectors/python.d.plugin/w1sensor/w1sensor.conf
deleted file mode 100644
index b60d28650..000000000
--- a/collectors/python.d.plugin/w1sensor/w1sensor.conf
+++ /dev/null
@@ -1,72 +0,0 @@
-# netdata python.d.plugin configuration for w1sensor
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 5
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 5 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, w1sensor also supports the following:
-#
-# name_<1-Wire id>: '<human-readable name>'
-# This allows associating a human-readable name with a sensor's 1-Wire
-# identifier. Example:
-# name_00000022276e: 'Machine room'
-# name_00000022298f: 'Rack 12'
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
diff --git a/collectors/python.d.plugin/zscores/Makefile.inc b/collectors/python.d.plugin/zscores/Makefile.inc
deleted file mode 100644
index d8b182415..000000000
--- a/collectors/python.d.plugin/zscores/Makefile.inc
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += zscores/zscores.chart.py
-dist_pythonconfig_DATA += zscores/zscores.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += zscores/README.md zscores/Makefile.inc
diff --git a/collectors/python.d.plugin/zscores/README.md b/collectors/python.d.plugin/zscores/README.md
deleted file mode 120000
index 159ce0787..000000000
--- a/collectors/python.d.plugin/zscores/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/python.d_zscores.md \ No newline at end of file
diff --git a/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md b/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
deleted file mode 100644
index 9d7d1c3d5..000000000
--- a/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
+++ /dev/null
@@ -1,195 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/zscores/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/zscores/metadata.yaml"
-sidebar_label: "python.d zscores"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Other"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# python.d zscores
-
-Plugin: python.d.plugin
-Module: zscores
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-By using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis.
-
-
-This collector uses the [Netdata REST API](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the `mean` and `stddev`
-for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).
-
-For each dimension it calculates a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over
-time (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions into a smoothed, rolling chart-level Z-Score at each time step.
-
-
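-A minimal sketch of this scoring (illustrative code and numbers, not the collector's implementation):
-
-```python
-import numpy as np
-
-def zscore(x, mean, std, z_clip=10, z_abs=True):
-    """Clipped (optionally absolute) z-score for one observation."""
-    z = 0.0 if std == 0 else (x - mean) / std
-    z = float(np.clip(z, -z_clip, z_clip))
-    return abs(z) if z_abs else z
-
-def smoothed(z_history, z_smooth_n=15):
-    """Rolling mean over the last z_smooth_n per-dimension z-scores."""
-    return float(np.mean(z_history[-z_smooth_n:]))
-
-# in 'per_chart' mode, the smoothed per-dimension scores are then
-# aggregated (per_chart_agg, e.g. mean) into one score per chart
-print(smoothed([zscore(x, mean=2.0, std=0.5) for x in (1.9, 2.4, 3.1)]))
-```
-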
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per python.d zscores instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| zscores.z | a dimension per chart or dimension | z |
-| zscores.3stddev | a dimension per chart or dimension | count |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Python Requirements
-
-This collector works only with Python 3 and requires the packages below to be installed.
-
-```bash
-# become netdata user
-sudo su -s /bin/bash netdata
-# install required packages
-pip3 install numpy pandas requests netdata-pandas==0.0.38
-```
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/zscores.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/zscores.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| charts_regex | what charts to pull data for - a regex like `system\..*\|` or `system\..*\|apps.cpu\|apps.mem` etc. | system\..* | yes |
-| train_secs | length of time (in seconds) to base the mean and stddev calculations on. | 14400 | yes |
-| offset_secs | offset (in seconds) preceding the latest data to ignore when calculating the mean and stddev. | 300 | yes |
-| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes |
-| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over the last n values. | 15 | yes |
-| z_clip | cap the absolute value of the zscore (before smoothing) for better stability. | 10 | yes |
-| z_abs | set z_abs: 'true' to report absolute zscore values only. | true | yes |
-| burn_in | burn-in period during which the mean and stddev are recalculated on every step. | 2 | yes |
-| mode | whether to compute a zscore 'per_dim' or 'per_chart'. | per_chart | yes |
-| per_chart_agg | how dimension scores are aggregated into a chart score when mode='per_chart'. | mean | yes |
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-
-</details>
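-
-For example, with the defaults above (`train_secs: 14400`, `offset_secs: 300`), each re-training computes the mean and stddev over the window from roughly 4 hours ago up to 5 minutes before the latest data.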
-
-#### Examples
-
-##### Default
-
-Default configuration.
-
-```yaml
-local:
- name: 'local'
- host: '127.0.0.1:19999'
- charts_regex: 'system\..*'
- charts_to_exclude: 'system.uptime'
- train_secs: 14400
- offset_secs: 300
- train_every_n: 900
- z_smooth_n: 15
- z_clip: 10
- z_abs: 'true'
- burn_in: 2
- mode: 'per_chart'
- per_chart_agg: 'mean'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin zscores debug trace
- ```
-
-
diff --git a/collectors/python.d.plugin/zscores/metadata.yaml b/collectors/python.d.plugin/zscores/metadata.yaml
deleted file mode 100644
index 388e9b460..000000000
--- a/collectors/python.d.plugin/zscores/metadata.yaml
+++ /dev/null
@@ -1,187 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: zscores
- monitored_instance:
- name: python.d zscores
- link: https://en.wikipedia.org/wiki/Standard_score
- categories:
- - data-collection.other
- icon_filename: ""
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - zscore
- - z-score
- - standard score
- - standard deviation
- - anomaly detection
- - statistical anomaly detection
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- By using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis.
- method_description: |
- This collector uses the [Netdata REST API](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the `mean` and `stddev`
- for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).
-
- For each dimension it calculates a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over
- time (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions into a smoothed, rolling chart-level Z-Score at each time step.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Python Requirements
- description: |
- This collector works only with Python 3 and requires the packages below to be installed.
-
- ```bash
- # become netdata user
- sudo su -s /bin/bash netdata
- # install required packages
- pip3 install numpy pandas requests netdata-pandas==0.0.38
- ```
- configuration:
- file:
- name: python.d/zscores.conf
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: charts_regex
- description: what charts to pull data for - A regex like `system\..*|` or `system\..*|apps.cpu|apps.mem` etc.
- default_value: "system\\..*"
- required: true
- - name: train_secs
- description: length of time (in seconds) to base the mean and stddev calculations on.
- default_value: 14400
- required: true
- - name: offset_secs
- description: offset (in seconds) preceding the latest data to ignore when calculating the mean and stddev.
- default_value: 300
- required: true
- - name: train_every_n
- description: recalculate the mean and stddev every n steps of the collector.
- default_value: 900
- required: true
- - name: z_smooth_n
- description: smooth the z score (to reduce sensitivity to spikes) by averaging it over the last n values.
- default_value: 15
- required: true
- - name: z_clip
- description: cap the absolute value of the zscore (before smoothing) for better stability.
- default_value: 10
- required: true
- - name: z_abs
- description: "set z_abs: 'true' to make all zscores be absolute values only."
- default_value: "true"
- required: true
- - name: burn_in
- description: burn-in period during which the mean and stddev are recalculated on every step.
- default_value: 2
- required: true
- - name: mode
- description: whether to compute a zscore 'per_dim' or 'per_chart'.
- default_value: per_chart
- required: true
- - name: per_chart_agg
- description: how dimension scores are aggregated into a chart score when mode='per_chart'.
- default_value: mean
- required: true
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Default
- description: Default configuration.
- folding:
- enabled: false
- config: |
- local:
- name: 'local'
- host: '127.0.0.1:19999'
- charts_regex: 'system\..*'
- charts_to_exclude: 'system.uptime'
- train_secs: 14400
- offset_secs: 300
- train_every_n: 900
- z_smooth_n: 15
- z_clip: 10
- z_abs: 'true'
- burn_in: 2
- mode: 'per_chart'
- per_chart_agg: 'mean'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: zscores.z
- description: Z Score
- unit: "z"
- chart_type: line
- dimensions:
- - name: a dimension per chart or dimension
- - name: zscores.3stddev
- description: Z Score >3
- unit: "count"
- chart_type: stacked
- dimensions:
- - name: a dimension per chart or dimension
diff --git a/collectors/python.d.plugin/zscores/zscores.chart.py b/collectors/python.d.plugin/zscores/zscores.chart.py
deleted file mode 100644
index 1099b9376..000000000
--- a/collectors/python.d.plugin/zscores/zscores.chart.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: zscores netdata python.d module
-# Author: andrewm4894
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from datetime import datetime
-import re
-
-import requests
-import numpy as np
-import pandas as pd
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from netdata_pandas.data import get_data, get_allmetrics
-
-priority = 60000
-update_every = 5
-disabled_by_default = True
-
-ORDER = [
- 'z',
- '3stddev'
-]
-
-CHARTS = {
- 'z': {
- 'options': ['z', 'Z Score', 'z', 'Z Score', 'zscores.z', 'line'],
- 'lines': []
- },
- '3stddev': {
- 'options': ['3stddev', 'Z Score >3', 'count', '3 Stddev', 'zscores.3stddev', 'stacked'],
- 'lines': []
- },
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.host = self.configuration.get('host', '127.0.0.1:19999')
- self.charts_regex = re.compile(self.configuration.get('charts_regex', 'system.*'))
- self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')
- self.charts_in_scope = [
- c for c in
- list(filter(self.charts_regex.match,
- requests.get(f'http://{self.host}/api/v1/charts').json()['charts'].keys()))
- if c not in self.charts_to_exclude
- ]
- self.train_secs = self.configuration.get('train_secs', 14400)
- self.offset_secs = self.configuration.get('offset_secs', 300)
- self.train_every_n = self.configuration.get('train_every_n', 900)
- self.z_smooth_n = self.configuration.get('z_smooth_n', 15)
- self.z_clip = self.configuration.get('z_clip', 10)
- self.z_abs = bool(self.configuration.get('z_abs', True))
- self.burn_in = self.configuration.get('burn_in', 2)
- self.mode = self.configuration.get('mode', 'per_chart')
- self.per_chart_agg = self.configuration.get('per_chart_agg', 'mean')
- self.order = ORDER
- self.definitions = CHARTS
- self.collected_dims = {'z': set(), '3stddev': set()}
- self.df_mean = pd.DataFrame()
- self.df_std = pd.DataFrame()
- self.df_z_history = pd.DataFrame()
-
- def check(self):
- _ = get_allmetrics(self.host, self.charts_in_scope, wide=True, col_sep='.')
- return True
-
- def validate_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):
- """If dimension not in chart then add it.
- """
- for dim in data:
- if dim not in self.collected_dims[chart]:
- self.collected_dims[chart].add(dim)
- self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])
-
- for dim in list(self.collected_dims[chart]):
- if dim not in data:
- self.collected_dims[chart].remove(dim)
- self.charts[chart].del_dimension(dim, hide=False)
-
- def train_model(self):
- """Calculate the mean and stddev for all relevant metrics and store them for use in calulcating zscore at each timestep.
- """
- before = int(datetime.now().timestamp()) - self.offset_secs
- after = before - self.train_secs
-
- self.df_mean = get_data(
- self.host, self.charts_in_scope, after, before, points=10, group='average', col_sep='.'
- ).mean().to_frame().rename(columns={0: "mean"})
-
- self.df_std = get_data(
- self.host, self.charts_in_scope, after, before, points=10, group='stddev', col_sep='.'
- ).mean().to_frame().rename(columns={0: "std"})
-
- def create_data(self, df_allmetrics):
- """Use x, mean, stddev to generate z scores and 3stddev flags via some pandas manipulation.
- Returns two dictionaries of dimensions and measures, one for each chart.
-
- :param df_allmetrics <pd.DataFrame>: pandas dataframe with latest data from api/v1/allmetrics.
- :return: (<dict>,<dict>) tuple of dictionaries, one for zscores and the other for a flag if abs(z)>3.
- """
- # calculate clipped z score for each available metric
- df_z = pd.concat([self.df_mean, self.df_std, df_allmetrics], axis=1, join='inner')
- df_z['z'] = ((df_z['value'] - df_z['mean']) / df_z['std']).clip(-self.z_clip, self.z_clip).fillna(0) * 100
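- # note: z is scaled by 100 here, so the '> 300' threshold used for the
- # '3stddev' flag below corresponds to |z| > 3, and validate_charts()
- # registers the 'z' chart with divisor=100 to undo the scaling for display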
- if self.z_abs:
- df_z['z'] = df_z['z'].abs()
-
- # append last z_smooth_n rows of zscores to history table in wide format
- self.df_z_history = self.df_z_history.append(
- df_z[['z']].reset_index().pivot_table(values='z', columns='index'), sort=True
- ).tail(self.z_smooth_n)
-
- # get average zscore for last z_smooth_n for each metric
- df_z_smooth = self.df_z_history.melt(value_name='z').groupby('index')['z'].mean().to_frame()
- df_z_smooth['3stddev'] = np.where(abs(df_z_smooth['z']) > 300, 1, 0)
- data_z = df_z_smooth['z'].add_suffix('_z').to_dict()
-
- # aggregate to chart level if specified
- if self.mode == 'per_chart':
- df_z_smooth['chart'] = ['.'.join(x[0:2]) + '_z' for x in df_z_smooth.index.str.split('.').to_list()]
- if self.per_chart_agg == 'absmax':
- data_z = \
- list(df_z_smooth.groupby('chart').agg({'z': lambda x: max(x, key=abs)})['z'].to_dict().values())[0]
- else:
- data_z = list(df_z_smooth.groupby('chart').agg({'z': [self.per_chart_agg]})['z'].to_dict().values())[0]
-
- data_3stddev = {}
- for k in data_z:
- data_3stddev[k.replace('_z', '')] = 1 if abs(data_z[k]) > 300 else 0
-
- return data_z, data_3stddev
-
- def get_data(self):
-
- if self.runs_counter <= self.burn_in or self.runs_counter % self.train_every_n == 0:
- self.train_model()
-
- data_z, data_3stddev = self.create_data(
- get_allmetrics(self.host, self.charts_in_scope, wide=True, col_sep='.').transpose())
- data = {**data_z, **data_3stddev}
-
- self.validate_charts('z', data_z, divisor=100)
- self.validate_charts('3stddev', data_3stddev)
-
- return data
diff --git a/collectors/python.d.plugin/zscores/zscores.conf b/collectors/python.d.plugin/zscores/zscores.conf
deleted file mode 100644
index 07d62ebe6..000000000
--- a/collectors/python.d.plugin/zscores/zscores.conf
+++ /dev/null
@@ -1,108 +0,0 @@
-# netdata python.d.plugin configuration for example
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-update_every: 5
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, example also supports the following:
-#
-# - none
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-local:
- name: 'local'
-
- # what host to pull data from
- host: '127.0.0.1:19999'
-
- # what charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
- charts_regex: 'system\..*'
-
- # Charts to exclude, useful if you would like to exclude some specific charts.
- # Note: should be a ',' separated string like 'chart.name,chart.name'.
- charts_to_exclude: 'system.uptime'
-
- # length of time to base calculations off for mean and stddev
- train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore
-
- # offset preceding latest data to ignore when calculating mean and stddev
- offset_secs: 300 # ignore last 5 minutes of data when calculating the mean and stddev
-
- # recalculate the mean and stddev every n steps of the collector
- train_every_n: 900 # recalculate mean and stddev every 15 minutes
-
- # smooth the z score by averaging it over last n values
- z_smooth_n: 15 # take a rolling average of the last 15 zscore values to reduce sensitivity to temporary 'spikes'
-
- # cap absolute value of zscore (before smoothing) for better stability
- z_clip: 10 # cap each zscore at 10 so as to avoid really large individual zscores swamping any rolling average
-
- # set z_abs: 'true' to make all zscores be absolute values only.
- z_abs: 'true'
-
- # burn in period in which to initially calculate mean and stddev on every step
- burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or initial calculations fail to return
-
- # whether to compute a zscore 'per_dim' or 'per_chart'
- mode: 'per_chart' # 'per_chart' means individual dimension level smoothed zscores will be aggregated to one zscore per chart per time step
-
- # per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'
- per_chart_agg: 'mean' # 'absmax' will take the max absolute value across all dimensions but will maintain the sign. 'mean' will just average.
diff --git a/collectors/slabinfo.plugin/Makefile.am b/collectors/slabinfo.plugin/Makefile.am
deleted file mode 100644
index 07796ea7b..000000000
--- a/collectors/slabinfo.plugin/Makefile.am
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- slabinfo.plugin \
- $(NULL)
-
-include $(top_srcdir)/build/subst.inc
-SUFFIXES = .in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/slabinfo.plugin/README.md b/collectors/slabinfo.plugin/README.md
deleted file mode 120000
index 4d4629a77..000000000
--- a/collectors/slabinfo.plugin/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/linux_kernel_slab_allocator_statistics.md \ No newline at end of file
diff --git a/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md b/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md
deleted file mode 100644
index ce8115270..000000000
--- a/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md
+++ /dev/null
@@ -1,131 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/slabinfo.plugin/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/slabinfo.plugin/metadata.yaml"
-sidebar_label: "Linux kernel SLAB allocator statistics"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Kernel"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Linux kernel SLAB allocator statistics
-
-
-<img src="https://netdata.cloud/img/linuxserver.svg" width="150"/>
-
-
-Plugin: slabinfo.plugin
-Module: slabinfo.plugin
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Collects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads in the kernel.
-
-
-The plugin parses `/proc/slabinfo`
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-This integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-Due to the large number of metrics generated by this integration, it is disabled by default and must be manually enabled inside `/etc/netdata/netdata.conf`
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-SLAB cache utilization metrics for the whole system.
-
-### Per Linux kernel SLAB allocator statistics instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| mem.slabmemory | a dimension per cache | B |
-| mem.slabfilling | a dimension per cache | % |
-| mem.slabwaste | a dimension per cache | B |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Minimum setup
-
-If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugins]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
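-
-For example, a hedged sketch of what enabling the plugin could look like (assuming the option key matches the plugin name, as is usual for Netdata plugins):
-
-```ini
-[plugins]
-    slabinfo = yes
-```
-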
-#### Options
-
-
-
-<details><summary>The main configuration file.</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| Enable plugin | As described above, the plugin is disabled by default; this option is used to enable it. | no | yes |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
diff --git a/collectors/slabinfo.plugin/metadata.yaml b/collectors/slabinfo.plugin/metadata.yaml
deleted file mode 100644
index f19778297..000000000
--- a/collectors/slabinfo.plugin/metadata.yaml
+++ /dev/null
@@ -1,108 +0,0 @@
-plugin_name: slabinfo.plugin
-modules:
- - meta:
- plugin_name: slabinfo.plugin
- module_name: slabinfo.plugin
- monitored_instance:
- name: Linux kernel SLAB allocator statistics
- link: "https://kernel.org/"
- categories:
- - data-collection.linux-systems.kernel-metrics
- icon_filename: 'linuxserver.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - linux kernel
- - slab
- - slub
- - slob
- - slabinfo
- most_popular: false
- overview:
- data_collection:
- metrics_description: >
- Collects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads
- in the kernel.
- method_description: "The plugin parses `/proc/slabinfo`"
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: false
- additional_permissions:
- description: >
- This integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by
- default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH`
- is added automatically during installation. This capability allows bypassing file read permission checks
- and directory read and execute permission checks. If file capabilities are not usable, then the plugin is
- instead installed with the SUID bit set in permissions so that it runs as root.
- default_behavior:
- auto_detection:
- description: >
- Due to the large number of metrics generated by this integration, it is disabled by default and must be
- manually enabled inside `/etc/netdata/netdata.conf`
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Minimum setup
- description: "If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`."
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugins]"
- description: "The main configuration file."
- options:
- description: ""
- folding:
- title: "The main configuration file."
- enabled: true
- list:
- - name: Enable plugin
- description: As described above, the plugin is disabled by default; this option is used to enable it.
- default_value: no
- required: true
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: "SLAB cache utilization metrics for the whole system."
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: mem.slabmemory
- description: Memory Usage
- unit: "B"
- chart_type: line
- dimensions:
- - name: a dimension per cache
- - name: mem.slabfilling
- description: Object Filling
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per cache
- - name: mem.slabwaste
- description: Memory waste
- unit: "B"
- chart_type: line
- dimensions:
- - name: a dimension per cache
diff --git a/collectors/slabinfo.plugin/slabinfo.c b/collectors/slabinfo.plugin/slabinfo.c
deleted file mode 100644
index 9b9119a6e..000000000
--- a/collectors/slabinfo.plugin/slabinfo.c
+++ /dev/null
@@ -1,393 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "daemon/common.h"
-#include "libnetdata/required_dummies.h"
-
-#define PLUGIN_SLABINFO_NAME "slabinfo.plugin"
-#define PLUGIN_SLABINFO_PROCFILE "/proc/slabinfo"
-
-#define CHART_TYPE "mem"
-#define CHART_FAMILY "slab"
-#define CHART_PRIO 3000
-
-// #define slabdebug(...) if (debug) { fprintf(stderr, __VA_ARGS__); }
-#define slabdebug(args...) if (debug) { \
- fprintf(stderr, "slabinfo.plugin DEBUG (%04d@%-10.10s:%-15.15s)::", __LINE__, __FILE__, __FUNCTION__); \
- fprintf(stderr, ##args); \
- fprintf(stderr, "\n"); }
-
-int running = 1;
-int debug = 0;
-size_t lines_discovered = 0;
-int redraw_chart = 0;
-
-// ----------------------------------------------------------------------------
-
-// Slabinfo format :
-// format 2.1 Was provided by 57ed3eda977a215f054102b460ab0eb5d8d112e6 (2.6.24-rc6) as:
-// seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
-// seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
-// seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
-//
-// With max values:
-// seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-// cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size, sinfo.objects_per_slab, (1 << sinfo.cache_order));
-// seq_printf(m, " : tunables %4u %4u %4u",
-// sinfo.limit, sinfo.batchcount, sinfo.shared);
-// seq_printf(m, " : slabdata %6lu %6lu %6lu",
-// sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
-//
-// If CONFIG_DEBUG_SLAB is set, it will also add columns from slabinfo_show_stats (for SLAB only):
-// seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
-// allocs, high, grown, reaped, errors, max_freeable, node_allocs, node_frees, overflows);
-// seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
-// allochit, allocmiss, freehit, freemiss);
-//
-// Implementation choices:
-// - Iterates through a linked list of kmem_cache.
-// - Name is a char* from struct kmem_cache (mm/slab.h).
-// - max name size found is 24:
-// grep -roP 'kmem_cache_create\(".+"'| awk '{split($0,a,"\""); print a[2],length(a[2]); }' | sort -k2 -n
-// - Using uint64 everywhere, as types fits and allows to use standard helpers
-
-struct slabinfo {
- // procfile fields
- const char *name;
- uint64_t active_objs;
- uint64_t num_objs;
- uint64_t obj_size;
- uint64_t obj_per_slab;
- uint64_t pages_per_slab;
- uint64_t tune_limit;
- uint64_t tune_batchcnt;
- uint64_t tune_shared_factor;
- uint64_t data_active_slabs;
- uint64_t data_num_slabs;
- uint64_t data_shared_avail;
-
- // Calculated fields
- uint64_t mem_usage;
- uint64_t mem_waste;
- uint8_t obj_filling;
-
- uint32_t hash;
- struct slabinfo *next;
-} *slabinfo_root = NULL, *slabinfo_next = NULL, *slabinfo_last_used = NULL;
-
-// The code is heavily inspired by "proc_net_dev.c" and "perf_plugin.c"
-
-// Get the existing object, or create a new one
-static struct slabinfo *get_slabstruct(const char *name) {
- struct slabinfo *s;
-
- slabdebug("--> Requested slabstruct %s", name);
-
- uint32_t hash = simple_hash(name);
-
- // Search it, from the next to the end
- for (s = slabinfo_next; s; s = s->next) {
- if (hash == s->hash && !strcmp(name, s->name)) {
- slabdebug("<-- Found existing slabstruct after %s", slabinfo_last_used->name);
- // Prepare the next run
- slabinfo_next = s->next;
- slabinfo_last_used = s;
- return s;
- }
- }
-
- // Search it from the beginning to the last position we used
- for (s = slabinfo_root; s != slabinfo_last_used; s = s->next) {
- if (hash == s->hash && !strcmp(name, s->name)) {
- slabdebug("<-- Found existing slabstruct after root %s", slabinfo_root->name);
- slabinfo_next = s->next;
- slabinfo_last_used = s;
- return s;
- }
- }
-
- // Create a new one
- s = callocz(1, sizeof(struct slabinfo));
- s->name = strdupz(name);
- s->hash = hash;
-
- // Add it to the current position
- if (slabinfo_root) {
- slabdebug("<-- Creating new slabstruct after %s", slabinfo_last_used->name);
- s->next = slabinfo_last_used->next;
- slabinfo_last_used->next = s;
- slabinfo_last_used = s;
- }
- else {
- slabdebug("<-- Creating new slabstruct as root");
- slabinfo_root = slabinfo_last_used = s;
- }
-
- return s;
-}
-
-
-// Read a full pass of slabinfo to update the structs
-struct slabinfo *read_file_slabinfo() {
-
- slabdebug("-> Reading procfile %s", PLUGIN_SLABINFO_PROCFILE);
-
- static procfile *ff = NULL;
- static long slab_pagesize = 0;
-
- if (unlikely(!slab_pagesize)) {
- slab_pagesize = sysconf(_SC_PAGESIZE);
- slabdebug(" Discovered pagesize: %ld", slab_pagesize);
- }
-
- if(unlikely(!ff)) {
- ff = procfile_reopen(ff, PLUGIN_SLABINFO_PROCFILE, " ,:" , PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) {
- collector_error("<- Cannot open file '%s", PLUGIN_SLABINFO_PROCFILE);
- exit(1);
- }
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) {
- collector_error("<- Cannot read file '%s'", PLUGIN_SLABINFO_PROCFILE);
- exit(0);
- }
-
-
- // Iterate on all lines to populate / update the slabinfo struct
- size_t lines = procfile_lines(ff), l;
- if (unlikely(lines != lines_discovered)) {
- lines_discovered = lines;
- redraw_chart = 1;
- }
-
- slabdebug(" Read %lu lines from procfile", (unsigned long)lines);
- for(l = 2; l < lines; l++) {
- if (unlikely(procfile_linewords(ff, l) < 14)) {
- slabdebug(" Line %zu has only %zu words, skipping", l, procfile_linewords(ff,l));
- continue;
- }
-
- char *name = procfile_lineword(ff, l, 0);
- struct slabinfo *s = get_slabstruct(name);
-
- s->active_objs = str2uint64_t(procfile_lineword(ff, l, 1), NULL);
- s->num_objs = str2uint64_t(procfile_lineword(ff, l, 2), NULL);
- s->obj_size = str2uint64_t(procfile_lineword(ff, l, 3), NULL);
- s->obj_per_slab = str2uint64_t(procfile_lineword(ff, l, 4), NULL);
- s->pages_per_slab = str2uint64_t(procfile_lineword(ff, l, 5), NULL);
-
- s->tune_limit = str2uint64_t(procfile_lineword(ff, l, 7), NULL);
- s->tune_batchcnt = str2uint64_t(procfile_lineword(ff, l, 8), NULL);
- s->tune_shared_factor = str2uint64_t(procfile_lineword(ff, l, 9), NULL);
-
- s->data_active_slabs = str2uint64_t(procfile_lineword(ff, l, 11), NULL);
- s->data_num_slabs = str2uint64_t(procfile_lineword(ff, l, 12), NULL);
- s->data_shared_avail = str2uint64_t(procfile_lineword(ff, l, 13), NULL);
-
- uint32_t memperslab = s->pages_per_slab * slab_pagesize;
- // Internal fragmentation: loss per slab, due to objects not being a multiple of pagesize
- //uint32_t lossperslab = memperslab - s->obj_per_slab * s->obj_size;
-
- // Total usage = slabs * pages per slab * page size
- s->mem_usage = (uint64_t)(s->data_num_slabs * memperslab);
-
- // Wasted memory (filling): slabs allocated but not filled: sum total slab - sum total objects
- s->mem_waste = s->mem_usage - (uint64_t)(s->active_objs * s->obj_size);
- //if (s->data_num_slabs > 1)
- // s->mem_waste += s->data_num_slabs * lossperslab;
-
-
- // Slab filling efficiency
- if (s->num_objs > 0)
- s->obj_filling = 100 * s->active_objs / s->num_objs;
- else
- s->obj_filling = 0;
-
- slabdebug(" Updated slab %s: %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64" %"PRIu64" / %"PRIu64" %"PRIu64" %"PRIu64" / %"PRIu64" %"PRIu64" %"PRIu64" / %"PRIu64" %"PRIu64" %hhu",
- name, s->active_objs, s->num_objs, s->obj_size, s->obj_per_slab, s->pages_per_slab,
- s->tune_limit, s->tune_batchcnt, s->tune_shared_factor,
- s->data_active_slabs, s->data_num_slabs, s->data_shared_avail,
- s->mem_usage, s->mem_waste, s->obj_filling);
- }
-
- return slabinfo_root;
-}
-
-
-
-unsigned int do_slab_stats(int update_every) {
-
- static unsigned int loops = 0;
- struct slabinfo *sactive = NULL, *s = NULL;
-
- // Main processing loop
- while (running) {
-
- sactive = read_file_slabinfo();
-
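- // charts and values are emitted on stdout using Netdata's external
- // plugin text protocol: CHART/DIMENSION define charts, BEGIN/SET/END update them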
- // Init Charts
- if (unlikely(redraw_chart)) {
- redraw_chart = 0;
- // Memory Usage
- printf("CHART %s.%s '' 'Memory Usage' 'B' '%s' '' line %d %d %s\n"
- , CHART_TYPE
- , "slabmemory"
- , CHART_FAMILY
- , CHART_PRIO
- , update_every
- , PLUGIN_SLABINFO_NAME
- );
- for (s = sactive; s; s = s->next) {
- printf("DIMENSION %s '' absolute 1 1\n", s->name);
- }
-
- // Slab active usage (filling)
- printf("CHART %s.%s '' 'Object Filling' '%%' '%s' '' line %d %d %s\n"
- , CHART_TYPE
- , "slabfilling"
- , CHART_FAMILY
- , CHART_PRIO + 1
- , update_every
- , PLUGIN_SLABINFO_NAME
- );
- for (s = sactive; s; s = s->next) {
- printf("DIMENSION %s '' absolute 1 1\n", s->name);
- }
-
- // Memory waste
- printf("CHART %s.%s '' 'Memory waste' 'B' '%s' '' line %d %d %s\n"
- , CHART_TYPE
- , "slabwaste"
- , CHART_FAMILY
- , CHART_PRIO + 2
- , update_every
- , PLUGIN_SLABINFO_NAME
- );
- for (s = sactive; s; s = s->next) {
- printf("DIMENSION %s '' absolute 1 1\n", s->name);
- }
- }
-
-
- //
- // Memory usage
- //
- printf("BEGIN %s.%s\n"
- , CHART_TYPE
- , "slabmemory"
- );
- for (s = sactive; s; s = s->next) {
- printf("SET %s = %"PRIu64"\n"
- , s->name
- , s->mem_usage
- );
- }
- printf("END\n");
-
- //
- // Slab active usage
- //
- printf("BEGIN %s.%s\n"
- , CHART_TYPE
- , "slabfilling"
- );
- for (s = sactive; s; s = s->next) {
- printf("SET %s = %u\n"
- , s->name
- , s->obj_filling
- );
- }
- printf("END\n");
-
- //
- // Memory waste
- //
- printf("BEGIN %s.%s\n"
- , CHART_TYPE
- , "slabwaste"
- );
- for (s = sactive; s; s = s->next) {
- printf("SET %s = %"PRIu64"\n"
- , s->name
- , s->mem_waste
- );
- }
- printf("END\n");
-
-
- loops++;
-
- sleep(update_every);
- }
-
- return loops;
-}
-
-
-
-
-// ----------------------------------------------------------------------------
-// main
-
-void usage(void) {
- fprintf(stderr, "%s\n", program_name);
- exit(1);
-}
-
-int main(int argc, char **argv) {
- clocks_init();
- nd_log_initialize_for_external_plugins("slabinfo.plugin");
-
- program_name = argv[0];
- program_version = "0.1";
-
- int update_every = 1, i, n, freq = 0;
-
- for (i = 1; i < argc; i++) {
- // Frequency parsing
- if(isdigit(*argv[i]) && !freq) {
- n = (int) str2l(argv[i]);
- if (n > 0) {
- if (n >= UPDATE_EVERY_MAX) {
- collector_error("Invalid interval value: %s", argv[i]);
- exit(1);
- }
- freq = n;
- }
- }
- else if (strcmp("debug", argv[i]) == 0) {
- debug = 1;
- continue;
- }
- else {
- fprintf(stderr,
- "netdata slabinfo.plugin %s\n"
- "This program is a data collector plugin for netdata.\n"
- "\n"
- "Available command line options:\n"
- "\n"
- " COLLECTION_FREQUENCY data collection frequency in seconds\n"
- " minimum: %d\n"
- "\n"
- " debug enable verbose output\n"
- " default: disabled\n"
- "\n",
- program_version,
- update_every
- );
- exit(1);
- }
- }
-
- if(freq >= update_every)
- update_every = freq;
- else if(freq)
- collector_error("update frequency %d seconds is too small for slabinfo. Using %d.", freq, update_every);
-
-
- // Call the main function. Time drift to be added
- do_slab_stats(update_every);
-
- return 0;
-}
diff --git a/collectors/statsd.plugin/Makefile.am b/collectors/statsd.plugin/Makefile.am
deleted file mode 100644
index c8144c137..000000000
--- a/collectors/statsd.plugin/Makefile.am
+++ /dev/null
@@ -1,23 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-statsdconfigdir=$(libconfigdir)/statsd.d
-dist_statsdconfig_DATA = \
- example.conf \
- k6.conf \
- asterisk.conf \
- $(NULL)
-
-userstatsdconfigdir=$(configdir)/statsd.d
-dist_userstatsdconfig_DATA = \
- $(NULL)
-
-# Explicitly install directories to avoid permission issues due to umask
-install-exec-local:
- $(INSTALL) -d $(DESTDIR)$(userstatsdconfigdir)
diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md
deleted file mode 100644
index e3c8f9f81..000000000
--- a/collectors/statsd.plugin/README.md
+++ /dev/null
@@ -1,1052 +0,0 @@
-<!--
-title: "StatsD"
-description: "The Netdata Agent is a fully-featured StatsD server that collects metrics from any custom application and visualizes them in real-time."
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/statsd.plugin/README.md"
-sidebar_label: "StatsD"
-learn_status: "Published"
-learn_rel_path: "Integrations/Monitor/Anything"
--->
-
-# StatsD
-
-[StatsD](https://github.com/statsd/statsd) is a system to collect data from any application. Applications send metrics to it,
-usually via non-blocking UDP communication, and StatsD servers collect these metrics, perform a few simple calculations on
-them and push them to backend time-series databases.
-
-If you want to learn more about the StatsD protocol, we have written a
-[blog post](https://blog.netdata.cloud/introduction-to-statsd/) about it!
-
-
-Netdata is a fully featured statsd server. It can collect statsd formatted metrics, visualize
-them on its dashboards and store them in its database for long-term retention.
-
-Netdata statsd runs inside Netdata (it is an internal plugin, running inside the Netdata daemon). It is
-configured via `netdata.conf` and by default listens on the standard statsd port 8125. Netdata supports
-both TCP and UDP packets at the same time.
-
-Since statsd is embedded in Netdata, you effectively have a statsd server on every server Netdata runs on.
-
-Netdata statsd is fast. It can collect several million metrics per second on modern hardware, using
-just 1 CPU core. The implementation uses two threads: one thread collects metrics, another thread updates
-the charts from the collected data.
-
-## Available StatsD synthetic application charts
-
-Netdata ships with a few synthetic chart definitions to automatically present application metrics in a
-more uniform way. These synthetic charts are configuration files (you can create your own) that re-arrange
-statsd metrics in a more meaningful way.
-
-On synthetic charts, we can have alerts, as with any other metric and chart.
-
-- [K6 load testing tool](https://k6.io)
- - **Description:** k6 is a developer-centric, free and open-source load testing tool built for making performance testing a productive and enjoyable experience.
- - [Documentation](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/k6.md)
- - [Configuration](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/k6.conf)
-- [Asterisk](https://www.asterisk.org/)
- - **Description:** Asterisk is an Open Source PBX and telephony toolkit.
- - [Documentation](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/asterisk.md)
- - [Configuration](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/asterisk.conf)
-
-## Metrics supported by Netdata
-
-Netdata fully supports the StatsD protocol and also extends it to support more advanced Netdata-specific use cases.
-All StatsD client libraries can be used with Netdata too.
-
-- **Gauges**
-
- The application sends `name:value|g`, where `value` is any **decimal/fractional** number, StatsD reports the
- latest value collected and the number of times it was updated (events).
-
- The application may increment or decrement a previous value, by setting the first character of the value to
- `+` or `-` (so, the only way to set a gauge to an absolute negative value, is to first set it to zero).
-
- [Sampling rate](#sampling-rates) is supported.
- [Tags](#tags) are supported for changing chart units, family and dimension name.
-
- When a gauge is not collected and the setting is not to show gaps on the charts (the default), the last value will be shown, until a data collection event changes it.
-
-- **Counters** and **Meters**
-
- The application sends `name:value|c`, `name:value|C` or `name:value|m`, where `value` is a positive or negative **integer** number of events that occurred; StatsD reports the **rate** and the number of times it was updated (events).
-
- `:value` can be omitted and StatsD will assume it is `1`. `|c`, `|C` and `|m` can be omitted and StatsD will assume it is `|m`. So, the application may send just `name` and StatsD will parse it as `name:1|m`.
-
- - Counters use `|c` (etsy/StatsD compatible) or `|C` (brubeck compatible)
- - Meters use `|m`
-
- [Sampling rate](#sampling-rates) is supported.
- [Tags](#tags) are supported for changing chart units, family and dimension name.
-
- When a counter or meter is not collected, StatsD **defaults** to showing a zero value, until a data collection event changes the value.
-
-- **Timers** and **Histograms**
-
- The application sends `name:value|ms` or `name:value|h`, where `value` is any **decimal/fractional** number, StatsD reports **min**, **max**, **average**, **95th percentile**, **median** and **standard deviation** and the total number of times it was updated (events). Internally it also calculates the **sum**, which is available for synthetic charts.
-
- - Timers use `|ms`
- - Histograms use `|h`
-
- The only difference between the two is the `units` of the charts, as timers report *milliseconds*.
-
- [Sampling rate](#sampling-rates) is supported.
- [Tags](#tags) are supported for changing chart units and family.
-
- When a timer or histogram is not collected, StatsD **defaults** to showing a zero value, until a data collection event changes the value.
-
-- **Sets**
-
- The application sends `name:value|s`, where `value` is anything (**number or text**, leading and trailing spaces are removed), StatsD reports the number of unique values sent and the number of times it was updated (events).
-
- Sampling rate is **not** supported for Sets. `value` is always considered text (so `01` and `1` are considered different).
-
- [Tags](#tags) are supported for changing chart units and family.
-
- When a set is not collected, Netdata **defaults** to showing a zero value, until a data collection event changes the value.
-
-- **Dictionaries**
-
- The application sends `name:value|d`, where `value` is anything (**number or text**, leading and trailing spaces are removed), StatsD reports the number of events sent for each `value` and the total times `name` was updated (events).
-
- Sampling rate is **not** supported for Dictionaries. `value` is always considered text (so `01` and `1` are considered different).
-
- [Tags](#tags) are supported for changing chart units and family.
-
- When a dictionary is not collected, Netdata **defaults** to showing a zero value, until a data collection event changes the value.
-
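-As a quick illustration of the wire format described above, here is a minimal Python sketch that sends one metric of each type to a local Netdata StatsD server over UDP (the metric names are made up for the example, and the default `udp:localhost:8125` bind is assumed):
-
-```python
-import socket
-
-sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-
-for payload in (
-    b"myapp.cache.memory:8192|g",      # gauge: report the latest value
-    b"myapp.files.downloaded:1|c",     # counter: count events
-    b"myapp.request.duration:320|ms",  # timer: milliseconds, full statistics
-    b"myapp.response.size:512|h",      # histogram: same statistics, generic units
-    b"myapp.user.logins:bob|s",        # set: count unique values
-    b"myapp.response.codes:200|d",     # dictionary: count events per value
-):
-    sock.sendto(payload, ("localhost", 8125))
-```
-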
-#### Sampling Rates
-
-The application may append `|@sampling_rate`, where `sampling_rate` is a number from `0.0` to `1.0`, in order for StatsD to extrapolate the value and predict the total for the entire period. If the application reports to StatsD a value for only 1/10th of the events, it can append `|@0.1` to the metrics it sends; each reported event is then counted as 10.
-
-#### Tags
-
-The application may append `|#tag1:value1,tag2:value2,tag3:value3` etc, where `tagX` and `valueX` are strings. `:valueX` can be omitted.
-
-Currently, Netdata uses only 3 tags:
-
- * `units=string` which sets the units of the chart that is automatically generated
- * `family=string` which sets the family of the chart that is automatically generated (the family is the submenu of the dashboard)
- * `name=string` which sets the name of the dimension of the chart that is automatically generated (only for counters, meters, gauges)
-
-Other tags are parsed, but currently are ignored.
-
-Charts are not updated to change units or dimension names once they are created. So, either send the tags on every event, or use the special `zinit` value to initialize the charts at the beginning. `zinit` can be used on any chart to have Netdata initialize it, without actually setting any values. So, instead of sending `my.metric:VALUE|c|#units=bytes,name=size` every time, the application can send at the beginning `my.metric:zinit|c|#units=bytes,name=size` and then just `my.metric:VALUE|c`.
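-
-A hedged sketch of this pattern (the metric name is a placeholder):
-
-```python
-import socket
-
-sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-addr = ("localhost", 8125)
-
-# once, at application startup: let Netdata create the chart with the right tags
-sock.sendto(b"my.metric:zinit|c|#units=bytes,name=size", addr)
-
-# afterwards: plain events, no tags needed
-sock.sendto(b"my.metric:1024|c", addr)
-```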
-
-#### Overlapping metrics
-
-Netdata's StatsD server maintains different indexes for each of the metric types supported. This means the same metric `name` may exist under different types concurrently.
-
-#### How to name your metrics
-
-A good practice is to name your metrics like `application.operation.metric`, where:
-
-- `application` is the application name - Netdata will automatically create a dashboard section based on the first keyword of the metrics, so you can have all your applications in different sections.
-- `operation` is the operation your application is executing, like `dbquery`, `request`, `response`, etc.
-- `metric` is anything you want to name your metric as. Netdata will automatically append the metric type (meter, counter, gauge, set, dictionary, timer, histogram) to the generated chart.
-
-Using [Tags](#tags) you can also change the submenus of the dashboard, the units of the charts and, for meters, counters and gauges, the name of the dimension. So, you can have a usable default view without using [Synthetic StatsD charts](#synthetic-statsd-charts).
-
-#### Multiple metrics per packet
-
-Netdata accepts multiple metrics per packet if each is terminated with a newline (`\n`) at the end.
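-
-For example, a single packet could carry the following payload (every metric, including the last one, terminated with a newline):
-
-```
-myapp.requests:1|c
-myapp.latency:12|ms
-myapp.cache.memory:256|g
-```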
-
-#### TCP packets
-
-Netdata listens for both TCP and UDP packets. For TCP, it is important to always append `\n` to each metric, as Netdata will use the newline character to detect if a metric is split into multiple TCP packets.
-
-
-#### UDP packets
-
-When sending multiple metrics over a single UDP message, it is important not to exceed the network MTU, which is usually 1500 bytes.
-
-Netdata will accept UDP packets up to 9000 bytes, but the underlying network will not deliver packets larger than its MTU.
-
-> You can read more about the network maximum transmission unit (MTU) in this Cloudflare [article](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-mtu/).
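-
-A minimal client-side batching sketch that keeps each UDP datagram under a conservative payload budget (the 1400-byte figure is an assumption, leaving headroom for IP/UDP headers below a 1500-byte MTU):
-
-```python
-import socket
-
-MTU_BUDGET = 1400  # assumed safe payload size per datagram
-
-def send_batched(metrics, addr=("localhost", 8125)):
-    """Send newline-terminated StatsD metrics, packing several per datagram."""
-    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-    batch = b""
-    for metric in metrics:
-        line = metric.encode() + b"\n"
-        if batch and len(batch) + len(line) > MTU_BUDGET:
-            sock.sendto(batch, addr)
-            batch = b""
-        batch += line
-    if batch:
-        sock.sendto(batch, addr)
-
-send_batched(["myapp.requests:1|c", "myapp.latency:12|ms", "myapp.cache.memory:256|g"])
-```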
-
-## Configuration
-
-You can find the configuration at `/etc/netdata/netdata.conf`:
-
-```
-[statsd]
- # enabled = yes
- # decimal detail = 1000
- # update every (flushInterval) = 1
- # udp messages to process at once = 10
- # create private charts for metrics matching = *
- # max private charts hard limit = 1000
- # cleanup obsolete charts after secs = 0
- # private charts memory mode = save
- # private charts history = 3996
- # histograms and timers percentile (percentThreshold) = 95.00000
- # add dimension for number of events received = no
- # gaps on gauges (deleteGauges) = no
- # gaps on counters (deleteCounters) = no
- # gaps on meters (deleteMeters) = no
- # gaps on sets (deleteSets) = no
- # gaps on histograms (deleteHistograms) = no
- # gaps on timers (deleteTimers) = no
- # listen backlog = 4096
- # default port = 8125
- # bind to = udp:localhost:8125 tcp:localhost:8125
-```
-
-### StatsD main config options
-
-- `enabled = yes|no`
-
- controls if StatsD will be enabled for this Netdata. The default is enabled.
-
-- `default port = 8125`
-
- controls the default port StatsD will use if no port is defined in the following setting.
-
-- `bind to = udp:localhost tcp:localhost`
-
- is a space separated list of IPs and ports to listen to. The format is `PROTOCOL:IP:PORT` - if `PORT` is omitted, the `default port` will be used. If `IP` is IPv6, it needs to be enclosed in `[]`. `IP` can also be `*` (to listen on all IPs) or even a hostname.
-
-- `update every (flushInterval) = 1` seconds, controls the frequency StatsD will push the collected metrics to Netdata charts.
-
-- `decimal detail = 1000` controls the number of fractional digits in gauges and histograms. Netdata collects metrics using signed 64-bit integers and their fractional detail is controlled using multipliers and divisors. This setting is used to multiply all collected values to convert them to integers, and is also set as the divisor, so that the final data will be a floating point number with this fractional detail (1000 = X.0 - X.999, 10000 = X.0 - X.9999, etc).
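-
-A hedged arithmetic illustration of the multiplier/divisor idea (this is not Netdata's actual code):
-
-```python
-# with decimal detail = 1000, a collected value keeps 3 fractional digits
-decimal_detail = 1000
-collected = 3.14159
-stored = round(collected * decimal_detail)  # 3142, stored as a signed 64-bit integer
-displayed = stored / decimal_detail         # 3.142
-```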
-
-The rest of the settings are discussed below.
-
-## StatsD charts
-
-Netdata can visualize StatsD collected metrics in 2 ways:
-
-1. Each metric gets its own **private chart**. This is the default and does not require any configuration. You can adjust the default parameters.
-
-2. **Synthetic charts** can be created, combining multiple metrics, independently of their metric types. For this type of chart, special configuration is required, to define the chart title, type, units, its dimensions, etc.
-
-### Private metric charts
-
-Private charts are controlled with `create private charts for metrics matching = *`. This setting accepts a space-separated list of [simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md). Netdata will create private charts for all metrics **by default**.
-
-For example, to render charts for all `myapp.*` metrics, except `myapp.*.badmetric`, use:
-
-```
-create private charts for metrics matching = !myapp.*.badmetric myapp.*
-```
-
-You can configure Netdata StatsD to use a different `memory mode` than the rest of the Netdata Agent. You can read more about `memory mode` in the [documentation](https://github.com/netdata/netdata/blob/master/database/README.md).
-
-The default behavior is to use the same settings as the rest of the Netdata Agent. If you wish to change them, edit the following settings:
-- `private charts memory mode`
-- `private charts history`
-
-### Optimize private metric charts storage
-
-For optimization reasons, Netdata imposes a hard limit on private metric charts. The limit is set via the `max private charts hard limit` setting (which defaults to 1000 charts). Metrics above this hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to a chart, it will be sent to backend servers too).
-
-If you collect many ephemeral metrics (i.e. metrics for which values are only collected for a certain amount of time), you can set the configuration option `set charts as obsolete after secs`. Setting a value in seconds here means that Netdata will mark those metrics (and their private charts) as obsolete after the specified time has passed since the last sent metric value. Those charts will later be deleted according to the setting in `cleanup obsolete charts after secs`. Setting `set charts as obsolete after secs` to 0 (which is also the default value) will disable this functionality.
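-
-For example, a hedged sketch of both settings in the `[statsd]` section of `netdata.conf` (the one-hour values are arbitrary):
-
-```
-[statsd]
-    set charts as obsolete after secs = 3600
-    cleanup obsolete charts after secs = 3600
-```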
-
-Example private charts (automatically generated without any configuration):
-
-#### Counters
-
-- Scope: **count the events of something** (e.g. number of file downloads)
-- Format: `name:INTEGER|c` or `name:INTEGER|C` or `name|c`
-- StatsD increments the counter by the `INTEGER` number supplied (positive, or negative).
-
-![image](https://cloud.githubusercontent.com/assets/2662304/26131553/4a26d19c-3aa3-11e7-94e8-c53b5ed6ebc3.png)
-
-#### Gauges
-
-- Scope: **report the value of something** (e.g. cache memory used by the application server)
-- Format: `name:FLOAT|g`
-- StatsD remembers the last value supplied, and can increment or decrement the latest value if `FLOAT` begins with `+` or `-`.
-
-![image](https://cloud.githubusercontent.com/assets/2662304/26131575/5d54e6f0-3aa3-11e7-9099-bc4440cd4592.png)
-
-#### Histograms
-
-- Scope: **statistics on a size of events** (e.g. statistics on the sizes of files downloaded)
-- Format: `name:FLOAT|h`
-- StatsD maintains a list of all the values supplied and provides statistics on them.
-
-![image](https://cloud.githubusercontent.com/assets/2662304/26131587/704de72a-3aa3-11e7-9ea9-0d2bb778c150.png)
-
-The same chart with `sum` unselected, to show the detail of the dimensions supported:
-![image](https://cloud.githubusercontent.com/assets/2662304/26131598/8076443a-3aa3-11e7-9ffa-ea535aee9c9f.png)
-
-#### Meters
-
-This is identical to `counter`.
-
-- Scope: **count the events of something** (e.g. number of file downloads)
-- Format: `name:INTEGER|m` or `name|m` or just `name`
-- StatsD increments the counter by the `INTEGER` number supplied (positive, or negative).
-
-![image](https://cloud.githubusercontent.com/assets/2662304/26131605/8fdf5a06-3aa3-11e7-963f-7ecf207d1dbc.png)
-
-#### Sets
-
-- Scope: **count the unique occurrences of something** (e.g. unique filenames downloaded, or unique users that downloaded files)
-- Format: `name:TEXT|s`
-- StatsD maintains a unique index of all values supplied, and reports the unique entries in it.
-
-![image](https://cloud.githubusercontent.com/assets/2662304/26131612/9eaa7b1a-3aa3-11e7-903b-d881e9a35be2.png)
-
-#### Timers
-
-- Scope: **statistics on the duration of events** (e.g. statistics for the duration of file downloads)
-- Format: `name:FLOAT|ms`
-- StatsD maintains a list of all the values supplied and provides statistics on them.
-
-![image](https://cloud.githubusercontent.com/assets/2662304/26131629/bc34f2d2-3aa3-11e7-8a07-f2fc94ba4352.png)
-
-### Synthetic StatsD charts
-
-Use synthetic charts to create dedicated sections on the dashboard to render your StatsD charts.
-
-Synthetic charts are organized in:
-
-- **applications**, aka sections in the Netdata Dashboard.
-- **charts for each application**, aka families in the Netdata Dashboard.
-- **StatsD metrics for each chart**, aka charts and contexts in the Netdata Dashboard.
-
-> You can read more about how the Netdata Agent organizes information in the relevant [documentation](https://github.com/netdata/netdata/blob/master/web/README.md)
-
-For each application you need to create a `.conf` file in `/etc/netdata/statsd.d`.
-
-For example, if you want to monitor the application `myapp` using StatsD and Netdata, create the file `/etc/netdata/statsd.d/myapp.conf`, with this content:
-```
-[app]
- name = myapp
- metrics = myapp.*
- private charts = no
- gaps when not collected = no
- history = 60
-
-[dictionary]
- m1 = metric1
- m2 = metric2
-
-# replace 'mychart' with the chart id
-# the chart will be named: myapp.mychart
-[mychart]
- name = mychart
- title = my chart title
- family = my family
- context = chart.context
- units = tests/s
- priority = 91000
- type = area
- dimension = myapp.metric1 m1
- dimension = myapp.metric2 m2
-```
-
-Using the above configuration, `myapp` should get its own section on the dashboard, having one chart with 2 dimensions.
-
-`[app]` starts a new application definition. The supported settings in this section are:
-
-- `name` defines the name of the app.
-- `metrics` is a Netdata [simple pattern](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md). This pattern should match all the possible StatsD metrics that will be participating in the application `myapp`.
-- `private charts = yes|no`, enables or disables private charts for the metrics matched.
-- `gaps when not collected = yes|no`, enables or disables gaps on the charts of the application in case that no metrics are collected.
-- `memory mode` sets the memory mode for all charts of the application. The default is the global default for Netdata (not the global default for StatsD private charts). We suggest not to use this (we have commented it out in the example) and let your app use the global default for Netdata, which is our dbengine.
-
-- `history` sets the size of the round-robin database for this application. The default is the global default for Netdata (not the global default for StatsD private charts). This is only relevant if you use `memory mode = save`. Read more in our [metrics storage](/docs/store/change-metrics-storage.md) doc.
-
-`[dictionary]` defines name-value associations. These are used to rename metrics when they are added to synthetic charts. Metric names are also defined at each `dimension` line. However, using the dictionary, dimension names can be declared globally, for each app, and it is the only way to rename dimensions when using patterns. Of course, the dictionary can be empty or missing.
-
-Then, add any number of charts. Each chart should start with `[id]`. The chart will be called `app_name.id`. `family` controls the submenu on the dashboard. `context` controls the alert templates. `priority` controls the ordering of the charts on the dashboard. The rest of the settings are informational.
-
-Add any number of metrics to a chart using `dimension` lines. These lines accept 6 space-separated parameters:
-
-1. the metric name, as it is collected (it has to be matched by the `metrics =` pattern of the app)
-2. the dimension name, as it should be shown on the chart
-3. an optional selector (type) of the value to be shown (see below)
-4. an optional multiplier
-5. an optional divider
-6. optional flags, space separated and enclosed in quotes. All the external plugins `DIMENSION` flags can be used. Currently, the only usable flag is `hidden`, to add the dimension, but not show it on the dashboard. This is usually needed to have the values available for percentage calculation, or use them in alerts.
-
-So, the format is this:
-
-```
-dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS
-```
-
-`pattern` is a keyword. When set, `METRIC` is expected to be a Netdata [simple pattern](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) that will be used to match all the StatsD metrics to be added to the chart. So, `pattern` automatically matches any number of StatsD metrics, all of which will be added as separate chart dimensions.
-
-`TYPE`, `MULTIPLIER`, `DIVIDER` and `OPTIONS` are optional.
-
-`TYPE` can be:
-
-- `events` to show the number of events received by StatsD for this metric
-- `last` to show the last value, as calculated at the flush interval of the metric (the default)
-
-Then for histograms and timers the following types are also supported:
-
-- `min`, show the minimum value
-- `max`, show the maximum value
-- `sum`, show the sum of all values
-- `average` (same as `last`)
-- `percentile`, show the 95th percentile (or any other percentile, as configured at StatsD global config)
-- `median`, show the median of all values (i.e. sort all values and get the middle value)
-- `stddev`, show the standard deviation of the values
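-
-For example, a dimension line charting the 95th percentile of a hypothetical timer, with a divider of 1000 to convert milliseconds to seconds (the metric name and the unit-conversion use of the divider are illustrative assumptions), could look like this:
-
-```
-dimension = myapp.request.duration 'p95 seconds' percentile 1 1000
-```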
-
-#### Example synthetic charts
-
-StatsD metrics: `foo` and `bar`.
-
-Contents of file `/etc/netdata/statsd.d/foobar.conf`:
-
-```
-[app]
- name = foobarapp
- metrics = foo bar
- private charts = yes
-
-[foobar_chart1]
- title = Hey, foo and bar together
- family = foobar_family
- context = foobarapp.foobars
- units = foobars
- type = area
- dimension = foo 'foo me' last 1 1
- dimension = bar 'bar me' last 1 1
-```
-
-Metrics sent to statsd: `foo:10|g` and `bar:20|g`.
-
-Private charts:
-
-![screenshot from 2017-08-03 23-28-19](https://user-images.githubusercontent.com/2662304/28942295-7c3a73a8-78a3-11e7-88e5-a9a006bb7465.png)
-
-Synthetic chart:
-
-![screenshot from 2017-08-03 23-29-14](https://user-images.githubusercontent.com/2662304/28942317-958a2c68-78a3-11e7-853f-32850141dd36.png)
-
-#### Renaming StatsD synthetic charts' metrics
-
-You can define a dictionary to rename metrics sent by StatsD clients. This enables you to send response `"200"` and have Netdata visualize it as `successful connection`.
-
-The `[dictionary]` section accepts any number of `name = value` pairs.
-
-Netdata uses this dictionary as follows:
-
-1. When a `dimension` has a non-empty `NAME`, that name is looked up in the dictionary.
-
-2. If the above lookup gives nothing, or the `dimension` has an empty `NAME`, the original StatsD metric name is looked up in the dictionary.
-
-3. If any of the above succeeds, Netdata uses the `value` of the dictionary entry to set the name of the dimension. The dimensions will have as ID the original StatsD metric name, and as name, the dictionary value. A sketch of this lookup order follows below.
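-
-A sketch of that lookup order in Python-flavored pseudocode (an illustration, not Netdata's actual implementation):
-
-```python
-def dimension_display_name(dictionary, metric_id, dim_name):
-    if dim_name and dim_name in dictionary:
-        return dictionary[dim_name]   # 1. non-empty NAME found in the dictionary
-    if metric_id in dictionary:
-        return dictionary[metric_id]  # 2. fall back to the original StatsD metric name
-    return dim_name or metric_id      # 3. no match: keep what was configured
-```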
-
-Use the dictionary in 2 ways:
-
-1. set `dimension = myapp.metric1 ''` and have in the dictionary `myapp.metric1 = metric1 name`
-2. set `dimension = myapp.metric1 'm1'` and have in the dictionary `m1 = metric1 name`
-
-In both cases, the dimension will be added with ID `myapp.metric1` and will be named `metric1 name`. So, in alerts use either of the 2 as `${myapp.metric1}` or `${metric1 name}`.
-
-> Keep in mind that if you add the same StatsD metric to a chart multiple times, Netdata will append `TYPE` to the dimension ID, so `myapp.metric1` will be added as `myapp.metric1_last` or `myapp.metric1_events`, etc. If you add the same metric with the same `TYPE` to a chart multiple times, Netdata will also append an incremental counter to the dimension ID, i.e. `myapp.metric1_last1`, `myapp.metric1_last2`, etc.
-
-#### Dimension patterns
-
-Netdata allows adding multiple dimensions to a chart, by matching the StatsD metrics with a Netdata simple pattern.
-
-Assume we have an API that provides StatsD metrics for each response code per method it supports, like these:
-
-```
-myapp.api.get.200
-myapp.api.get.400
-myapp.api.get.500
-myapp.api.del.200
-myapp.api.del.400
-myapp.api.del.500
-myapp.api.post.200
-myapp.api.post.400
-myapp.api.post.500
-myapp.api.all.200
-myapp.api.all.400
-myapp.api.all.500
-```
-
-In order to add all the response codes of `myapp.api.get` to a chart, we simply make the following configuration:
-
-```
-[api_get_responses]
- ...
- dimension = pattern 'myapp.api.get.*' '' last 1 1
-```
-
-The above will add dimensions named `200`, `400` and `500`: Netdata extracts the wildcard part of the metric name, so the dimensions are named after whatever the `*` matched.
-
-You can rename the dimensions with this:
-
-```
-[dictionary]
- get.200 = 200 ok
- get.400 = 400 bad request
- get.500 = 500 cannot connect to db
-
-[api_get_responses]
- ...
-   dimension = pattern 'myapp.api.get.*' 'get.' last 1 1
-```
-
-Note that we added a `NAME` to the dimension line: `get.`. This is prefixed to the wildcarded part of the metric name to compose the key for looking up the dictionary. So `500` became `get.500`, which was looked up in the dictionary to find the value `500 cannot connect to db`. This way we can have different dimension names for each of the API methods (i.e. `get.500 = 500 cannot connect to db` while `post.500 = 500 cannot write to disk`).
-
-To add all 200s across all API methods to a chart, you can do this:
-
-```
-[ok_by_method]
- ...
-   dimension = pattern 'myapp.api.*.200' '' last 1 1
-```
-
-The above will add `get`, `post`, `del` and `all` to the chart.
-
-If `all` is not wanted (a `stacked` chart does not need the `all` dimension, since the sum of the dimensions provides the total), the line should be:
-
-```
-[ok_by_method]
- ...
-   dimension = pattern '!myapp.api.all.* myapp.api.*.200' '' last 1 1
-```
-
-With the above, all methods except `all` will be added to the chart.
-
-To automatically rename the methods, you can use this:
-
-```
-[dictionary]
- method.get = GET
- method.post = ADD
- method.del = DELETE
-
-[ok_by_method]
- ...
-   dimension = pattern '!myapp.api.all.* myapp.api.*.200' 'method.' last 1 1
-```
-
-Using the above, the dimensions will be added as `GET`, `ADD` and `DELETE`.
-
-## StatsD examples
-
-### Python
-
-It's really easy to instrument your Python application with StatsD, for example using [jsocol/pystatsd](https://github.com/jsocol/pystatsd).
-
-```python
-import statsd
-c = statsd.StatsClient('localhost', 8125)
-c.incr('foo') # Increment the 'foo' counter.
-for i in range(100000000):
- c.incr('bar')
- c.incr('foo')
- if i % 3:
- c.decr('bar')
- c.timing('stats.timed', 320) # Record a 320ms 'stats.timed'.
-```
-
-You can find detailed documentation on the project's [documentation page](https://statsd.readthedocs.io/en/v3.3/).
-
-### JavaScript and Node.js
-
-Using the client library by [sivy/node-statsd](https://github.com/sivy/node-statsd), you can easily embed StatsD into your Node.js project.
-
-```javascript
- var StatsD = require('node-statsd'),
- client = new StatsD();
-
- // Timing: sends a timing command with the specified milliseconds
- client.timing('response_time', 42);
-
- // Increment: Increments a stat by a value (default is 1)
- client.increment('my_counter');
-
- // Decrement: Decrements a stat by a value (default is -1)
- client.decrement('my_counter');
-
- // Using the callback
- client.set(['foo', 'bar'], 42, function(error, bytes){
- //this only gets called once after all messages have been sent
- if(error){
- console.error('Oh noes! There was an error:', error);
- } else {
- console.log('Successfully sent', bytes, 'bytes');
- }
- });
-
- // Sampling, tags and callback are optional and could be used in any combination
- client.histogram('my_histogram', 42, 0.25); // 25% Sample Rate
- client.histogram('my_histogram', 42, ['tag']); // User-defined tag
- client.histogram('my_histogram', 42, next); // Callback
- client.histogram('my_histogram', 42, 0.25, ['tag']);
- client.histogram('my_histogram', 42, 0.25, next);
- client.histogram('my_histogram', 42, ['tag'], next);
- client.histogram('my_histogram', 42, 0.25, ['tag'], next);
-```
-
-### Other languages
-
-You can also use StatsD with:
-- Golang, thanks to [alexcesaro/statsd](https://github.com/alexcesaro/statsd)
-- Ruby, thanks to [reinh/statsd](https://github.com/reinh/statsd)
-- Java, thanks to [DataDog/java-dogstatsd-client](https://github.com/DataDog/java-dogstatsd-client)
-
-
-### Shell
-
-Getting proper client support for a programming language is not always easy, but the Unix shell is available on most Unix systems. You can use shell and `nc` to instrument your systems and send metric data to Netdata's StatsD implementation.
-
-Using this method you can send metrics from any script. You can generate events like `backup.started`, `backup.ended` and `backup.time`, or even tail logs and convert them to metrics.
-
-> **IMPORTANT**:
->
-> To send StatsD messages you need the `nc` command from the `netcat` package.
-> There are multiple versions of this package. Experiment with the `nc` command available on your system to find the right parameters.
->
-> In the examples below, we assume the `openbsd-netcat` is installed.
-
-If you plan to send short StatsD events on sporadic occasions, use UDP. Keep each message short (most networks have an MTU of 1500 bytes, which is also the limit for StatsD messages over UDP). The good thing is that UDP will not block your script, even if the StatsD server is not there (UDP messages are "fire-and-forget").
-
-For UDP use this:
-
-```sh
-echo "APPLICATION.METRIC:VALUE|TYPE" | nc -u -w 0 localhost 8125
-```
-
-`-u` turns on UDP; `-w 0` sets the idle timeout to zero, so `nc` does not wait for a response from StatsD.
-
-where:
-
-- `APPLICATION` is any name for your application
-- `METRIC` is the name for the specific metric
-- `VALUE` is the value for that metric (**meters**, **counters**, **gauges**, **timers** and **histograms** accept integer/decimal/fractional numbers, **sets** and **dictionaries** accept strings)
-- `TYPE` is one of `m`, `c`, `g`, `ms`, `h`, `s`, `d` to define the metric type.
-
-For tailing a log and converting it to metrics, do something like this:
-
-```sh
-tail -f some.log | awk 'awk commands to parse the log and format statsd metrics' | nc -N -w 120 localhost 8125
-```
-
-`-N` tells `nc` to close the socket once it receives EOF on its input. `-w 120` tells `nc` to stop if the connection is idle for 120 seconds. The timeout is needed to stop the `nc` command if you restart Netdata while `nc` is connected to it. Without it, `nc` will sit idle forever.
-
-When you embed the above commands in a script, you may notice that all the metrics are sent to StatsD with a delay: they are buffered in the pipes `|`. You can make them real-time by prepending each command with `stdbuf -i0 -oL -eL command to be run`, like this:
-
-```sh
-stdbuf -i0 -oL -eL tail -f some.log |\
- stdbuf -i0 -oL -eL awk 'awk commands to parse the log and format statsd metrics' |\
- stdbuf -i0 -oL -eL nc -N -w 120 localhost 8125
-```
-
-If you use `mawk` you also need to run awk with `-W interactive`.
-
-Examples:
-
-To set `myapp.used_memory` as gauge to value `123456`, use:
-
-```sh
-echo "myapp.used_memory:123456|g|#units:bytes" | nc -u -w 0 localhost 8125
-```
-
-To increment `myapp.files_sent` by `10`, as a counter, use:
-
-```sh
-echo "myapp.files_sent:10|c|#units:files" | nc -u -w 0 localhost 8125
-```
-
-You can send multiple metrics like this:
-
-```sh
-# send multiple metrics via UDP
-printf "myapp.used_memory:123456|g|#units:bytes\nmyapp.files_sent:10|c|#units:files\n" | nc -u -w 0 localhost 8125
-```
-
-Remember, for UDP communication each packet should not exceed the MTU. So, if you plan to push many metrics at once, prefer TCP communication:
-
-```sh
-# send multiple metrics via TCP
-cat /tmp/statsd.metrics.txt | nc -N -w 120 localhost 8125
-```
-
-You can also use this little function to take care of all the details:
-
-```sh
-#!/usr/bin/env bash
-
-# we assume nc is from the openbsd-netcat package
-
-STATSD_HOST="localhost"
-STATSD_PORT="8125"
-statsd() {
- local options="-u -w 0" all="${*}"
-
- # replace all spaces with newlines
- all="${all// /\\n}"
-
- # if the string length of all parameters given is above 1000, use TCP
- [ "${#all}" -gt 1000 ] && options="-N -w 0"
-
- # send the metrics to statsd
- printf "${all}\n" | nc ${options} ${STATSD_HOST} ${STATSD_PORT} || return 1
-
- return 0
-}
-
-if [ ! -z "${*}" ]
-then
- statsd "${@}"
-fi
-```
-
-You can use it like this:
-
-```sh
-# first, source it in your script
-source statsd.sh
-
-# then, at any point:
-statsd "myapp.used_memory:123456|g|#units:bytes" "myapp.files_sent:10|c|#units:files" ...
-```
-
-or even at a terminal prompt, like this:
-
-```sh
-./statsd.sh "myapp.used_memory:123456|g|#units:bytes" "myapp.files_sent:10|c|#units:files" ...
-```
-
-The function is smart enough to call `nc` just once and pass all the metrics to it. It will also automatically switch to TCP if the metrics to send are above 1000 bytes.
-
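-For example, a backup script could use the function like this (a sketch; the metric names and the `run_backup` command are hypothetical):
-
-```sh
-source statsd.sh
-
-statsd "backup.started:1|c"
-
-started=$(date +%s)
-run_backup
-status=$?
-ended=$(date +%s)
-
-# StatsD timers are in milliseconds
-statsd "backup.time:$(( (ended - started) * 1000 ))|ms" "backup.ended:1|c" "backup.status:${status}|g"
-```
-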
-If you have gotten this far, make sure to check out our [community forums](https://community.netdata.cloud) to share your experience using Netdata with StatsD.
-
-## StatsD Step By Step Guide
-
-In this guide, we'll go through a scenario of visualizing our data in Netdata in a matter of seconds using
-[k6](https://k6.io), an open-source tool for automating load testing that can output metrics in the StatsD format.
-
-Although we'll use k6 as the use-case, the same principles can be applied to every application that supports
-the StatsD protocol. Simply enable the StatsD output and point it to the node that runs Netdata, which is `localhost` in this case.
-
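-For k6 specifically, enabling the StatsD output looks something like this (a sketch; `script.js` is a hypothetical test script, and `K6_STATSD_ADDR` defaults to `localhost:8125`):
-
-```sh
-# run a k6 test and emit its metrics to the local Netdata StatsD server
-k6 run --out statsd script.js
-
-# if Netdata runs on another node, point k6 to it explicitly
-K6_STATSD_ADDR=my-netdata-node:8125 k6 run --out statsd script.js
-```
-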
-In general, the process for creating a StatsD collector can be summarized in 2 steps:
-
-- Run an experiment by sending StatsD metrics to Netdata, without any prior configuration. This will create
- a chart per metric (called private charts) and will help you verify that everything works as expected from the application side of things.
-
- - Make sure to reload the dashboard tab **after** you start sending data to Netdata.
-
-- Create a configuration file for your app using [edit-config](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md): `sudo ./edit-config statsd.d/myapp.conf`
-
-  - Each app will have its own section in the right-hand menu.
-
-Now, let's see the above process in detail.
-
-### Prerequisites
-
-- A node with the [Netdata](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md) installed.
-- An application to instrument. For this guide, that will be [k6](https://k6.io/docs/getting-started/installation).
-
-### Understanding the metrics
-
-The real work in instrumenting an application with StatsD is deciding what metrics you
-want to visualize and how you want them grouped. In other words, you need to decide which metrics
-will be grouped into the same charts and how the charts will be grouped on Netdata's dashboard.
-
-Start with documentation for the particular application that you want to monitor (or the
-technological stack that you are using). In our case, the
-[k6 documentation](https://k6.io/docs/using-k6/metrics/) has a whole page dedicated to the
-metrics output by k6, along with descriptions.
-
-If you are using StatsD to monitor an existing application, you don't have much control over
-these metrics. For example, k6 has a type called `trend`, which is identical to timers and histograms.
-Thus, _k6 is clearly dictating_ which metrics can be used as histograms and simple gauges.
-
-On the other hand, if you are instrumenting your own code, you will need to decide not only what
-"things" you want to measure, but also which StatsD metric type is appropriate for each.
-
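-For example, using the shell method described earlier in this document, each type is just a different suffix on the wire (the metric names below are hypothetical):
-
-```sh
-# a gauge: the current queue length, as sampled right now
-echo "myapp.queue.length:42|g" | nc -u -w 0 localhost 8125
-
-# a counter: 3 more jobs were processed since the last report
-echo "myapp.jobs.processed:3|c" | nc -u -w 0 localhost 8125
-
-# a timer: one job took 250ms end-to-end
-echo "myapp.job.duration:250|ms" | nc -u -w 0 localhost 8125
-```
-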
-### Use private charts to see all available metrics
-
-In Netdata, every metric will receive its own chart, called a `private chart`. Although we will
-disable this in the final implementation, since it can create considerable noise
-(imagine having hundreds of metrics), it's very handy while building the configuration file.
-
-You can get a quick visual representation of the metrics and their type (e.g. whether each is a gauge, a timer, etc.).
-
-An important thing to notice is that StatsD has different types of metrics, as illustrated in the
-[supported metrics](#metrics-supported-by-netdata). Histograms and timers support mathematical operations
-to be performed on top of the baseline metric, like reporting the `average` of the value.
-
-Here are some examples of default private charts. You can see that the histogram private charts will
-visualize all the available operations.
-
-**Gauge private chart**
-
-![Gauge metric example](https://i.imgur.com/Sr5nJEV.png)
-
-**Histogram private chart**
-
-![Timer metric example](https://i.imgur.com/P4p0hvq.png)
-
-### Create a new StatsD configuration file
-
-Start by creating a new configuration file under the `statsd.d/` folder in the
-[Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-Use [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files)
-to create a new file called `k6.conf`.
-
-```bash
-sudo ./edit-config statsd.d/k6.conf
-```
-
-Copy the following configuration into your file as a starting point.
-
-```conf
-[app]
- name = k6
- metrics = k6*
- private charts = yes
- gaps when not collected = no
- memory mode = dbengine
-```
-
-Next, you need to understand how to organize metrics in Netdata's StatsD.
-
-#### Synthetic charts
-
-Netdata lets you group the metrics exposed by your instrumented application with _synthetic charts_.
-
-First, create a `[dictionary]` section to transform the names of the metrics into human-readable equivalents.
-`http_req_blocked`, `http_req_connecting`, `http_req_receiving`, and `http_reqs` are all metrics exposed by k6.
-
-```
-[dictionary]
- http_req_blocked = Blocked HTTP Requests
- http_req_connecting = Connecting HTTP Requests
- http_req_receiving = Receiving HTTP Requests
- http_reqs = Total HTTP requests
-```
-
-Continue this dictionary process with any other metrics you want to collect with Netdata.
-
-#### Families and context
-
-Families and contexts are additional ways to group metrics. A family controls the submenu entry on the
-right-hand menu; it is a subcategory of the section. Given the metrics exposed by k6, we organize them
-into 2 major groups, or `families`: `k6 native metrics` and `http metrics`.
-
-Context is a second way to group metrics, when the metrics are of the same nature but different origin. In
-our case, if we ran several different load testing experiments side-by-side, we could define the same app,
-but different contexts (e.g. `http_requests.experiment1`, `http_requests.experiment2`).
-
-Find more details about family and context in our [documentation](https://github.com/netdata/netdata/blob/master/web/README.md#families).
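-
-As a sketch, two charts of the same nature but different origin could share a family while using different contexts (the experiment names are hypothetical):
-
-```
-[http_requests_experiment1]
-    family = http metrics
-    context = http_requests.experiment1
-    ...
-
-[http_requests_experiment2]
-    family = http metrics
-    context = http_requests.experiment2
-    ...
-```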
-
-#### Dimensions
-
-Now, having decided on how we are going to group the charts, we need to define how we are going to group
-metrics into different charts. This is particularly important, since we decide:
-
-- What metrics **not** to show, since they are not useful for our use-case.
-- What metrics to consolidate into the same charts, so as to reduce noise and increase visual correlation.
-
-The dimension option has this syntax: `dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS`
-
-- **pattern**: A keyword that tells the StatsD server the `METRIC` string is actually a
- [simple pattern](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md).
- We don't use simple patterns in the example, but if we wanted to visualize all the `http_req` metrics, we
- could have a single dimension: `dimension = pattern 'k6.http_req*' last 1 1`. Find detailed examples with
- patterns in [dimension patterns](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/README.md#dimension-patterns).
-
-- **METRIC**: The id of the metric as it comes from the client. You can easily find this in the private charts above,
- for example: `k6.http_req_connecting`.
-
-- **NAME**: The name of the dimension. You can use the dictionary to expand this to something more human-readable.
-
-- **TYPE**:
-
- - For all charts:
- - `events`: The number of events (data points) received by the StatsD server
- - `last`: The last value that the server received
-
- - For histograms and timers:
-    - `min`, `max`, `sum`, `average`, `percentile`, `median`, `stddev`: These are helpful if you want to see
-      different representations of the same value. You can find an example in the `[iteration_duration]`
-      chart below. Note that the baseline `metric` is the same, but the `name` of each dimension is different,
-      since we take the same baseline metric and perform a different computation on it, creating a different
-      final dimension for visualization.
-
-- **MULTIPLIER DIVIDER**: Handy if you want to convert kilobytes to megabytes, or to negate a value.
-  The latter is handy for better visualization of send/receive pairs; you can find an example in the **packets** submenu of the **IPv4 Networking** section.
-
-If you define a chart, run Netdata to visualize metrics, and then add or remove a dimension from that chart,
-the result is a new chart with the same name, which confuses Netdata. If you change the dimensions of a chart,
-make sure to also change the `name` of that chart, since it serves as the `id` of that chart in Netdata's storage
-(e.g. `http_req` → `http_req_1`), as shown in the sketch below.
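-
-A minimal sketch of such a rename (the chart id and dimensions are illustrative):
-
-```
-[http_req]
-    # the dimensions changed, so the chart's name (its id in storage) changes too
-    name = http_req_1
-    dimension = k6.http_req_blocked http_req_blocked last 1 1
-    dimension = k6.http_req_connecting http_req_connecting last 1 1
-```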
-
-#### Finalize your StatsD configuration file
-
-It's time to put all the pieces together and create the synthetic charts that will make up our application
-dashboard in Netdata. We can do it in a few simple steps:
-
-- Decide which metrics we want to use (we have viewed all of them as private charts). For example, we want to use
- `k6.http_requests`, `k6.vus`, etc.
-
-- Decide how we want to organize them into different synthetic charts. For example, we want `k6.http_requests`, `k6.vus`
-  on their own, but `k6.http_req_blocked` and `k6.http_req_connecting` on the same chart.
-
-- For each synthetic chart, we define a **unique** name and a human-readable title.
-
-- We decide which `family` (submenu section) each synthetic chart belongs to. For example, here we
-  have defined 2 families: `http requests` and `k6_metrics`.
-
-- (Optional) If we have multiple instances of the same metric, we can define different contexts.
-
-- We define a dimension according to the syntax we highlighted above.
-
-- We define a type for each synthetic chart (`line`, `area`, or `stacked`).
-
-- We define the units for each synthetic chart.
-
-Following the above steps, we append the following configuration to the `k6.conf` file we defined above:
-
-```
-[http_req_total]
- name = http_req_total
- title = Total HTTP Requests
- family = http requests
- context = k6.http_requests
- dimension = k6.http_reqs http_reqs last 1 1 sum
- type = line
- units = requests/s
-
-[vus]
- name = vus
- title = Virtual Active Users
- family = k6_metrics
- dimension = k6.vus vus last 1 1
- dimension = k6.vus_max vus_max last 1 1
- type = line
-    units = vus
-
-[iteration_duration]
- name = iteration_duration_2
- title = Iteration duration
- family = k6_metrics
- dimension = k6.iteration_duration iteration_duration last 1 1
- dimension = k6.iteration_duration iteration_duration_max max 1 1
- dimension = k6.iteration_duration iteration_duration_min min 1 1
-    dimension = k6.iteration_duration iteration_duration_avg average 1 1
- type = line
-    units = s
-
-[dropped_iterations]
- name = dropped_iterations
- title = Dropped Iterations
- family = k6_metrics
- dimension = k6.dropped_iterations dropped_iterations last 1 1
- units = iterations
- type = line
-
-[data]
- name = data
- title = K6 Data
- family = k6_metrics
- dimension = k6.data_received data_received last 1 1
- dimension = k6.data_sent data_sent last -1 1
- units = kb/s
- type = area
-
-[http_req_status]
- name = http_req_status
- title = HTTP Requests Status
- family = http requests
- dimension = k6.http_req_blocked http_req_blocked last 1 1
- dimension = k6.http_req_connecting http_req_connecting last 1 1
- units = ms
- type = line
-
-[http_req_duration]
- name = http_req_duration
- title = HTTP requests duration
- family = http requests
- dimension = k6.http_req_sending http_req_sending last 1 1
- dimension = k6.http_req_waiting http_req_waiting last 1 1
- dimension = k6.http_req_receiving http_req_receiving last 1 1
- units = ms
- type = stacked
-```
-
-Note that Netdata will report the rate for meters and counters, even if k6 or another application
-sends an _absolute_ number. For example, k6 sends absolute HTTP requests with `http_reqs`,
-but Netdata visualizes that in `requests/second`.
-
-To enable this StatsD configuration, [restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md).
-
-### Final touches
-
-At this point, you have used StatsD to gather metrics for k6, creating a whole new section in your
-Netdata dashboard in the process. Moreover, you can further customize the icon of the particular section,
-as well as the description for each chart.
-
-To edit the section, please follow the Netdata [documentation](https://github.com/netdata/netdata/blob/master/web/gui/README.md#customizing-the-local-dashboard).
-
-The following configuration will be placed in a new file, but as the documentation suggests, use
-`dashboard_info.js` as a template: open that file and see how the rest of the sections and collectors have been defined.
-
-```javascript
-netdataDashboard.menu = {
- 'k6': {
- title: 'K6 Load Testing',
- icon: '<i class="fas fa-cogs"></i>',
- info: 'k6 is an open-source load testing tool and cloud service providing the best developer experience for API performance testing.'
- },
- .
- .
- .
-```
-
-We can then add a description for each chart. Simply find the following section in `dashboard_info.js` to understand how chart definitions are used:
-
-```javascript
-netdataDashboard.context = {
- 'system.cpu': {
- info: function (os) {
- void (os);
- return 'Total CPU utilization (all cores). 100% here means there is no CPU idle time at all. You can get per core usage at the <a href="#menu_cpu">CPUs</a> section and per application usage at the <a href="#menu_apps">Applications Monitoring</a> section.'
- + netdataDashboard.sparkline('<br/>Keep an eye on <b>iowait</b> ', 'system.cpu', 'iowait', '%', '. If it is constantly high, your disks are a bottleneck and they slow your system down.')
- + netdataDashboard.sparkline('<br/>An important metric worth monitoring, is <b>softirq</b> ', 'system.cpu', 'softirq', '%', '. A constantly high percentage of softirq may indicate network driver issues.');
- },
- valueRange: "[0, 100]"
- },
-```
-
-Afterwards, you can open your `custom_dashboard_info.js`, as suggested in the documentation linked above,
-and add something like the following example:
-
-```javascript
-netdataDashboard.context = {
- 'k6.http_req_duration': {
- info: "Total time for the request. It's equal to http_req_sending + http_req_waiting + http_req_receiving (i.e. how long did the remote server take to process the request and respond, without the initial DNS lookup/connection times)"
- },
-
-```
-
-The chart is identified as `<section_name>.<chart_name>`.
-
-These descriptions can greatly help the Netdata user who is monitoring your application in the midst of an incident.
-
-The `info` field supports HTML, so you can embed useful links and instructions in the description.
-
-### Vendoring a new collector
-
-In learning how to visualize any data source in Netdata using the StatsD protocol, we have also created a new collector.
-
-As long as you use the same underlying collector, every new `myapp.conf` file will create a new data
-source and dashboard section for Netdata. Netdata loads all the configuration files by default, but it will
-**not** create dashboard sections or charts, unless it starts receiving data for that particular data source.
-This means that we can now share our collector with the rest of the Netdata community.
-
-- Make sure you follow the [contributing guide](https://github.com/netdata/.github/blob/main/CONTRIBUTING.md)
-- Fork the netdata/netdata repository
-- Place the configuration file inside `netdata/collectors/statsd.plugin`
-- Add a reference in `netdata/collectors/statsd.plugin/Makefile.am`. For example, if we contribute the `k6.conf` file:
-```Makefile
-dist_statsdconfig_DATA = \
- example.conf \
- k6.conf \
- $(NULL)
-```
-
-
diff --git a/collectors/statsd.plugin/asterisk.conf b/collectors/statsd.plugin/asterisk.conf
deleted file mode 100644
index 160b80f93..000000000
--- a/collectors/statsd.plugin/asterisk.conf
+++ /dev/null
@@ -1,208 +0,0 @@
-[app]
- name = asterisk
- metrics = asterisk.*
- private charts = yes
- gaps when not collected = no
-
-[dictionary]
- # https://www.voip-info.org/asterisk-variable-hangupcause/
- q931.1 = unallocated 1
- q931.2 = no route transit net 2
- q931.3 = no route destination 3
- q931.6 = channel unacceptable 6
- q931.7 = call awarded delivered 7
- q931.16 = normal 16
- q931.17 = busy 17
- q931.18 = no response 18
- q931.19 = no answer 19
- q931.21 = rejected call 21
- q931.22 = number changed 22
- q931.27 = dst out of order 27
- q931.28 = invalid number 28
- q931.29 = rejected facility 29
- q931.30 = response to status 30
- q931.31 = normal unspecified 31
- q931.34 = congestion circuit 34
- q931.38 = net out of order 38
- q931.41 = normal tmp fail 41
- q931.42 = congestion switch 42
- q931.43 = access info discarded 43
- q931.44 = requested chan unavail 44
- q931.45 = pre empted 45
- q931.47 = resource unavailable, unspecified 47
- q931.50 = facility not subscribed 50
- q931.52 = outgoing call barred 52
- q931.54 = incoming call barred 54
- q931.57 = bearer capability not auth 57
- q931.58 = bearer capability not avail 58
- q931.65 = bearer capability not implemented 65
- q931.66 = chan not implemented 66
-    q931.69 = facility not implemented 69
- q931.81 = invalid call reference 81
- q931.88 = incompatible destination 88
- q931.95 = invalid msg specified 95
- q931.96 = mandatory ie missing 96
- q931.97 = message type non exist 97
- q931.98 = wrong message 98
- q931.99 = ie non exist 99
- q931.100 = invalid ie contents 100
- q931.101 = wrong call state 101
- q931.102 = recovery on timer expire 102
- q931.103 = mandatory ie length error 103
- q931.111 = protocol error 111
- q931.127 = interworking 127
-
-
-[channels]
- name = channels
- title = Active Channels
- family = channels
- context = asterisk.channels
- units = channels
- priority = 91000
- type = stacked
- dimension = pattern asterisk.channels.count 'channels' last 1 1
- # FIXME: netdata needs to prevent this from going negative
-
-[endpoints]
- name = endpoints
- title = Active Endpoints
- family = endpoints
- context = asterisk.endpoints
- units = endpoints
- priority = 91005
- type = stacked
- dimension = pattern asterisk.endpoints.count 'endpoints' last 1 1
-
-[endpoints_by_status]
- name = endpoints_by_status
- title = Active Endpoints by Status
- family = endpoints
- context = asterisk.endpoints_by_status
- units = endpoints
- priority = 91006
- type = stacked
- dimension = pattern asterisk.endpoints.state.* '' last 1 1
-
-[sip_channels_by_endpoint]
- name = sip_channels_by_endpoint
- title = Active SIP channels by endpoint
- family = channels
- context = asterisk.sip_channels_by_endpoint
- units = channels
- priority = 91110
- type = stacked
- dimension = pattern asterisk.endpoints.SIP.*.channels '' last 1 1
-
-[pjsip_channels_by_endpoint]
- name = pjsip_channels_by_endpoint
- title = Active PJSIP channels by endpoint
- family = channels
- context = asterisk.pjsip_channels_by_endpoint
- units = channels
- priority = 91111
- type = stacked
- dimension = pattern asterisk.endpoints.PJSIP.*.channels '' last 1 1
-
-[dialstatuses]
- name = dialstatuses
- title = Distribution of Dial Statuses
- family = dial_statuses
- context = asterisk.dialstatus
- units = calls
- priority = 91150
- type = stacked
- dimension = pattern 'asterisk.dialstatus.*' '' last 1 1
-
-[calltime]
- name = calltime
- title = Asterisk Channels Call Duration
- family = calltime
- context = asterisk.calltime
- units = milliseconds
- priority = 91160
- type = stacked
- dimension = asterisk.channels.calltime 'calltime' average 1 1
- dimension = asterisk.channels.calltime 'sum' sum 1 1 hidden
- dimension = asterisk.channels.calltime 'count' events 1 1 hidden
-
-[hangupcause]
- name = hangupcause
- title = Distribution of Hangup Causes
- family = hangup_causes
- context = asterisk.hangupcause
- units = calls
- priority = 91200
- type = stacked
- dimension = pattern 'asterisk.hangupcause.*' 'q931.' last 1 1
-
-[hangupcause_answer]
- name = hangupcause_answer
- title = Distribution of Hangup Causes for ANSWERed calls
- family = hangup_causes
- context = asterisk.hangupcause_answer
- units = calls
- priority = 91210
- type = stacked
- dimension = pattern 'asterisk.dialhangupcause.ANSWER.*' 'q931.' last 1 1
-
-[hangupcause_busy]
- name = hangupcause_busy
- title = Distribution of Hangup Causes for BUSY calls
- family = hangup_causes
- context = asterisk.hangupcause_busy
- units = calls
- priority = 91215
- type = stacked
- dimension = pattern 'asterisk.dialhangupcause.BUSY.*' 'q931.' last 1 1
-
-[hangupcause_cancel]
- name = hangupcause_cancel
- title = Distribution of Hangup Causes for CANCELled calls
- family = hangup_causes
- context = asterisk.hangupcause_cancel
- units = calls
- priority = 91220
- type = stacked
- dimension = pattern 'asterisk.dialhangupcause.CANCEL.*' 'q931.' last 1 1
-
-[hangupcause_chanunavail]
- name = hangupcause_chanunavail
-    title = Distribution of Hangup Causes for CHANUNAVAILed calls
- family = hangup_causes
- context = asterisk.hangupcause_chanunavail
- units = calls
- priority = 91230
- type = stacked
- dimension = pattern 'asterisk.dialhangupcause.CHANUNAVAIL.*' 'q931.' last 1 1
-
-[hangupcause_congestion]
- name = hangupcause_congestion
- title = Distribution of Hangup Causes for CONGESTIONed calls
- family = hangup_causes
- context = asterisk.hangupcause_congestion
- units = calls
- priority = 91240
- type = stacked
- dimension = pattern 'asterisk.dialhangupcause.CONGESTION.*' 'q931.' last 1 1
-
-[events]
- name = events
- title = Asterisk Dialplan Events
- family = events
- context = asterisk.events
- units = events/s
- priority = 91400
- type = stacked
- dimension = pattern 'asterisk.stasis.message.ast_channel_*_type' '' last 1 1
-
-[qualify]
- name = qualify
- title = Asterisk PJSIP Peers Qualify
- family = qualify
- context = asterisk.qualify
- units = milliseconds
- priority = 91500
- type = stacked
- dimension = pattern 'asterisk.PJSIP.contacts.*.rtt' '' max 1 1
- # FIXME: netdata needs to set update every = 15 on this
diff --git a/collectors/statsd.plugin/asterisk.md b/collectors/statsd.plugin/asterisk.md
deleted file mode 100644
index e7a7b63ce..000000000
--- a/collectors/statsd.plugin/asterisk.md
+++ /dev/null
@@ -1,62 +0,0 @@
-<!--
-title: "Asterisk monitoring with Netdata"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/statsd.plugin/asterisk.md"
-sidebar_label: "Asterisk"
-learn_status: "Published"
-learn_rel_path: "Integrations/Monitor/VoIP"
--->
-
-# Asterisk collector
-
-Monitors [Asterisk](https://www.asterisk.org/) dialplan application's statistics.
-
-## Requirements
-
-- Asterisk [integrated with StatsD](https://www.asterisk.org/integrating-asterisk-with-statsd/).
-
-## Configuration
-
-Netdata ships
-with [asterisk.conf](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/asterisk.conf) with
-preconfigured charts.
-
-To receive Asterisk metrics in Netdata, uncomment the following lines in the `/etc/asterisk/statsd.conf` file:
-
-```ini
-[general]
-enabled = yes ; When set to yes, statsd support is enabled
-server = 127.0.0.1 ; server[:port] of statsd server to use.
- ; If not specified, the port is 8125
-prefix = asterisk ; Prefix to prepend to all metrics
-```
-
-> See [statsd.conf.sample](https://github.com/asterisk/asterisk/blob/master/configs/samples/statsd.conf.sample) for all available options.
-
-## Charts and metrics
-
-<details><summary>Click to see screenshots of the charts.</summary>
-
-![image](https://user-images.githubusercontent.com/2662304/158055351-fcc7a7fb-9b95-4656-bdc6-2e5f5a909215.png)
-![image](https://user-images.githubusercontent.com/2662304/158055367-cfd25cd5-d71a-4bab-8cd1-bfcc47bc7312.png)
-
-</details>
-
-Mapping between Asterisk StatsD metrics and Netdata charts:
-
-| Chart | Metrics |
-|------------------------------------------------------|--------------------------------------------|
-| Active Channels | asterisk.channels.count |
-| Active Endpoints | asterisk.endpoints.count |
-| Active Endpoints by Status | asterisk.endpoints.state.* |
-| Active SIP channels by endpoint | asterisk.endpoints.SIP.*.channels |
-| Active PJSIP channels by endpoint | asterisk.endpoints.PJSIP.*.channels |
-| Distribution of Dial Statuses | asterisk.dialstatus.* |
-| Asterisk Channels Call Duration | asterisk.channels.calltime |
-| Distribution of Hangup Causes | asterisk.hangupcause.* |
-| Distribution of Hangup Causes for ANSWERed calls | asterisk.dialhangupcause.ANSWER.* |
-| Distribution of Hangup Causes for BUSY calls | asterisk.dialhangupcause.BUSY.* |
-| Distribution of Hangup Causes for CANCELled calls | asterisk.dialhangupcause.CANCEL.* |
-| Distribution of Hangup Causes for CHANUNAVAILed calls | asterisk.dialhangupcause.CHANUNAVAIL.* |
-| Distribution of Hangup Causes for CONGESTIONed calls | asterisk.dialhangupcause.CONGESTION.* |
-| Asterisk Dialplan Events | asterisk.stasis.message.ast_channel_*_type |
-| Asterisk PJSIP Peers Qualify | asterisk.PJSIP.contacts.*.rtt |
diff --git a/collectors/statsd.plugin/example.conf b/collectors/statsd.plugin/example.conf
deleted file mode 100644
index 2c7de6c7b..000000000
--- a/collectors/statsd.plugin/example.conf
+++ /dev/null
@@ -1,64 +0,0 @@
-# statsd synthetic charts configuration
-
-# You can add many .conf files in /etc/netdata/statsd.d/,
-# one for each of your apps.
-
-# start a new app - you can add many apps in the same file
-[app]
- # give a name for this app
- # this controls the main menu on the dashboard
- # and will be the prefix for all charts of the app
- name = myexampleapp
-
- # match all the metrics of the app
- metrics = myexampleapp.*
-
- # shall private charts of these metrics be created?
- private charts = no
-
- # shall gaps be shown when metrics are not collected?
- gaps when not collected = no
-
- # the memory mode for the charts of this app: none|map|save
- # the default is to use the global memory mode
- #memory mode = ram
-
- # the history size for the charts of this app, in seconds
- # the default is to use the global history
- #history = 3600
-
-# create a chart
-# this is its id - the chart will be named myexampleapp.myexamplechart
-[myexamplechart]
- # a name for the chart, similar to the id (2 names for each chart)
- name = myexamplechart
-
- # the chart title
- title = my chart title
-
- # the submenu of the dashboard
- family = my family
-
- # the context for alarm templates
- context = chart.context
-
- # the units of the chart
- units = tests/s
-
- # the sorting priority of the chart on the dashboard
- priority = 91000
-
- # the type of chart to create: line | area | stacked
- type = area
-
- # one or more dimensions for the chart
- # type = events | last | min | max | sum | average | percentile | median | stddev
- # events = the number of events for this metric
- # last = the last value collected
- # all the others are only valid for histograms and timers
- dimension = myexampleapp.metric1 avg average 1 1
- dimension = myexampleapp.metric1 lower min 1 1
- dimension = myexampleapp.metric1 upper max 1 1
- dimension = myexampleapp.metric2 other last 1 1
-
-# You can add as many charts as needed
diff --git a/collectors/statsd.plugin/k6.conf b/collectors/statsd.plugin/k6.conf
deleted file mode 100644
index 3bef00ca1..000000000
--- a/collectors/statsd.plugin/k6.conf
+++ /dev/null
@@ -1,110 +0,0 @@
-[app]
- name = k6
- metrics = k6*
- private charts = no
- gaps when not collected = yes
-
-[dictionary]
- http_reqs = HTTP Requests
- http_reqs_failed = Failed HTTP Requests
- vus = Virtual active users
- vus_max = max Virtual active users
- iteration_duration = iteration duration
- iteration_duration_max = max iteration duration
- iteration_duration_min = min iteration duration
- iteration_duration_avg = avg iteration duration
- dropped_iterations = Dropped iterations
- http_req_blocked = Blocked HTTP requests
- http_req_connecting = Connecting HTTP requests
- http_req_sending = Sending HTTP requests
- http_req_receiving = Receiving HTTP requests
- http_req_waiting = Waiting HTTP requests
- http_req_duration_median = Median HTTP req duration
- http_req_duration_average = Avg HTTP req duration
- http_req_duration = HTTP req duration
- http_req_duration_max = max HTTP req duration
- http_req_duration_min = min HTTP req duration
- http_req_duration_p95 = 95 percentile of HTTP req duration
- data_received = Received data
- data_sent = Sent data
-
-
-[http_reqs]
- name = http_reqs
- title = HTTP Requests rate
- family = http requests
- context = k6.http_requests
- dimension = k6.http_reqs http_reqs last 1 1 sum
- type = line
- units = requests/s
-
-[http_reqs]
- name = http_reqs_failed
- title = Failed HTTP Requests rate
- family = http requests
- context = k6.http_requests
- dimension = k6.http_reqs_failed http_reqs_failed last 1 1 sum
- type = line
- units = requests/s
-
-[vus]
- name = vus
- title = Virtual Active Users
- family = k6_metrics
- dimension = k6.vus vus last 1 1
- dimension = k6.vus_max vus_max last 1 1
- type = line
- units = vus
-
-[iteration_duration]
- name = iteration_duration_2
- title = Iteration duration
- family = k6_metrics
- dimension = k6.iteration_duration iteration_duration last 1 1
- dimension = k6.iteration_duration iteration_duration_max max 1 1
- dimension = k6.iteration_duration iteration_duration_min min 1 1
- dimension = k6.iteration_duration iteration_duration_avg average 1 1
- type = line
- units = s
-
-[dropped_iterations]
- name = dropped_iterations
- title = Dropped Iterations
- family = k6_metrics
- dimension = k6.dropped_iterations dropped_iterations last 1 1
- units = iterations
- type = line
-
-[data]
- name = data
- title = K6 Data
- family = k6_metrics
- dimension = k6.data_received data_received last 1 1
- dimension = k6.data_sent data_sent last -1 1
- units = kb/s
- type = area
-
-[http_req_duration_types]
- name = http_req_duration_types
- title = HTTP Requests total duration
- family = http requests
- dimension = k6.http_req_sending http_req_sending last 1 1
- dimension = k6.http_req_waiting http_req_waiting last 1 1
- dimension = k6.http_req_receiving http_req_receiving last 1 1
- dimension = k6.http_req_blocked http_req_blocked last 1 1
- dimension = k6.http_req_connecting http_req_connecting last 1 1
- units = ms
- type = stacked
-
-[http_req_duration]
- name = http_req_duration
- title = HTTP duration metrics
- family = http requests
- dimension = k6.http_req_duration http_req_duration_median median 1 1
- dimension = k6.http_req_duration http_req_duration_max max 1 1
- dimension = k6.http_req_duration http_req_duration_average average 1 1
- dimension = k6.http_req_duration http_req_duration_min min 1 1
- dimension = k6.http_req_duration http_req_duration_p95 percentile 1 1
- dimension = k6.http_req_duration http_req_duration last 1 1
- units = ms
- type = line
diff --git a/collectors/statsd.plugin/k6.md b/collectors/statsd.plugin/k6.md
deleted file mode 100644
index 13608a8a8..000000000
--- a/collectors/statsd.plugin/k6.md
+++ /dev/null
@@ -1,77 +0,0 @@
-<!--
-title: "K6 load test monitoring with Netdata"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/statsd.plugin/k6.md"
-sidebar_label: "K6 Load Testing"
-learn_status: "Published"
-learn_rel_path: "Integrations/Monitor/apps"
--->
-
-# K6 load test collector
-
-Monitors the impact of load testing experiments performed with [K6](https://k6.io/).
-
-You can read more about the metrics that K6 sends in the [K6 documentation](https://k6.io/docs/using-k6/metrics/).
-
-## Requirements
-
-- When running the k6 experiment, specify a [StatsD output](https://k6.io/docs/results-visualization/statsd/).
- - Tip: K6 currently supports tags only with [datadog output](https://k6.io/docs/results-visualization/datadog/), which is in essence StatsD. Netdata can be used with both.
-
-## Metrics
-
-![image](https://user-images.githubusercontent.com/13405632/117691411-8a7baf00-b1c4-11eb-9d87-8e9e7214871f.png)
-
-
-### HTTP Requests
-
-Number of HTTP requests that K6 generates, per second.
-
-### Failed HTTP Requests
-
-Number of failed HTTP requests that K6 generates, per second.
-
-### Virtual Active Users
-Current number of active virtual users.
-
-### Iteration Duration
-
-The time it took K6 to complete one full iteration of the main function.
-
-### Dropped Iterations
-
-The number of iterations that could not be started either due to lack of Virtual Users or lack of time.
-
-### Data
-
-The amount of data received and sent.
-
-### HTTP Requests total duration
-
-The total duration it took for a round-trip of an HTTP request. It includes:
-- Blocked HTTP requests: time spent blocked before initiating the request
-- Connecting HTTP requests: time spent establishing TCP connection to the remote host
-- Sending HTTP requests: time spent sending data to the remote host
-- Receiving HTTP requests: time spent receiving data from the remote host
-- Waiting HTTP requests: time spent waiting for response from the remote host
-
-### HTTP duration metrics
-
-Different metrics on the HTTP request as defined by K6. The HTTP request duration is defined by K6 as: `HTTP sending request` + `HTTP receiving request` + `HTTP waiting request`.
-
-Metrics:
-- Median
-- Average
-- Max
-- Min
-- 95th percentile
-- absolute (the value as it is, without any computation)
-
-## Configuration
-
-The collector is preconfigured and defined in `statsd.plugin/k6.conf`.
-
-Since this is a StatsD collector, you only need to define the configuration file and then send data to Netdata using the StatsD protocol.
-
-If Netdata is running on the same machine as K6, no further configuration is required. Otherwise, you will have to [point K6](https://k6.io/docs/results-visualization/statsd/) to your node and make sure that the K6 process can reach Netdata.
-
-The default namespace that is used in the configuration is `k6`. If you change it in K6, you will have to change it as well in the configuration file `k6.conf`.
diff --git a/collectors/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c
deleted file mode 100644
index 9cc3a9d97..000000000
--- a/collectors/statsd.plugin/statsd.c
+++ /dev/null
@@ -1,2893 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "daemon/common.h"
-
-#define STATSD_CHART_PREFIX "statsd"
-
-#define PLUGIN_STATSD_NAME "statsd.plugin"
-
-#define STATSD_LISTEN_PORT 8125
-#define STATSD_LISTEN_BACKLOG 4096
-
-#define WORKER_JOB_TYPE_TCP_CONNECTED 0
-#define WORKER_JOB_TYPE_TCP_DISCONNECTED 1
-#define WORKER_JOB_TYPE_RCV_DATA 2
-#define WORKER_JOB_TYPE_SND_DATA 3
-
-#if WORKER_UTILIZATION_MAX_JOB_TYPES < 4
-#error Please increase WORKER_UTILIZATION_MAX_JOB_TYPES to at least 4
-#endif
-
-// --------------------------------------------------------------------------------------
-
-// DO NOT ENABLE MULTITHREADING - IT IS NOT WELL TESTED
-// #define STATSD_MULTITHREADED 1
-
-#define STATSD_DICTIONARY_OPTIONS (DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_ADD_IN_FRONT)
-#define STATSD_DECIMAL_DETAIL 1000 // floating point values get multiplied by this, with the same divisor
-
-// --------------------------------------------------------------------------------------------------------------------
-// data specific to each metric type
-
-typedef struct statsd_metric_gauge {
- NETDATA_DOUBLE value;
-} STATSD_METRIC_GAUGE;
-
-typedef struct statsd_metric_counter { // counter and meter
- collected_number value;
-} STATSD_METRIC_COUNTER;
-
-typedef struct statsd_histogram_extensions {
- netdata_mutex_t mutex;
-
- // average is stored in metric->last
- collected_number last_min;
- collected_number last_max;
- collected_number last_percentile;
- collected_number last_median;
- collected_number last_stddev;
- collected_number last_sum;
-
- int zeroed;
-
- RRDDIM *rd_min;
- RRDDIM *rd_max;
- RRDDIM *rd_percentile;
- RRDDIM *rd_median;
- RRDDIM *rd_stddev;
- //RRDDIM *rd_sum;
-
- uint32_t size;
- uint32_t used;
- NETDATA_DOUBLE *values; // dynamic array of values collected
-} STATSD_METRIC_HISTOGRAM_EXTENSIONS;
-
-typedef struct statsd_metric_histogram { // histogram and timer
- STATSD_METRIC_HISTOGRAM_EXTENSIONS *ext;
-} STATSD_METRIC_HISTOGRAM;
-
-typedef struct statsd_metric_set {
- DICTIONARY *dict;
-} STATSD_METRIC_SET;
-
-typedef struct statsd_metric_dictionary_item {
- uint32_t count;
- RRDDIM *rd;
-} STATSD_METRIC_DICTIONARY_ITEM;
-
-typedef struct statsd_metric_dictionary {
- DICTIONARY *dict;
-} STATSD_METRIC_DICTIONARY;
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// this is a metric - for all types of metrics
-
-typedef enum __attribute__((packed)) statsd_metric_options {
- STATSD_METRIC_OPTION_NONE = 0x00000000, // no options set
- STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED = 0x00000001, // do not update the chart dimension, when this metric is not collected
- STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED = 0x00000002, // render a private chart for this metric
- STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED = 0x00000004, // the metric has been checked if it should get private chart or not
- STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT = 0x00000008, // show the count of events for this private chart
- STATSD_METRIC_OPTION_CHECKED_IN_APPS = 0x00000010, // set when this metric has been checked against apps
- STATSD_METRIC_OPTION_USED_IN_APPS = 0x00000020, // set when this metric is used in apps
- STATSD_METRIC_OPTION_CHECKED = 0x00000040, // set when the charting thread checks this metric for use in charts (its usefulness)
- STATSD_METRIC_OPTION_USEFUL = 0x00000080, // set when the charting thread finds the metric useful (i.e. used in a chart)
- STATSD_METRIC_OPTION_COLLECTION_FULL_LOGGED = 0x00000100, // set when the collection is full for this metric
- STATSD_METRIC_OPTION_UPDATED_CHART_METADATA = 0x00000200, // set when the private chart metadata have been updated via tags
- STATSD_METRIC_OPTION_OBSOLETE = 0x00004000, // set when the metric is obsoleted
-} STATS_METRIC_OPTIONS;
-
-typedef enum __attribute__((packed)) statsd_metric_type {
- STATSD_METRIC_TYPE_GAUGE,
- STATSD_METRIC_TYPE_COUNTER,
- STATSD_METRIC_TYPE_METER,
- STATSD_METRIC_TYPE_TIMER,
- STATSD_METRIC_TYPE_HISTOGRAM,
- STATSD_METRIC_TYPE_SET,
- STATSD_METRIC_TYPE_DICTIONARY
-} STATSD_METRIC_TYPE;
-
-
-typedef struct statsd_metric {
- const char *name; // the name of the metric - linked to dictionary name
- uint32_t hash; // hash of the name
-
- STATSD_METRIC_TYPE type;
-
- // metadata about data collection
- collected_number events; // the number of times this metric has been collected (never resets)
- uint32_t count; // the number of times this metric has been collected since the last flush
- time_t last_collected; // timestamp of the last incoming value
-
- // the actual collected data
- union {
- STATSD_METRIC_GAUGE gauge;
- STATSD_METRIC_COUNTER counter;
- STATSD_METRIC_HISTOGRAM histogram;
- STATSD_METRIC_SET set;
- STATSD_METRIC_DICTIONARY dictionary;
- };
-
- char *units;
- char *dimname;
- char *family;
-
- // chart related members
- STATS_METRIC_OPTIONS options; // STATSD_METRIC_OPTION_* (bitfield)
- char reset; // set to 1 by the charting thread to instruct the collector thread(s) to reset this metric
- collected_number last; // the last value sent to netdata
- RRDSET *st; // the private chart of this metric
- RRDDIM *rd_value; // the dimension of this metric value
- RRDDIM *rd_count; // the dimension for the number of events received
-
- // linking, used for walking through all metrics
- struct statsd_metric *next_useful;
-} STATSD_METRIC;
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// each type of metric has its own index
-
-typedef struct statsd_index {
- char *name; // the name of the index of metrics
- uint32_t events; // the number of events processed for this index
- uint32_t metrics; // the number of metrics in this index
- uint32_t useful; // the number of useful metrics in this index
-
- STATS_METRIC_OPTIONS default_options; // default options for all metrics in this index
- STATSD_METRIC_TYPE type; // the type of index
- DICTIONARY *dict;
-
- STATSD_METRIC *first_useful; // the linked list of useful metrics (new metrics are added in front)
-} STATSD_INDEX;
-
-// --------------------------------------------------------------------------------------------------------------------
-// synthetic charts
-
-typedef enum __attribute__((packed)) statsd_app_chart_dimension_value_type {
- STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS,
- STATSD_APP_CHART_DIM_VALUE_TYPE_LAST,
- STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE,
- STATSD_APP_CHART_DIM_VALUE_TYPE_SUM,
- STATSD_APP_CHART_DIM_VALUE_TYPE_MIN,
- STATSD_APP_CHART_DIM_VALUE_TYPE_MAX,
- STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE,
- STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN,
- STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV
-} STATSD_APP_CHART_DIM_VALUE_TYPE;
-
-typedef struct statsd_app_chart_dimension {
- const char *name; // the name of this dimension
- const char *metric; // the source metric name of this dimension
- uint32_t metric_hash; // hash for fast string comparisons
-
- int32_t multiplier; // the multiplier of the dimension
- int32_t divisor; // the divisor of the dimension
- RRDDIM_FLAGS flags; // the RRDDIM flags for this dimension
- RRDDIM_OPTIONS options; // the RRDDIM options for this dimension
- RRD_ALGORITHM algorithm; // the algorithm of this dimension
-
- STATSD_APP_CHART_DIM_VALUE_TYPE value_type; // which value to use of the source metric
-
- SIMPLE_PATTERN *metric_pattern; // set when the 'metric' is a simple pattern
-
- RRDDIM *rd; // a pointer to the RRDDIM that has been created for this dimension
- collected_number *value_ptr; // a pointer to the source metric value
-
- struct statsd_app_chart_dimension *next; // the next dimension for this chart
-} STATSD_APP_CHART_DIM;
-
-typedef struct statsd_app_chart {
- const char *id;
- const char *name;
- const char *title;
- const char *family;
- const char *context;
- const char *units;
- const char *module;
- int32_t priority;
- RRDSET_TYPE chart_type;
- STATSD_APP_CHART_DIM *dimensions;
- uint32_t dimensions_count;
- uint32_t dimensions_linked_count;
-
- RRDSET *st;
- struct statsd_app_chart *next;
-} STATSD_APP_CHART;
-
-typedef struct statsd_app {
- const char *name;
- SIMPLE_PATTERN *metrics;
- STATS_METRIC_OPTIONS default_options;
- RRD_MEMORY_MODE rrd_memory_mode;
- int32_t rrd_history_entries;
- DICTIONARY *dict;
-
- const char *source;
- STATSD_APP_CHART *charts;
- struct statsd_app *next;
-} STATSD_APP;
-
-// --------------------------------------------------------------------------------------------------------------------
-// global statsd data
-
-struct collection_thread_status {
- SPINLOCK spinlock;
- bool running;
- uint32_t max_sockets;
-
- netdata_thread_t thread;
-};
-
-static struct statsd {
- STATSD_INDEX gauges;
- STATSD_INDEX counters;
- STATSD_INDEX timers;
- STATSD_INDEX histograms;
- STATSD_INDEX meters;
- STATSD_INDEX sets;
- STATSD_INDEX dictionaries;
-
- size_t unknown_types;
- size_t socket_errors;
- size_t tcp_socket_connects;
- size_t tcp_socket_disconnects;
- size_t tcp_socket_connected;
- size_t tcp_socket_reads;
- size_t tcp_packets_received;
- size_t tcp_bytes_read;
- size_t udp_socket_reads;
- size_t udp_packets_received;
- size_t udp_bytes_read;
-
- int32_t update_every;
- bool enabled;
- bool private_charts_hidden;
- SIMPLE_PATTERN *charts_for;
-
- uint32_t tcp_idle_timeout;
- collected_number decimal_detail;
- uint32_t private_charts;
- uint32_t max_private_charts_hard;
- uint32_t set_obsolete_after;
-
- STATSD_APP *apps;
- uint32_t recvmmsg_size;
- uint32_t histogram_increase_step;
- uint32_t dictionary_max_unique;
- double histogram_percentile;
- char *histogram_percentile_str;
-
- int threads;
- struct collection_thread_status *collection_threads_status;
-
- LISTEN_SOCKETS sockets;
-} statsd = {
- .enabled = 1,
- .max_private_charts_hard = 1000,
- .private_charts_hidden = false,
- .recvmmsg_size = 10,
- .decimal_detail = STATSD_DECIMAL_DETAIL,
-
- .gauges = {
- .name = "gauge",
- .events = 0,
- .metrics = 0,
- .dict = NULL,
- .type = STATSD_METRIC_TYPE_GAUGE,
- .default_options = STATSD_METRIC_OPTION_NONE
- },
- .counters = {
- .name = "counter",
- .events = 0,
- .metrics = 0,
- .dict = NULL,
- .type = STATSD_METRIC_TYPE_COUNTER,
- .default_options = STATSD_METRIC_OPTION_NONE
- },
- .timers = {
- .name = "timer",
- .events = 0,
- .metrics = 0,
- .dict = NULL,
- .type = STATSD_METRIC_TYPE_TIMER,
- .default_options = STATSD_METRIC_OPTION_NONE
- },
- .histograms = {
- .name = "histogram",
- .events = 0,
- .metrics = 0,
- .dict = NULL,
- .type = STATSD_METRIC_TYPE_HISTOGRAM,
- .default_options = STATSD_METRIC_OPTION_NONE
- },
- .meters = {
- .name = "meter",
- .events = 0,
- .metrics = 0,
- .dict = NULL,
- .type = STATSD_METRIC_TYPE_METER,
- .default_options = STATSD_METRIC_OPTION_NONE
- },
- .sets = {
- .name = "set",
- .events = 0,
- .metrics = 0,
- .dict = NULL,
- .type = STATSD_METRIC_TYPE_SET,
- .default_options = STATSD_METRIC_OPTION_NONE
- },
- .dictionaries = {
- .name = "dictionary",
- .events = 0,
- .metrics = 0,
- .dict = NULL,
- .type = STATSD_METRIC_TYPE_DICTIONARY,
- .default_options = STATSD_METRIC_OPTION_NONE
- },
-
- .tcp_idle_timeout = 600,
-
- .apps = NULL,
- .histogram_percentile = 95.0,
- .histogram_increase_step = 10,
- .dictionary_max_unique = 200,
- .threads = 0,
- .collection_threads_status = NULL,
- .sockets = {
- .config = &netdata_config,
- .config_section = CONFIG_SECTION_STATSD,
- .default_bind_to = "udp:localhost tcp:localhost",
- .default_port = STATSD_LISTEN_PORT,
- .backlog = STATSD_LISTEN_BACKLOG
- },
-};
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// statsd index management - add/find metrics
-
-static void dictionary_metric_insert_callback(const DICTIONARY_ITEM *item, void *value, void *data) {
- STATSD_INDEX *index = (STATSD_INDEX *)data;
- STATSD_METRIC *m = (STATSD_METRIC *)value;
- const char *name = dictionary_acquired_item_name(item);
-
- netdata_log_debug(D_STATSD, "Creating new %s metric '%s'", index->name, name);
-
- m->name = name;
- m->hash = simple_hash(name);
- m->type = index->type;
- m->options = index->default_options;
-
- if (m->type == STATSD_METRIC_TYPE_HISTOGRAM || m->type == STATSD_METRIC_TYPE_TIMER) {
- m->histogram.ext = callocz(1,sizeof(STATSD_METRIC_HISTOGRAM_EXTENSIONS));
- netdata_mutex_init(&m->histogram.ext->mutex);
- }
-
- __atomic_fetch_add(&index->metrics, 1, __ATOMIC_RELAXED);
-}
-
-static void dictionary_metric_delete_callback(const DICTIONARY_ITEM *item, void *value, void *data) {
- (void)data; // STATSD_INDEX *index = (STATSD_INDEX *)data;
- (void)item;
- STATSD_METRIC *m = (STATSD_METRIC *)value;
-
- if(m->type == STATSD_METRIC_TYPE_HISTOGRAM || m->type == STATSD_METRIC_TYPE_TIMER) {
- freez(m->histogram.ext);
- m->histogram.ext = NULL;
- }
-
- freez(m->units);
- freez(m->family);
- freez(m->dimname);
-}
-
-static inline STATSD_METRIC *statsd_find_or_add_metric(STATSD_INDEX *index, const char *name) {
- netdata_log_debug(D_STATSD, "searching for metric '%s' under '%s'", name, index->name);
-
-#ifdef STATSD_MULTITHREADED
- // avoid the write lock of dictionary_set() for existing metrics
- STATSD_METRIC *m = dictionary_get(index->dict, name);
- if(!m) m = dictionary_set(index->dict, name, NULL, sizeof(STATSD_METRIC));
-#else
- // no locks here, go faster
- // this will call the dictionary_metric_insert_callback() if an item
- // is inserted, otherwise it will return the existing one.
- // We used the flag DICT_OPTION_DONT_OVERWRITE_VALUE to support this.
- STATSD_METRIC *m = dictionary_set(index->dict, name, NULL, sizeof(STATSD_METRIC));
-#endif
-
- index->events++;
- return m;
-}
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// statsd parsing numbers
-
-static inline NETDATA_DOUBLE statsd_parse_float(const char *v, NETDATA_DOUBLE def) {
- NETDATA_DOUBLE value;
-
- if(likely(v && *v)) {
- char *e = NULL;
- value = str2ndd(v, &e);
- if(unlikely(e && *e))
- collector_error("STATSD: excess data '%s' after value '%s'", e, v);
- }
- else
- value = def;
-
- return value;
-}
-
-static inline NETDATA_DOUBLE statsd_parse_sampling_rate(const char *v) {
- NETDATA_DOUBLE sampling_rate = statsd_parse_float(v, 1.0);
- if(unlikely(isless(sampling_rate, 0.001))) sampling_rate = 0.001;
- if(unlikely(isgreater(sampling_rate, 1.0))) sampling_rate = 1.0;
- return sampling_rate;
-}
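-
-// illustrative examples of the clamping above (values are hypothetical):
-//   "@0.5" -> rate 0.5, so each received event represents 1/0.5 = 2 real events
-//   "@2.0" -> clamped to 1.0 (a client cannot sample more often than every event)
-//   "@0"   -> clamped to 0.001, capping the per-event amplification at 1000x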
-
-static inline long long statsd_parse_int(const char *v, long long def) {
- long long value;
-
- if(likely(v && *v)) {
- char *e = NULL;
- value = str2ll(v, &e);
- if(unlikely(e && *e))
- collector_error("STATSD: excess data '%s' after value '%s'", e, v);
- }
- else
- value = def;
-
- return value;
-}
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// statsd processors per metric type
-
-static inline void statsd_reset_metric(STATSD_METRIC *m) {
- m->reset = 0;
- m->count = 0;
-}
-
-static inline int value_is_zinit(const char *value) {
-    return (value && strcmp(value, "zinit") == 0);
-}
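-
-// example: a client can pre-create a metric without recording a sample by
-// sending the magic value "zinit" (e.g. "requests:zinit|c" - a hypothetical
-// metric name); the metric is registered, but its value and events are untouched.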
-
-#define is_metric_checked(m) ((m)->options & STATSD_METRIC_OPTION_CHECKED)
-#define is_metric_useful_for_collection(m) (!is_metric_checked(m) || ((m)->options & STATSD_METRIC_OPTION_USEFUL))
-
-static inline void metric_update_counters_and_obsoletion(STATSD_METRIC *m) {
- m->events++;
- m->count++;
- m->last_collected = now_realtime_sec();
- if (m->st && unlikely(rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE))) {
- rrdset_isnot_obsolete___safe_from_collector_thread(m->st);
- m->options &= ~STATSD_METRIC_OPTION_OBSOLETE;
- }
-}
-
-static inline void statsd_process_gauge(STATSD_METRIC *m, const char *value, const char *sampling) {
- if(!is_metric_useful_for_collection(m)) return;
-
- if(unlikely(!value || !*value)) {
- collector_error("STATSD: metric '%s' of type gauge, with empty value is ignored.", m->name);
- return;
- }
-
- if(unlikely(m->reset)) {
- // no need to reset anything specific for gauges
- statsd_reset_metric(m);
- }
-
- if(unlikely(value_is_zinit(value))) {
- // magic loading of metric, without affecting anything
- }
- else {
- if (unlikely(*value == '+' || *value == '-'))
- m->gauge.value += statsd_parse_float(value, 1.0) / statsd_parse_sampling_rate(sampling);
- else
- m->gauge.value = statsd_parse_float(value, 1.0);
-
- metric_update_counters_and_obsoletion(m);
- }
-}
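-
-// illustrative gauge semantics, using a hypothetical metric "load":
-//   "load:1.5|g"  sets the gauge to 1.5
-//   "load:+0.5|g" adds 0.5 (a leading '+' or '-' makes the value relative)
-//   "load:-1|g"   subtracts 1, leaving 1.0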
-
-static inline void statsd_process_counter_or_meter(STATSD_METRIC *m, const char *value, const char *sampling) {
- if(!is_metric_useful_for_collection(m)) return;
-
- // we accept empty values for counters
-
- if(unlikely(m->reset)) statsd_reset_metric(m);
-
- if(unlikely(value_is_zinit(value))) {
- // magic loading of metric, without affecting anything
- }
- else {
- m->counter.value += llrintndd((NETDATA_DOUBLE) statsd_parse_int(value, 1) / statsd_parse_sampling_rate(sampling));
-
- metric_update_counters_and_obsoletion(m);
- }
-}
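-
-// example (hypothetical metric "hits"): "hits:1|c|@0.1" is a counter sampled
-// at 10%, so it adds llrint(1 / 0.1) = 10; "hits|m" carries no value at all,
-// and statsd_parse_int(NULL, 1) falls back to the default of 1 event.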
-
-#define statsd_process_counter(m, value, sampling) statsd_process_counter_or_meter(m, value, sampling)
-#define statsd_process_meter(m, value, sampling) statsd_process_counter_or_meter(m, value, sampling)
-
-static inline void statsd_process_histogram_or_timer(STATSD_METRIC *m, const char *value, const char *sampling, const char *type) {
- if(!is_metric_useful_for_collection(m)) return;
-
- if(unlikely(!value || !*value)) {
- collector_error("STATSD: metric of type %s, with empty value is ignored.", type);
- return;
- }
-
- if(unlikely(m->reset)) {
- m->histogram.ext->used = 0;
- statsd_reset_metric(m);
- }
-
- if(unlikely(value_is_zinit(value))) {
- // magic loading of metric, without affecting anything
- }
- else {
- NETDATA_DOUBLE v = statsd_parse_float(value, 1.0);
- NETDATA_DOUBLE sampling_rate = statsd_parse_sampling_rate(sampling);
- if(unlikely(isless(sampling_rate, 0.01))) sampling_rate = 0.01;
- if(unlikely(isgreater(sampling_rate, 1.0))) sampling_rate = 1.0;
-
- long long samples = llrintndd(1.0 / sampling_rate);
- while(samples-- > 0) {
-
- if(unlikely(m->histogram.ext->used == m->histogram.ext->size)) {
- netdata_mutex_lock(&m->histogram.ext->mutex);
- m->histogram.ext->size += statsd.histogram_increase_step;
- m->histogram.ext->values = reallocz(m->histogram.ext->values, sizeof(NETDATA_DOUBLE) * m->histogram.ext->size);
- netdata_mutex_unlock(&m->histogram.ext->mutex);
- }
-
- m->histogram.ext->values[m->histogram.ext->used++] = v;
- }
-
- metric_update_counters_and_obsoletion(m);
- }
-}
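-
-// example (hypothetical metric "latency"): "latency:5.2|ms|@0.5" stores the
-// value 5.2 twice (llrint(1 / 0.5) = 2 samples), so flush-time percentiles
-// approximate the client-side distribution; values[] grows in chunks of
-// 'histogram_increase_step' slots (10 by default) as samples accumulate.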
-
-#define statsd_process_timer(m, value, sampling) statsd_process_histogram_or_timer(m, value, sampling, "timer")
-#define statsd_process_histogram(m, value, sampling) statsd_process_histogram_or_timer(m, value, sampling, "histogram")
-
-static inline void statsd_process_set(STATSD_METRIC *m, const char *value) {
- if(!is_metric_useful_for_collection(m)) return;
-
- if(unlikely(!value || !*value)) {
- netdata_log_error("STATSD: metric of type set, with empty value is ignored.");
- return;
- }
-
- if(unlikely(m->reset)) {
- if(likely(m->set.dict)) {
- dictionary_destroy(m->set.dict);
- m->set.dict = NULL;
- }
- statsd_reset_metric(m);
- }
-
- if (unlikely(!m->set.dict))
- m->set.dict = dictionary_create_advanced(STATSD_DICTIONARY_OPTIONS, &dictionary_stats_category_collectors, 0);
-
- if(unlikely(value_is_zinit(value))) {
- // magic loading of metric, without affecting anything
- }
- else {
-#ifdef STATSD_MULTITHREADED
- // avoid the write lock to check if something is already there
- if(!dictionary_get(m->set.dict, value))
- dictionary_set(m->set.dict, value, NULL, 0);
-#else
- dictionary_set(m->set.dict, value, NULL, 0);
-#endif
- metric_update_counters_and_obsoletion(m);
- }
-}
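-
-// example: sets count unique values per flush interval - sending
-// "users:alice|s", "users:bob|s" and "users:alice|s" (hypothetical values)
-// within one interval collects 2, one dictionary entry per distinct value.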
-
-static inline void statsd_process_dictionary(STATSD_METRIC *m, const char *value) {
- if(!is_metric_useful_for_collection(m)) return;
-
- if(unlikely(!value || !*value)) {
- netdata_log_error("STATSD: metric of type set, with empty value is ignored.");
- return;
- }
-
- if(unlikely(m->reset))
- statsd_reset_metric(m);
-
- if (unlikely(!m->dictionary.dict))
- m->dictionary.dict = dictionary_create_advanced(STATSD_DICTIONARY_OPTIONS, &dictionary_stats_category_collectors, 0);
-
- if(unlikely(value_is_zinit(value))) {
- // magic loading of metric, without affecting anything
- }
- else {
- STATSD_METRIC_DICTIONARY_ITEM *t = (STATSD_METRIC_DICTIONARY_ITEM *)dictionary_get(m->dictionary.dict, value);
-
- if (unlikely(!t)) {
-            if(dictionary_entries(m->dictionary.dict) >= statsd.dictionary_max_unique)
- value = "other";
-
- t = (STATSD_METRIC_DICTIONARY_ITEM *)dictionary_set(m->dictionary.dict, value, NULL, sizeof(STATSD_METRIC_DICTIONARY_ITEM));
- }
-
- t->count++;
- metric_update_counters_and_obsoletion(m);
- }
-}
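-
-// example: dictionaries count occurrences per distinct value - sending
-// "status:200|d" three times and "status:500|d" once (hypothetical values)
-// keeps two items with counts 3 and 1; once 'dictionaries max unique
-// dimensions' (200 by default) distinct values exist, new ones fold into "other".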
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// statsd parsing
-
-static inline const char *statsd_parse_skip_up_to(const char *s, char d1, char d2, char d3) {
- char c;
-
- for(c = *s; c && c != d1 && c != d2 && c != d3 && c != '\r' && c != '\n'; c = *++s) ;
-
- return s;
-}
-
-const char *statsd_parse_skip_spaces(const char *s) {
- char c;
-
- for(c = *s; c && ( c == ' ' || c == '\t' || c == '\r' || c == '\n' ); c = *++s) ;
-
- return s;
-}
-
-static inline const char *statsd_parse_field_trim(const char *start, char *end) {
- if(unlikely(!start || !*start)) {
- start = end;
- return start;
- }
-
- while(start <= end && (*start == ' ' || *start == '\t'))
- start++;
-
- *end = '\0';
- end--;
- while(end >= start && (*end == ' ' || *end == '\t'))
- *end-- = '\0';
-
- return start;
-}
-
-static void statsd_process_metric(const char *name, const char *value, const char *type, const char *sampling, const char *tags) {
- netdata_log_debug(D_STATSD, "STATSD: raw metric '%s', value '%s', type '%s', sampling '%s', tags '%s'", name?name:"(null)", value?value:"(null)", type?type:"(null)", sampling?sampling:"(null)", tags?tags:"(null)");
-
- if(unlikely(!name || !*name)) return;
- if(unlikely(!type || !*type)) type = "m";
-
- STATSD_METRIC *m = NULL;
-
- char t0 = type[0], t1 = type[1];
- if(unlikely(t0 == 'g' && t1 == '\0')) {
- statsd_process_gauge(
- m = statsd_find_or_add_metric(&statsd.gauges, name),
- value, sampling);
- }
- else if(unlikely((t0 == 'c' || t0 == 'C') && t1 == '\0')) {
- // etsy/statsd uses 'c'
- // brubeck uses 'C'
- statsd_process_counter(
- m = statsd_find_or_add_metric(&statsd.counters, name),
- value, sampling);
- }
- else if(unlikely(t0 == 'm' && t1 == '\0')) {
- statsd_process_meter(
- m = statsd_find_or_add_metric(&statsd.meters, name),
- value, sampling);
- }
- else if(unlikely(t0 == 'h' && t1 == '\0')) {
- statsd_process_histogram(
- m = statsd_find_or_add_metric(&statsd.histograms, name),
- value, sampling);
- }
- else if(unlikely(t0 == 's' && t1 == '\0')) {
- statsd_process_set(
- m = statsd_find_or_add_metric(&statsd.sets, name),
- value);
- }
- else if(unlikely(t0 == 'd' && t1 == '\0')) {
- statsd_process_dictionary(
- m = statsd_find_or_add_metric(&statsd.dictionaries, name),
- value);
- }
- else if(unlikely(t0 == 'm' && t1 == 's' && type[2] == '\0')) {
- statsd_process_timer(
- m = statsd_find_or_add_metric(&statsd.timers, name),
- value, sampling);
- }
- else {
- statsd.unknown_types++;
- netdata_log_error("STATSD: metric '%s' with value '%s' is sent with unknown metric type '%s'", name, value?value:"", type);
- }
-
- if(m && tags && *tags) {
- const char *s = tags;
- while(*s) {
- const char *tagkey = NULL, *tagvalue = NULL;
- char *tagkey_end = NULL, *tagvalue_end = NULL;
-
- s = tagkey_end = (char *)statsd_parse_skip_up_to(tagkey = s, ':', '=', ',');
- if(tagkey == tagkey_end) {
- if (*s) {
- s++;
- s = statsd_parse_skip_spaces(s);
- }
- continue;
- }
-
- if(likely(*s == ':' || *s == '='))
- s = tagvalue_end = (char *) statsd_parse_skip_up_to(tagvalue = ++s, ',', '\0', '\0');
-
- if(*s == ',') s++;
-
- statsd_parse_field_trim(tagkey, tagkey_end);
- statsd_parse_field_trim(tagvalue, tagvalue_end);
-
- if(tagkey && *tagkey && tagvalue && *tagvalue) {
- if (strcmp(tagkey, "units") == 0 && (!m->units || strcmp(m->units, tagvalue) != 0)) {
- m->units = strdupz(tagvalue);
- m->options |= STATSD_METRIC_OPTION_UPDATED_CHART_METADATA;
- }
-
- if (strcmp(tagkey, "name") == 0 && (!m->dimname || strcmp(m->dimname, tagvalue) != 0)) {
- m->dimname = strdupz(tagvalue);
- m->options |= STATSD_METRIC_OPTION_UPDATED_CHART_METADATA;
- }
-
- if (strcmp(tagkey, "family") == 0 && (!m->family || strcmp(m->family, tagvalue) != 0)) {
- m->family = strdupz(tagvalue);
- m->options |= STATSD_METRIC_OPTION_UPDATED_CHART_METADATA;
- }
- }
- }
- }
-}
-
-static inline size_t statsd_process(char *buffer, size_t size, int require_newlines) {
- buffer[size] = '\0';
- netdata_log_debug(D_STATSD, "RECEIVED: %zu bytes: '%s'", size, buffer);
-
- const char *s = buffer;
- while(*s) {
- const char *name = NULL, *value = NULL, *type = NULL, *sampling = NULL, *tags = NULL;
- char *name_end = NULL, *value_end = NULL, *type_end = NULL, *sampling_end = NULL, *tags_end = NULL;
-
- s = name_end = (char *)statsd_parse_skip_up_to(name = s, ':', '=', '|');
- if(name == name_end) {
- if (*s) {
- s++;
- s = statsd_parse_skip_spaces(s);
- }
- continue;
- }
-
- if(likely(*s == ':' || *s == '='))
- s = value_end = (char *) statsd_parse_skip_up_to(value = ++s, '|', '@', '#');
-
- if(likely(*s == '|'))
- s = type_end = (char *) statsd_parse_skip_up_to(type = ++s, '|', '@', '#');
-
- while(*s == '|' || *s == '@' || *s == '#') {
- // parse all the fields that may be appended
-
- if ((*s == '|' && s[1] == '@') || *s == '@') {
- s = sampling_end = (char *)statsd_parse_skip_up_to(sampling = ++s, '|', '@', '#');
- if (*sampling == '@') sampling++;
- }
- else if ((*s == '|' && s[1] == '#') || *s == '#') {
- s = tags_end = (char *)statsd_parse_skip_up_to(tags = ++s, '|', '@', '#');
- if (*tags == '#') tags++;
- }
- else {
- // unknown field, skip it
- s = (char *)statsd_parse_skip_up_to(++s, '|', '@', '#');
- }
- }
-
- // skip everything until the end of the line
- while(*s && *s != '\n') s++;
-
- if(unlikely(require_newlines && *s != '\n' && s > buffer)) {
- // move the remaining data to the beginning
- size -= (name - buffer);
- memmove(buffer, name, size);
- return size;
- }
- else
- s = statsd_parse_skip_spaces(s);
-
- statsd_process_metric(
- statsd_parse_field_trim(name, name_end)
- , statsd_parse_field_trim(value, value_end)
- , statsd_parse_field_trim(type, type_end)
- , statsd_parse_field_trim(sampling, sampling_end)
- , statsd_parse_field_trim(tags, tags_end)
- );
- }
-
- return 0;
-}
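-
-// illustrative summary of the wire format accepted above (one metric per line,
-// ':' or '=' before the value, '@' and '#' fields in any order after the type):
-//
-//   <name>:<value>|<type>|@<sampling rate>|#<tag1>:<value1>,<tag2>:<value2>
-//
-// e.g. "api.latency:12.3|ms|@0.5|#units:ms,name:latency" (hypothetical metric)
-// - the recognized tags 'units', 'name' and 'family' feed the private chart
-// metadata handled in statsd_process_metric().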
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// statsd pollfd interface
-
-#define STATSD_TCP_BUFFER_SIZE 65536 // minimize tcp reads
-#define STATSD_UDP_BUFFER_SIZE 9000 // this should be up to MTU
-
-typedef enum {
- STATSD_SOCKET_DATA_TYPE_TCP,
- STATSD_SOCKET_DATA_TYPE_UDP
-} STATSD_SOCKET_DATA_TYPE;
-
-struct statsd_tcp {
- STATSD_SOCKET_DATA_TYPE type;
- size_t size;
- size_t len;
- char buffer[];
-};
-
-struct statsd_udp {
- struct collection_thread_status *status;
- STATSD_SOCKET_DATA_TYPE type;
-
-#ifdef HAVE_RECVMMSG
- size_t size;
- struct iovec *iovecs;
- struct mmsghdr *msgs;
-#else
- int *running;
- char buffer[STATSD_UDP_BUFFER_SIZE];
-#endif
-};
-
-// new TCP client connected
-static void *statsd_add_callback(POLLINFO *pi, short int *events, void *data) {
- (void)pi;
- (void)data;
-
- worker_is_busy(WORKER_JOB_TYPE_TCP_CONNECTED);
- *events = POLLIN;
-
- struct statsd_tcp *t = (struct statsd_tcp *)callocz(sizeof(struct statsd_tcp) + STATSD_TCP_BUFFER_SIZE, 1);
- t->type = STATSD_SOCKET_DATA_TYPE_TCP;
- t->size = STATSD_TCP_BUFFER_SIZE - 1;
- statsd.tcp_socket_connects++;
- statsd.tcp_socket_connected++;
-
- worker_is_idle();
- return t;
-}
-
-// TCP client disconnected
-static void statsd_del_callback(POLLINFO *pi) {
- worker_is_busy(WORKER_JOB_TYPE_TCP_DISCONNECTED);
-
- struct statsd_tcp *t = pi->data;
-
- if(likely(t)) {
- if(t->type == STATSD_SOCKET_DATA_TYPE_TCP) {
- if(t->len != 0) {
- statsd.socket_errors++;
- netdata_log_error("STATSD: client is probably sending unterminated metrics. Closed socket left with '%s'. Trying to process it.", t->buffer);
- statsd_process(t->buffer, t->len, 0);
- }
- statsd.tcp_socket_disconnects++;
- statsd.tcp_socket_connected--;
- }
- else
- netdata_log_error("STATSD: internal error: received socket data type is %d, but expected %d", (int)t->type, (int)STATSD_SOCKET_DATA_TYPE_TCP);
-
- freez(t);
- }
-
- worker_is_idle();
-}
-
-// Receive data
-static int statsd_rcv_callback(POLLINFO *pi, short int *events) {
- int retval = -1;
- worker_is_busy(WORKER_JOB_TYPE_RCV_DATA);
-
- *events = POLLIN;
-
- int fd = pi->fd;
-
- switch(pi->socktype) {
- case SOCK_STREAM: {
- struct statsd_tcp *d = (struct statsd_tcp *)pi->data;
- if(unlikely(!d)) {
- netdata_log_error("STATSD: internal error: expected TCP data pointer is NULL");
- statsd.socket_errors++;
- retval = -1;
- goto cleanup;
- }
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_TCP)) {
- netdata_log_error("STATSD: internal error: socket data type should be %d, but it is %d", (int)STATSD_SOCKET_DATA_TYPE_TCP, (int)d->type);
- statsd.socket_errors++;
- retval = -1;
- goto cleanup;
- }
-#endif
-
- int ret = 0;
- ssize_t rc;
- do {
- rc = recv(fd, &d->buffer[d->len], d->size - d->len, MSG_DONTWAIT);
- if (rc < 0) {
- // read failed
- if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) {
- netdata_log_error("STATSD: recv() on TCP socket %d failed.", fd);
- statsd.socket_errors++;
- ret = -1;
- }
- }
- else if (!rc) {
- // connection closed
- netdata_log_debug(D_STATSD, "STATSD: client disconnected.");
- ret = -1;
- }
- else {
- // data received
- d->len += rc;
- statsd.tcp_socket_reads++;
- statsd.tcp_bytes_read += rc;
- }
-
- if(likely(d->len > 0)) {
- statsd.tcp_packets_received++;
- d->len = statsd_process(d->buffer, d->len, 1);
- }
-
- if(unlikely(ret == -1)) {
- retval = -1;
- goto cleanup;
- }
-
- } while (rc != -1);
- break;
- }
-
- case SOCK_DGRAM: {
- struct statsd_udp *d = (struct statsd_udp *)pi->data;
- if(unlikely(!d)) {
- netdata_log_error("STATSD: internal error: expected UDP data pointer is NULL");
- statsd.socket_errors++;
- retval = -1;
- goto cleanup;
- }
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(d->type != STATSD_SOCKET_DATA_TYPE_UDP)) {
- netdata_log_error("STATSD: internal error: socket data should be %d, but it is %d", (int)d->type, (int)STATSD_SOCKET_DATA_TYPE_UDP);
- statsd.socket_errors++;
- retval = -1;
- goto cleanup;
- }
-#endif
-
-#ifdef HAVE_RECVMMSG
- ssize_t rc;
- do {
- rc = recvmmsg(fd, d->msgs, (unsigned int)d->size, MSG_DONTWAIT, NULL);
- if (rc < 0) {
- // read failed
- if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) {
- netdata_log_error("STATSD: recvmmsg() on UDP socket %d failed.", fd);
- statsd.socket_errors++;
- retval = -1;
- goto cleanup;
- }
- } else if (rc) {
- // data received
- statsd.udp_socket_reads++;
- statsd.udp_packets_received += rc;
-
- size_t i;
- for (i = 0; i < (size_t)rc; ++i) {
- size_t len = (size_t)d->msgs[i].msg_len;
- statsd.udp_bytes_read += len;
- statsd_process(d->msgs[i].msg_hdr.msg_iov->iov_base, len, 0);
- }
- }
- } while (rc != -1);
-
-#else // !HAVE_RECVMMSG
- ssize_t rc;
- do {
- rc = recv(fd, d->buffer, STATSD_UDP_BUFFER_SIZE - 1, MSG_DONTWAIT);
- if (rc < 0) {
- // read failed
- if (errno != EWOULDBLOCK && errno != EAGAIN && errno != EINTR) {
- netdata_log_error("STATSD: recv() on UDP socket %d failed.", fd);
- statsd.socket_errors++;
- retval = -1;
- goto cleanup;
- }
- } else if (rc) {
- // data received
- statsd.udp_socket_reads++;
- statsd.udp_packets_received++;
- statsd.udp_bytes_read += rc;
- statsd_process(d->buffer, (size_t) rc, 0);
- }
- } while (rc != -1);
-#endif
-
- break;
- }
-
- default: {
- netdata_log_error("STATSD: internal error: unknown socktype %d on socket %d", pi->socktype, fd);
- statsd.socket_errors++;
- retval = -1;
- goto cleanup;
- }
- }
-
- retval = 0;
-cleanup:
- worker_is_idle();
- return retval;
-}
-
-static int statsd_snd_callback(POLLINFO *pi, short int *events) {
- (void)pi;
- (void)events;
-
- worker_is_busy(WORKER_JOB_TYPE_SND_DATA);
- netdata_log_error("STATSD: snd_callback() called, but we never requested to send data to statsd clients.");
- worker_is_idle();
-
- return -1;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// statsd child thread to collect metrics from network
-
-void statsd_collector_thread_cleanup(void *data) {
- struct statsd_udp *d = data;
- spinlock_lock(&d->status->spinlock);
- d->status->running = false;
- spinlock_unlock(&d->status->spinlock);
-
- collector_info("cleaning up...");
-
-#ifdef HAVE_RECVMMSG
- size_t i;
- for (i = 0; i < d->size; i++)
- freez(d->iovecs[i].iov_base);
-
- freez(d->iovecs);
- freez(d->msgs);
-#endif
-
- freez(d);
- worker_unregister();
-}
-
-static bool statsd_should_stop(void) {
- return !service_running(SERVICE_COLLECTORS);
-}
-
-void *statsd_collector_thread(void *ptr) {
- struct collection_thread_status *status = ptr;
- spinlock_lock(&status->spinlock);
- status->running = true;
- spinlock_unlock(&status->spinlock);
-
- worker_register("STATSD");
- worker_register_job_name(WORKER_JOB_TYPE_TCP_CONNECTED, "tcp connect");
- worker_register_job_name(WORKER_JOB_TYPE_TCP_DISCONNECTED, "tcp disconnect");
- worker_register_job_name(WORKER_JOB_TYPE_RCV_DATA, "receive");
- worker_register_job_name(WORKER_JOB_TYPE_SND_DATA, "send");
-
- collector_info("STATSD collector thread started with taskid %d", gettid());
-
- struct statsd_udp *d = callocz(sizeof(struct statsd_udp), 1);
- d->status = status;
-
- netdata_thread_cleanup_push(statsd_collector_thread_cleanup, d);
-
-#ifdef HAVE_RECVMMSG
- d->type = STATSD_SOCKET_DATA_TYPE_UDP;
- d->size = statsd.recvmmsg_size;
- d->iovecs = callocz(sizeof(struct iovec), d->size);
- d->msgs = callocz(sizeof(struct mmsghdr), d->size);
-
- size_t i;
- for (i = 0; i < d->size; i++) {
- d->iovecs[i].iov_base = mallocz(STATSD_UDP_BUFFER_SIZE);
- d->iovecs[i].iov_len = STATSD_UDP_BUFFER_SIZE - 1;
- d->msgs[i].msg_hdr.msg_iov = &d->iovecs[i];
- d->msgs[i].msg_hdr.msg_iovlen = 1;
- }
-#endif
-
- poll_events(&statsd.sockets
- , statsd_add_callback
- , statsd_del_callback
- , statsd_rcv_callback
- , statsd_snd_callback
- , NULL
- , statsd_should_stop
- , NULL // No access control pattern
- , 0 // No dns lookups for access control pattern
- , (void *)d
- , 0 // tcp request timeout, 0 = disabled
- , statsd.tcp_idle_timeout // tcp idle timeout, 0 = disabled
- , statsd.update_every * 1000
- , ptr // timer_data
- , status->max_sockets
- );
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-
-// --------------------------------------------------------------------------------------------------------------------
-// statsd applications configuration files parsing
-
-#define STATSD_CONF_LINE_MAX 8192
-
-static STATSD_APP_CHART_DIM_VALUE_TYPE string2valuetype(const char *type, size_t line, const char *filename) {
- if(!type || !*type) type = "last";
-
- if(!strcmp(type, "events")) return STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS;
- else if(!strcmp(type, "last")) return STATSD_APP_CHART_DIM_VALUE_TYPE_LAST;
- else if(!strcmp(type, "min")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MIN;
- else if(!strcmp(type, "max")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MAX;
- else if(!strcmp(type, "sum")) return STATSD_APP_CHART_DIM_VALUE_TYPE_SUM;
- else if(!strcmp(type, "average")) return STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE;
- else if(!strcmp(type, "median")) return STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN;
- else if(!strcmp(type, "stddev")) return STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV;
- else if(!strcmp(type, "percentile")) return STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE;
-
- netdata_log_error("STATSD: invalid type '%s' at line %zu of file '%s'. Using 'last'.", type, line, filename);
- return STATSD_APP_CHART_DIM_VALUE_TYPE_LAST;
-}
-
-static const char *valuetype2string(STATSD_APP_CHART_DIM_VALUE_TYPE type) {
- switch(type) {
- case STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS: return "events";
- case STATSD_APP_CHART_DIM_VALUE_TYPE_LAST: return "last";
- case STATSD_APP_CHART_DIM_VALUE_TYPE_MIN: return "min";
- case STATSD_APP_CHART_DIM_VALUE_TYPE_MAX: return "max";
- case STATSD_APP_CHART_DIM_VALUE_TYPE_SUM: return "sum";
- case STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE: return "average";
- case STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN: return "median";
- case STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV: return "stddev";
- case STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE: return "percentile";
- }
-
- return "unknown";
-}
-
-static STATSD_APP_CHART_DIM *add_dimension_to_app_chart(
- STATSD_APP *app __maybe_unused
- , STATSD_APP_CHART *chart
- , const char *metric_name
- , const char *dim_name
- , collected_number multiplier
- , collected_number divisor
- , RRDDIM_FLAGS flags
- , RRDDIM_OPTIONS options
- , STATSD_APP_CHART_DIM_VALUE_TYPE value_type
-) {
- STATSD_APP_CHART_DIM *dim = callocz(sizeof(STATSD_APP_CHART_DIM), 1);
-
- dim->metric = strdupz(metric_name);
- dim->metric_hash = simple_hash(dim->metric);
-
- dim->name = strdupz((dim_name)?dim_name:"");
- dim->multiplier = multiplier;
- dim->divisor = divisor;
- dim->value_type = value_type;
- dim->flags = flags;
- dim->options = options;
-
- if(!dim->multiplier)
- dim->multiplier = 1;
-
- if(!dim->divisor)
- dim->divisor = 1;
-
-    // append it to the list of dimensions
- STATSD_APP_CHART_DIM *tdim;
- for(tdim = chart->dimensions; tdim && tdim->next ; tdim = tdim->next) ;
- if(!tdim) {
- dim->next = chart->dimensions;
- chart->dimensions = dim;
- }
- else {
- dim->next = tdim->next;
- tdim->next = dim;
- }
- chart->dimensions_count++;
-
- netdata_log_debug(D_STATSD, "Added dimension '%s' to chart '%s' of app '%s', for metric '%s', with type %u, multiplier %d, divisor %d",
- dim->name, chart->id, app->name, dim->metric, dim->value_type, dim->multiplier, dim->divisor);
-
- return dim;
-}
-
-static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHART *chart, DICTIONARY *dict) {
- netdata_log_debug(D_STATSD, "STATSD configuration reading file '%s'", filename);
-
- char *buffer = mallocz(STATSD_CONF_LINE_MAX + 1);
-
- FILE *fp = fopen(filename, "r");
- if(!fp) {
- netdata_log_error("STATSD: cannot open file '%s'.", filename);
- freez(buffer);
- return -1;
- }
-
- size_t line = 0;
- char *s;
- while(fgets(buffer, STATSD_CONF_LINE_MAX, fp) != NULL) {
- buffer[STATSD_CONF_LINE_MAX] = '\0';
- line++;
-
- s = trim(buffer);
- if (!s || *s == '#') {
- netdata_log_debug(D_STATSD, "STATSD: ignoring line %zu of file '%s', it is empty.", line, filename);
- continue;
- }
-
- netdata_log_debug(D_STATSD, "STATSD: processing line %zu of file '%s': %s", line, filename, buffer);
-
- if(*s == 'i' && strncmp(s, "include", 7) == 0) {
- s = trim(&s[7]);
- if(s && *s) {
- char *tmp;
- if(*s == '/')
- tmp = strdupz(s);
- else {
- // the file to be included is relative to current file
- // find the directory name from the file we already read
- char *filename2 = strdupz(filename); // copy filename, since dirname() will change it
- char *dir = dirname(filename2); // find the directory part of the filename
- tmp = strdupz_path_subpath(dir, s); // compose the new filename to read;
- freez(filename2); // free the filename we copied
- }
- statsd_readfile(tmp, app, chart, dict);
- freez(tmp);
- }
- else
- netdata_log_error("STATSD: ignoring line %zu of file '%s', include filename is empty", line, filename);
-
- continue;
- }
-
- int len = (int) strlen(s);
- if (*s == '[' && s[len - 1] == ']') {
- // new section
- s[len - 1] = '\0';
- s++;
-
- if (!strcmp(s, "app")) {
- // a new app
- app = callocz(sizeof(STATSD_APP), 1);
- app->name = strdupz("unnamed");
- app->rrd_memory_mode = localhost->rrd_memory_mode;
- app->rrd_history_entries = localhost->rrd_history_entries;
-
- app->next = statsd.apps;
- statsd.apps = app;
- chart = NULL;
- dict = NULL;
-
- {
- char lineandfile[FILENAME_MAX + 1];
- snprintfz(lineandfile, FILENAME_MAX, "%zu@%s", line, filename);
- app->source = strdupz(lineandfile);
- }
- }
- else if(app) {
- if(!strcmp(s, "dictionary")) {
- if(!app->dict)
- app->dict = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED, &dictionary_stats_category_collectors, 0);
-
- dict = app->dict;
- }
- else {
- dict = NULL;
-
- // a new chart
- chart = callocz(sizeof(STATSD_APP_CHART), 1);
- netdata_fix_chart_id(s);
- chart->id = strdupz(s);
- chart->name = strdupz(s);
- chart->title = strdupz("Statsd chart");
- chart->context = strdupz(s);
- chart->family = strdupz("overview");
- chart->units = strdupz("value");
- chart->priority = NETDATA_CHART_PRIO_STATSD_PRIVATE;
- chart->chart_type = RRDSET_TYPE_LINE;
-
- chart->next = app->charts;
- app->charts = chart;
-
- if (!strncmp(
- filename,
- netdata_configured_stock_config_dir,
- strlen(netdata_configured_stock_config_dir))) {
- char tmpfilename[FILENAME_MAX + 1];
- strncpyz(tmpfilename, filename, FILENAME_MAX);
- chart->module = strdupz(basename(tmpfilename));
- } else {
- chart->module = strdupz("synthetic_chart");
- }
- }
- }
- else
- netdata_log_error("STATSD: ignoring line %zu ('%s') of file '%s', [app] is not defined.", line, s, filename);
-
- continue;
- }
-
- if(!app) {
- netdata_log_error("STATSD: ignoring line %zu ('%s') of file '%s', it is outside all sections.", line, s, filename);
- continue;
- }
-
- char *name = s;
- char *value = strchr(s, '=');
- if(!value) {
- netdata_log_error("STATSD: ignoring line %zu ('%s') of file '%s', there is no = in it.", line, s, filename);
- continue;
- }
- *value = '\0';
- value++;
-
- name = trim(name);
- value = trim(value);
-
- if(!name || *name == '#') {
- netdata_log_error("STATSD: ignoring line %zu of file '%s', name is empty.", line, filename);
- continue;
- }
- if(!value) {
- netdata_log_debug(D_CONFIG, "STATSD: ignoring line %zu of file '%s', value is empty.", line, filename);
- continue;
- }
-
- if(unlikely(dict)) {
- // parse [dictionary] members
-
- dictionary_set(dict, name, value, strlen(value) + 1);
- }
- else if(!chart) {
- // parse [app] members
-
- if(!strcmp(name, "name")) {
- freez((void *)app->name);
- netdata_fix_chart_name(value);
- app->name = strdupz(value);
- }
- else if (!strcmp(name, "metrics")) {
- simple_pattern_free(app->metrics);
- app->metrics = simple_pattern_create(value, NULL, SIMPLE_PATTERN_EXACT, true);
- }
- else if (!strcmp(name, "private charts")) {
- if (!strcmp(value, "yes") || !strcmp(value, "on"))
- app->default_options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
- else
- app->default_options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
- }
- else if (!strcmp(name, "gaps when not collected")) {
- if (!strcmp(value, "yes") || !strcmp(value, "on"))
- app->default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
- }
- else if (!strcmp(name, "memory mode")) {
- // this is not supported anymore
- // with the implementation of storage engines, all charts have the same storage engine always
- // app->rrd_memory_mode = rrd_memory_mode_id(value);
- ;
- }
- else if (!strcmp(name, "history")) {
- app->rrd_history_entries = atol(value);
- if (app->rrd_history_entries < 5)
- app->rrd_history_entries = 5;
- }
- else {
- netdata_log_error("STATSD: ignoring line %zu ('%s') of file '%s'. Unknown keyword for the [app] section.", line, name, filename);
- continue;
- }
- }
- else {
- // parse [chart] members
-
- if(!strcmp(name, "name")) {
- freez((void *)chart->name);
- netdata_fix_chart_id(value);
- chart->name = strdupz(value);
- }
- else if(!strcmp(name, "title")) {
- freez((void *)chart->title);
- chart->title = strdupz(value);
- }
- else if (!strcmp(name, "family")) {
- freez((void *)chart->family);
- chart->family = strdupz(value);
- }
- else if (!strcmp(name, "context")) {
- freez((void *)chart->context);
- netdata_fix_chart_id(value);
- chart->context = strdupz(value);
- }
- else if (!strcmp(name, "units")) {
- freez((void *)chart->units);
- chart->units = strdupz(value);
- }
- else if (!strcmp(name, "priority")) {
- chart->priority = atol(value);
- }
- else if (!strcmp(name, "type")) {
- chart->chart_type = rrdset_type_id(value);
- }
- else if (!strcmp(name, "dimension")) {
- // metric [name [type [multiplier [divisor]]]]
- char *words[10] = { NULL };
- size_t num_words = quoted_strings_splitter_pluginsd(value, words, 10);
-
- int pattern = 0;
- size_t i = 0;
- char *metric_name = get_word(words, num_words, i++);
-
- if(strcmp(metric_name, "pattern") == 0) {
- metric_name = get_word(words, num_words, i++);
- pattern = 1;
- }
-
- char *dim_name = get_word(words, num_words, i++);
- char *type = get_word(words, num_words, i++);
- char *multiplier = get_word(words, num_words, i++);
- char *divisor = get_word(words, num_words, i++);
- char *opts = get_word(words, num_words, i++);
-
- RRDDIM_FLAGS flags = RRDDIM_FLAG_NONE;
- RRDDIM_OPTIONS options = RRDDIM_OPTION_NONE;
- if(opts && *opts) {
- if(strstr(opts, "hidden") != NULL) options |= RRDDIM_OPTION_HIDDEN;
- if(strstr(opts, "noreset") != NULL) options |= RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS;
- if(strstr(opts, "nooverflow") != NULL) options |= RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS;
- }
-
- if(!pattern) {
- if(app->dict) {
- if(dim_name && *dim_name) {
- char *n = dictionary_get(app->dict, dim_name);
- if(n) dim_name = n;
- }
- else {
- dim_name = dictionary_get(app->dict, metric_name);
- }
- }
-
- if(!dim_name || !*dim_name)
- dim_name = metric_name;
- }
-
- STATSD_APP_CHART_DIM *dim = add_dimension_to_app_chart(
- app
- , chart
- , metric_name
- , dim_name
- , (multiplier && *multiplier)?str2l(multiplier):1
- , (divisor && *divisor)?str2l(divisor):1
- , flags
-                        , options
-                        , string2valuetype(type, line, filename)
- );
-
- if(pattern)
- dim->metric_pattern = simple_pattern_create(dim->metric, NULL, SIMPLE_PATTERN_EXACT, true);
- }
- else {
- netdata_log_error("STATSD: ignoring line %zu ('%s') of file '%s'. Unknown keyword for the [%s] section.", line, name, filename, chart->id);
- continue;
- }
- }
- }
-
- freez(buffer);
- fclose(fp);
- return 0;
-}
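-
-// illustrative example of the syntax parsed above, for a hypothetical file
-// under statsd.d/ (all names and values are made up):
-//
-//   [app]
-//   name = myapp
-//   metrics = myapp.*
-//   private charts = no
-//
-//   [requests]
-//   title = myapp requests
-//   units = requests/s
-//   dimension = myapp.requests.ok ok last 1 1
-//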
-
-static int statsd_file_callback(const char *filename, void *data) {
- (void)data;
- return statsd_readfile(filename, NULL, NULL, NULL);
-}
-
-static inline void statsd_readdir(const char *user_path, const char *stock_path, const char *subpath) {
- recursive_config_double_dir_load(user_path, stock_path, subpath, statsd_file_callback, NULL, 0);
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// send metrics to netdata - in private charts - called from the main thread
-
-// extract chart type and chart id from metric name
-static inline void statsd_get_metric_type_and_id(STATSD_METRIC *m, char *type, char *id, char *context, const char *metrictype, size_t len) {
-
- // The full chart type.id looks like this:
- // ${STATSD_CHART_PREFIX} + "_" + ${METRIC_NAME} + "_" + ${METRIC_TYPE}
- //
- // where:
- // STATSD_CHART_PREFIX = "statsd" as defined above
- // METRIC_NAME = whatever the user gave to statsd
- // METRIC_TYPE = "gauge", "counter", "meter", "timer", "histogram", "set", "dictionary"
-
- // for chart type, we want:
- // ${STATSD_CHART_PREFIX} + "_" + the first word of ${METRIC_NAME}
-
- // find the first word of ${METRIC_NAME}
-    char firstword[len + 1], *s;
- strncpyz(firstword, m->name, len);
- for (s = firstword; *s ; s++) {
- if (unlikely(*s == '.' || *s == '_')) {
- *s = '\0';
- s++;
- break;
- }
- }
- // firstword has the first word of ${METRIC_NAME}
- // s has the remaining, if any
-
- // create the chart type:
- snprintfz(type, len, STATSD_CHART_PREFIX "_%s", firstword);
-
- // for chart id, we want:
-    // the remaining words of ${METRIC_NAME} + "_" + ${METRIC_TYPE}
-    // or, if ${METRIC_NAME} has no remaining words, the ${METRIC_TYPE} alone
- if(*s)
- snprintfz(id, len, "%s_%s", s, metrictype);
- else
- snprintfz(id, len, "%s", metrictype);
-
- // for the context, we want the full of both the above, separated with a dot (type.id):
- snprintfz(context, RRD_ID_LENGTH_MAX, "%s.%s", type, id);
-
- // make sure they don't have illegal characters
- netdata_fix_chart_id(type);
- netdata_fix_chart_id(id);
- netdata_fix_chart_id(context);
-}
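-
-// worked example (hypothetical metric): a gauge named "mysql.connections.active"
-// has first word "mysql", so the private chart gets:
-//   type    = "statsd_mysql"
-//   id      = "connections.active_gauge"
-//   context = "statsd_mysql.connections.active_gauge"
-// before netdata_fix_chart_id() replaces any characters invalid in chart ids.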
-
-static inline RRDSET *statsd_private_rrdset_create(
- STATSD_METRIC *m __maybe_unused
- , const char *type
- , const char *id
- , const char *name
- , const char *family
- , const char *context
- , const char *title
- , const char *units
- , long priority
- , int update_every
- , RRDSET_TYPE chart_type
-) {
- if(!m->st)
- statsd.private_charts++;
-
- RRDSET *st = rrdset_create_custom(
- localhost // host
- , type // type
- , id // id
- , name // name
- , family // family
- , context // context
- , title // title
- , units // units
- , PLUGIN_STATSD_NAME // plugin
- , "private_chart" // module
- , priority // priority
- , update_every // update every
- , chart_type // chart type
- , default_rrd_memory_mode // memory mode
- , default_rrd_history_entries // history
- );
- rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST);
-
- if(statsd.private_charts_hidden)
- rrdset_flag_set(st, RRDSET_FLAG_HIDDEN);
-
- // rrdset_flag_set(st, RRDSET_FLAG_DEBUG);
- return st;
-}
-
-static inline void statsd_private_chart_gauge(STATSD_METRIC *m) {
- netdata_log_debug(D_STATSD, "updating private chart for gauge metric '%s'", m->name);
-
- if(m->st && unlikely(rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE)))
- return;
-
- if(unlikely(!m->st || m->options & STATSD_METRIC_OPTION_UPDATED_CHART_METADATA)) {
- m->options &= ~STATSD_METRIC_OPTION_UPDATED_CHART_METADATA;
-
- char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1], context[RRD_ID_LENGTH_MAX + 1];
- statsd_get_metric_type_and_id(m, type, id, context, "gauge", RRD_ID_LENGTH_MAX);
-
- char title[RRD_ID_LENGTH_MAX + 1];
- snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for gauge %s", m->name);
-
- m->st = statsd_private_rrdset_create(
- m
- , type
- , id
- , NULL // name
- , m->family?m->family:"gauges" // family (submenu)
- , context // context
- , title // title
- , m->units?m->units:"value" // units
- , NETDATA_CHART_PRIO_STATSD_PRIVATE
- , statsd.update_every
- , RRDSET_TYPE_LINE
- );
-
- m->rd_value = rrddim_add(m->st, "gauge", m->dimname?m->dimname:NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
-
- if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
- m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(m->st, m->rd_value, m->last);
-
- if(m->rd_count)
- rrddim_set_by_pointer(m->st, m->rd_count, m->events);
-
- rrdset_done(m->st);
-}
-
-static inline void statsd_private_chart_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) {
- netdata_log_debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name);
-
- if(m->st && unlikely(rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE)))
- return;
-
- if(unlikely(!m->st || m->options & STATSD_METRIC_OPTION_UPDATED_CHART_METADATA)) {
- m->options &= ~STATSD_METRIC_OPTION_UPDATED_CHART_METADATA;
-
- char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1], context[RRD_ID_LENGTH_MAX + 1];
- statsd_get_metric_type_and_id(m, type, id, context, dim, RRD_ID_LENGTH_MAX);
-
- char title[RRD_ID_LENGTH_MAX + 1];
- snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for %s %s", dim, m->name);
-
- m->st = statsd_private_rrdset_create(
- m
- , type
- , id
- , NULL // name
- , m->family?m->family:family // family (submenu)
- , context // context
- , title // title
- , m->units?m->units:"events/s" // units
- , NETDATA_CHART_PRIO_STATSD_PRIVATE
- , statsd.update_every
- , RRDSET_TYPE_AREA
- );
-
- m->rd_value = rrddim_add(m->st, dim, m->dimname?m->dimname:NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
- m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(m->st, m->rd_value, m->last);
-
- if(m->rd_count)
- rrddim_set_by_pointer(m->st, m->rd_count, m->events);
-
- rrdset_done(m->st);
-}
-
-static inline void statsd_private_chart_set(STATSD_METRIC *m) {
- netdata_log_debug(D_STATSD, "updating private chart for set metric '%s'", m->name);
-
- if(m->st && unlikely(rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE)))
- return;
-
- if(unlikely(!m->st || m->options & STATSD_METRIC_OPTION_UPDATED_CHART_METADATA)) {
- m->options &= ~STATSD_METRIC_OPTION_UPDATED_CHART_METADATA;
-
- char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1], context[RRD_ID_LENGTH_MAX + 1];
- statsd_get_metric_type_and_id(m, type, id, context, "set", RRD_ID_LENGTH_MAX);
-
- char title[RRD_ID_LENGTH_MAX + 1];
- snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for set %s", m->name);
-
- m->st = statsd_private_rrdset_create(
- m
- , type
- , id
- , NULL // name
- , m->family?m->family:"sets" // family (submenu)
- , context // context
- , title // title
- , m->units?m->units:"entries" // units
- , NETDATA_CHART_PRIO_STATSD_PRIVATE
- , statsd.update_every
- , RRDSET_TYPE_LINE
- );
-
- m->rd_value = rrddim_add(m->st, "set", m->dimname?m->dimname:"unique", 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
- m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(m->st, m->rd_value, m->last);
-
- if(m->rd_count)
- rrddim_set_by_pointer(m->st, m->rd_count, m->events);
-
- rrdset_done(m->st);
-}
-
-static inline void statsd_private_chart_dictionary(STATSD_METRIC *m) {
- netdata_log_debug(D_STATSD, "updating private chart for dictionary metric '%s'", m->name);
-
- if(m->st && unlikely(rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE)))
- return;
-
- if(unlikely(!m->st || m->options & STATSD_METRIC_OPTION_UPDATED_CHART_METADATA)) {
- m->options &= ~STATSD_METRIC_OPTION_UPDATED_CHART_METADATA;
-
- char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1], context[RRD_ID_LENGTH_MAX + 1];
- statsd_get_metric_type_and_id(m, type, id, context, "dictionary", RRD_ID_LENGTH_MAX);
-
- char title[RRD_ID_LENGTH_MAX + 1];
- snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for dictionary %s", m->name);
-
- m->st = statsd_private_rrdset_create(
- m
- , type
- , id
- , NULL // name
- , m->family?m->family:"dictionaries" // family (submenu)
- , context // context
- , title // title
- , m->units?m->units:"events/s" // units
- , NETDATA_CHART_PRIO_STATSD_PRIVATE
- , statsd.update_every
- , RRDSET_TYPE_STACKED
- );
-
- if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
- m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- STATSD_METRIC_DICTIONARY_ITEM *t;
- dfe_start_read(m->dictionary.dict, t) {
- if (!t->rd) t->rd = rrddim_add(m->st, t_dfe.name, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_set_by_pointer(m->st, t->rd, (collected_number)t->count);
- }
- dfe_done(t);
-
- if(m->rd_count)
- rrddim_set_by_pointer(m->st, m->rd_count, m->events);
-
- rrdset_done(m->st);
-}
-
-static inline void statsd_private_chart_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) {
- netdata_log_debug(D_STATSD, "updating private chart for %s metric '%s'", dim, m->name);
-
- if(m->st && unlikely(rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE)))
- return;
-
- if(unlikely(!m->st || m->options & STATSD_METRIC_OPTION_UPDATED_CHART_METADATA)) {
- m->options &= ~STATSD_METRIC_OPTION_UPDATED_CHART_METADATA;
-
- char type[RRD_ID_LENGTH_MAX + 1], id[RRD_ID_LENGTH_MAX + 1], context[RRD_ID_LENGTH_MAX + 1];
- statsd_get_metric_type_and_id(m, type, id, context, dim, RRD_ID_LENGTH_MAX);
-
- char title[RRD_ID_LENGTH_MAX + 1];
- snprintfz(title, RRD_ID_LENGTH_MAX, "statsd private chart for %s %s", dim, m->name);
-
- m->st = statsd_private_rrdset_create(
- m
- , type
- , id
- , NULL // name
- , m->family?m->family:family // family (submenu)
- , context // context
- , title // title
- , m->units?m->units:units // units
- , NETDATA_CHART_PRIO_STATSD_PRIVATE
- , statsd.update_every
- , RRDSET_TYPE_AREA
- );
-
- m->histogram.ext->rd_min = rrddim_add(m->st, "min", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
- m->histogram.ext->rd_max = rrddim_add(m->st, "max", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
- m->rd_value = rrddim_add(m->st, "average", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
- m->histogram.ext->rd_percentile = rrddim_add(m->st, statsd.histogram_percentile_str, NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
- m->histogram.ext->rd_median = rrddim_add(m->st, "median", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
- m->histogram.ext->rd_stddev = rrddim_add(m->st, "stddev", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
- //m->histogram.ext->rd_sum = rrddim_add(m->st, "sum", NULL, 1, statsd.decimal_detail, RRD_ALGORITHM_ABSOLUTE);
-
- if(m->options & STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT)
- m->rd_count = rrddim_add(m->st, "events", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(m->st, m->histogram.ext->rd_min, m->histogram.ext->last_min);
- rrddim_set_by_pointer(m->st, m->histogram.ext->rd_max, m->histogram.ext->last_max);
- rrddim_set_by_pointer(m->st, m->histogram.ext->rd_percentile, m->histogram.ext->last_percentile);
- rrddim_set_by_pointer(m->st, m->histogram.ext->rd_median, m->histogram.ext->last_median);
- rrddim_set_by_pointer(m->st, m->histogram.ext->rd_stddev, m->histogram.ext->last_stddev);
- //rrddim_set_by_pointer(m->st, m->histogram.ext->rd_sum, m->histogram.ext->last_sum);
- rrddim_set_by_pointer(m->st, m->rd_value, m->last);
-
- if(m->rd_count)
- rrddim_set_by_pointer(m->st, m->rd_count, m->events);
-
- rrdset_done(m->st);
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// statsd flush metrics
-
-static inline void metric_check_obsoletion(STATSD_METRIC *m) {
- if(statsd.set_obsolete_after &&
- !rrdset_flag_check(m->st, RRDSET_FLAG_OBSOLETE) &&
- m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED &&
- m->last_collected + statsd.set_obsolete_after < now_realtime_sec()) {
- rrdset_is_obsolete___safe_from_collector_thread(m->st);
- m->options |= STATSD_METRIC_OPTION_OBSOLETE;
- }
-}
-
-static inline void statsd_flush_gauge(STATSD_METRIC *m) {
- netdata_log_debug(D_STATSD, "flushing gauge metric '%s'", m->name);
-
- int updated = 0;
- if(unlikely(!m->reset && m->count)) {
- m->last = (collected_number) (m->gauge.value * statsd.decimal_detail);
-
- m->reset = 1;
- updated = 1;
- }
-
- if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
- statsd_private_chart_gauge(m);
-
- metric_check_obsoletion(m);
-}
-
-static inline void statsd_flush_counter_or_meter(STATSD_METRIC *m, const char *dim, const char *family) {
- netdata_log_debug(D_STATSD, "flushing %s metric '%s'", dim, m->name);
-
- int updated = 0;
- if(unlikely(!m->reset && m->count)) {
- m->last = m->counter.value;
-
- m->reset = 1;
- updated = 1;
- }
-
- if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
- statsd_private_chart_counter_or_meter(m, dim, family);
-
- metric_check_obsoletion(m);
-}
-
-static inline void statsd_flush_counter(STATSD_METRIC *m) {
- statsd_flush_counter_or_meter(m, "counter", "counters");
-}
-
-static inline void statsd_flush_meter(STATSD_METRIC *m) {
- statsd_flush_counter_or_meter(m, "meter", "meters");
-}
-
-static inline void statsd_flush_set(STATSD_METRIC *m) {
- netdata_log_debug(D_STATSD, "flushing set metric '%s'", m->name);
-
- int updated = 0;
- if(unlikely(!m->reset && m->count)) {
- m->last = (collected_number)dictionary_entries(m->set.dict);
-
- m->reset = 1;
- updated = 1;
- }
- else {
- m->last = 0;
- }
-
- if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
- statsd_private_chart_set(m);
-
- metric_check_obsoletion(m);
-}
-
-static inline void statsd_flush_dictionary(STATSD_METRIC *m) {
- netdata_log_debug(D_STATSD, "flushing dictionary metric '%s'", m->name);
-
- int updated = 0;
- if(unlikely(!m->reset && m->count)) {
- m->last = (collected_number)dictionary_entries(m->dictionary.dict);
-
- m->reset = 1;
- updated = 1;
- }
- else {
- m->last = 0;
- }
-
- if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
- statsd_private_chart_dictionary(m);
-
- if(dictionary_entries(m->dictionary.dict) >= statsd.dictionary_max_unique) {
- if(!(m->options & STATSD_METRIC_OPTION_COLLECTION_FULL_LOGGED)) {
- m->options |= STATSD_METRIC_OPTION_COLLECTION_FULL_LOGGED;
- collector_info(
- "STATSD dictionary '%s' reach max of %zu items - try increasing 'dictionaries max unique dimensions' in netdata.conf",
- m->name,
- dictionary_entries(m->dictionary.dict));
- }
- }
-
- metric_check_obsoletion(m);
-}
-
-static inline void statsd_flush_timer_or_histogram(STATSD_METRIC *m, const char *dim, const char *family, const char *units) {
- netdata_log_debug(D_STATSD, "flushing %s metric '%s'", dim, m->name);
-
- int updated = 0;
- if(unlikely(!m->reset && m->count && m->histogram.ext->used > 0)) {
- netdata_mutex_lock(&m->histogram.ext->mutex);
-
- size_t len = m->histogram.ext->used;
- NETDATA_DOUBLE *series = m->histogram.ext->values;
- sort_series(series, len);
-
- m->histogram.ext->last_min = (collected_number)roundndd(series[0] * statsd.decimal_detail);
- m->histogram.ext->last_max = (collected_number)roundndd(series[len - 1] * statsd.decimal_detail);
- m->last = (collected_number)roundndd(average(series, len) * statsd.decimal_detail);
- m->histogram.ext->last_median = (collected_number)roundndd(median_on_sorted_series(series, len) * statsd.decimal_detail);
- m->histogram.ext->last_stddev = (collected_number)roundndd(standard_deviation(series, len) * statsd.decimal_detail);
- m->histogram.ext->last_sum = (collected_number)roundndd(sum(series, len) * statsd.decimal_detail);
-
- size_t pct_len = (size_t)floor((double)len * statsd.histogram_percentile / 100.0);
- if(pct_len < 1)
- m->histogram.ext->last_percentile = (collected_number)(series[0] * statsd.decimal_detail);
- else
- m->histogram.ext->last_percentile = (collected_number)roundndd(series[pct_len - 1] * statsd.decimal_detail);
-
- netdata_mutex_unlock(&m->histogram.ext->mutex);
-
- netdata_log_debug(D_STATSD, "STATSD %s metric %s: min " COLLECTED_NUMBER_FORMAT ", max " COLLECTED_NUMBER_FORMAT ", last " COLLECTED_NUMBER_FORMAT ", pcent " COLLECTED_NUMBER_FORMAT ", median " COLLECTED_NUMBER_FORMAT ", stddev " COLLECTED_NUMBER_FORMAT ", sum " COLLECTED_NUMBER_FORMAT,
- dim, m->name, m->histogram.ext->last_min, m->histogram.ext->last_max, m->last, m->histogram.ext->last_percentile, m->histogram.ext->last_median, m->histogram.ext->last_stddev, m->histogram.ext->last_sum);
-
- m->histogram.ext->zeroed = 0;
- m->reset = 1;
- updated = 1;
- }
- else if(unlikely(!m->histogram.ext->zeroed)) {
- // reset the metrics
- // if we collected anything, they will be updated below
- // this ensures that we report zeros if nothing is collected
-
- m->histogram.ext->last_min = 0;
- m->histogram.ext->last_max = 0;
- m->last = 0;
- m->histogram.ext->last_median = 0;
- m->histogram.ext->last_stddev = 0;
- m->histogram.ext->last_sum = 0;
- m->histogram.ext->last_percentile = 0;
-
- m->histogram.ext->zeroed = 1;
- }
-
- if(unlikely(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED && (updated || !(m->options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED))))
- statsd_private_chart_timer_or_histogram(m, dim, family, units);
-
- metric_check_obsoletion(m);
-}
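-
-// worked example: with 200 sorted samples and the default 95th percentile,
-// pct_len = floor(200 * 95 / 100) = 190 and the reported value is series[189],
-// i.e. the largest sample after discarding the top 5% of the distribution.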
-
-static inline void statsd_flush_timer(STATSD_METRIC *m) {
- statsd_flush_timer_or_histogram(m, "timer", "timers", "milliseconds");
-}
-
-static inline void statsd_flush_histogram(STATSD_METRIC *m) {
- statsd_flush_timer_or_histogram(m, "histogram", "histograms", "value");
-}
-
-static inline RRD_ALGORITHM statsd_algorithm_for_metric(STATSD_METRIC *m) {
- switch(m->type) {
- default:
- case STATSD_METRIC_TYPE_GAUGE:
- case STATSD_METRIC_TYPE_SET:
- case STATSD_METRIC_TYPE_TIMER:
- case STATSD_METRIC_TYPE_HISTOGRAM:
- return RRD_ALGORITHM_ABSOLUTE;
-
- case STATSD_METRIC_TYPE_METER:
- case STATSD_METRIC_TYPE_COUNTER:
- case STATSD_METRIC_TYPE_DICTIONARY:
- return RRD_ALGORITHM_INCREMENTAL;
- }
-}
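-
-// note: gauges, sets, timers and histograms publish the flushed value as-is
-// (absolute), while counters, meters and dictionaries publish running totals
-// that the rrd layer differentiates into per-second rates (incremental).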
-
-static inline void link_metric_to_app_dimension(STATSD_APP *app, STATSD_METRIC *m, STATSD_APP_CHART *chart, STATSD_APP_CHART_DIM *dim) {
- if(dim->value_type == STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS) {
- dim->value_ptr = &m->events;
- dim->algorithm = RRD_ALGORITHM_INCREMENTAL;
- }
- else if(m->type == STATSD_METRIC_TYPE_HISTOGRAM || m->type == STATSD_METRIC_TYPE_TIMER) {
- dim->algorithm = RRD_ALGORITHM_ABSOLUTE;
- dim->divisor *= statsd.decimal_detail;
-
- switch(dim->value_type) {
- case STATSD_APP_CHART_DIM_VALUE_TYPE_EVENTS:
- // will never match - added to avoid warning
- break;
-
- case STATSD_APP_CHART_DIM_VALUE_TYPE_LAST:
- case STATSD_APP_CHART_DIM_VALUE_TYPE_AVERAGE:
- dim->value_ptr = &m->last;
- break;
-
- case STATSD_APP_CHART_DIM_VALUE_TYPE_SUM:
- dim->value_ptr = &m->histogram.ext->last_sum;
- break;
-
- case STATSD_APP_CHART_DIM_VALUE_TYPE_MIN:
- dim->value_ptr = &m->histogram.ext->last_min;
- break;
-
- case STATSD_APP_CHART_DIM_VALUE_TYPE_MAX:
- dim->value_ptr = &m->histogram.ext->last_max;
- break;
-
- case STATSD_APP_CHART_DIM_VALUE_TYPE_MEDIAN:
- dim->value_ptr = &m->histogram.ext->last_median;
- break;
-
- case STATSD_APP_CHART_DIM_VALUE_TYPE_PERCENTILE:
- dim->value_ptr = &m->histogram.ext->last_percentile;
- break;
-
- case STATSD_APP_CHART_DIM_VALUE_TYPE_STDDEV:
- dim->value_ptr = &m->histogram.ext->last_stddev;
- break;
- }
- }
- else {
- if (dim->value_type != STATSD_APP_CHART_DIM_VALUE_TYPE_LAST)
- netdata_log_error("STATSD: unsupported value type for dimension '%s' of chart '%s' of app '%s' on metric '%s'", dim->name, chart->id, app->name, m->name);
-
- dim->value_ptr = &m->last;
- dim->algorithm = statsd_algorithm_for_metric(m);
-
- if(m->type == STATSD_METRIC_TYPE_GAUGE)
- dim->divisor *= statsd.decimal_detail;
- }
-
- if(unlikely(chart->st && dim->rd)) {
- rrddim_set_algorithm(chart->st, dim->rd, dim->algorithm);
- rrddim_set_multiplier(chart->st, dim->rd, dim->multiplier);
- rrddim_set_divisor(chart->st, dim->rd, dim->divisor);
- }
-
- chart->dimensions_linked_count++;
- m->options |= STATSD_METRIC_OPTION_USED_IN_APPS;
- netdata_log_debug(D_STATSD, "metric '%s' of type %u linked with app '%s', chart '%s', dimension '%s', algorithm '%s'", m->name, m->type, app->name, chart->id, dim->name, rrd_algorithm_name(dim->algorithm));
-}
-
-static inline void check_if_metric_is_for_app(STATSD_INDEX *index, STATSD_METRIC *m) {
- (void)index;
-
- STATSD_APP *app;
- for(app = statsd.apps; app ;app = app->next) {
- if(unlikely(simple_pattern_matches(app->metrics, m->name))) {
- netdata_log_debug(D_STATSD, "metric '%s' matches app '%s'", m->name, app->name);
-
- // the metric should get the options from the app
-
- if(app->default_options & STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED)
- m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
- else
- m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
-
- if(app->default_options & STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED)
- m->options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
- else
- m->options &= ~STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
-
- m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED;
-
- // check if there is a chart in this app, willing to get this metric
- STATSD_APP_CHART *chart;
- for(chart = app->charts; chart; chart = chart->next) {
-
- STATSD_APP_CHART_DIM *dim;
- for(dim = chart->dimensions; dim ; dim = dim->next) {
- if(unlikely(dim->metric_pattern)) {
- size_t dim_name_len = strlen(dim->name);
- size_t wildcarded_len = dim_name_len + strlen(m->name) + 1;
- char wildcarded[wildcarded_len];
-
- strcpy(wildcarded, dim->name);
- char *ws = &wildcarded[dim_name_len];
-
- if(simple_pattern_matches_extract(dim->metric_pattern, m->name, ws, wildcarded_len - dim_name_len) == SP_MATCHED_POSITIVE) {
-
- char *final_name = NULL;
-
- if(app->dict) {
- if(likely(*wildcarded)) {
- // use the name of the wildcarded string
- final_name = dictionary_get(app->dict, wildcarded);
- }
-
- if(unlikely(!final_name)) {
- // use the name of the metric
- final_name = dictionary_get(app->dict, m->name);
- }
- }
-
- if(unlikely(!final_name))
- final_name = wildcarded;
-
- add_dimension_to_app_chart(
- app
- , chart
- , m->name
- , final_name
- , dim->multiplier
- , dim->divisor
- , dim->flags
- , dim->options
- , dim->value_type
- );
-
- // the new dimension is appended to the list
- // so, it will be matched and linked later too
- }
- }
- else if(!dim->value_ptr && dim->metric_hash == m->hash && !strcmp(dim->metric, m->name)) {
- // we have a match - this metric should be linked to this dimension
- link_metric_to_app_dimension(app, m, chart, dim);
- }
- }
-
- }
- }
- }
-}
-
-static inline RRDDIM *statsd_add_dim_to_app_chart(STATSD_APP *app, STATSD_APP_CHART *chart, STATSD_APP_CHART_DIM *dim) {
- (void)app;
-
- // allow the same statsd metric to be added multiple times to the same chart
-
- STATSD_APP_CHART_DIM *tdim;
- size_t count_same_metric = 0, count_same_metric_value_type = 0;
- size_t pos_same_metric_value_type = 0;
-
- for (tdim = chart->dimensions; tdim && tdim->next; tdim = tdim->next) {
- if (dim->metric_hash == tdim->metric_hash && !strcmp(dim->metric, tdim->metric)) {
- count_same_metric++;
-
- if(dim->value_type == tdim->value_type) {
- count_same_metric_value_type++;
- if (tdim == dim)
- pos_same_metric_value_type = count_same_metric_value_type;
- }
- }
- }
-
- if(count_same_metric > 1) {
- // the same metric is found multiple times
-
- size_t len = strlen(dim->metric) + 100;
- char metric[ len + 1 ];
-
- if(count_same_metric_value_type > 1) {
- // the same metric, with the same value type, is added multiple times
- snprintfz(metric, len, "%s_%s%zu", dim->metric, valuetype2string(dim->value_type), pos_same_metric_value_type);
- }
- else {
-            // the same metric, with a different value type, is added
- snprintfz(metric, len, "%s_%s", dim->metric, valuetype2string(dim->value_type));
- }
-
- dim->rd = rrddim_add(chart->st, metric, dim->name, dim->multiplier, dim->divisor, dim->algorithm);
- if(dim->flags != RRDDIM_FLAG_NONE) dim->rd->flags |= dim->flags;
- if(dim->options != RRDDIM_OPTION_NONE) dim->rd->collector.options |= dim->options;
- return dim->rd;
- }
-
- dim->rd = rrddim_add(chart->st, dim->metric, dim->name, dim->multiplier, dim->divisor, dim->algorithm);
- if(dim->flags != RRDDIM_FLAG_NONE) dim->rd->flags |= dim->flags;
- if(dim->options != RRDDIM_OPTION_NONE) dim->rd->collector.options |= dim->options;
- return dim->rd;
-}
-
-static inline void statsd_update_app_chart(STATSD_APP *app, STATSD_APP_CHART *chart) {
- netdata_log_debug(D_STATSD, "updating chart '%s' for app '%s'", chart->id, app->name);
-
- if(!chart->st) {
- chart->st = rrdset_create_custom(
- localhost // host
- , app->name // type
- , chart->id // id
- , chart->name // name
- , chart->family // family
- , chart->context // context
- , chart->title // title
- , chart->units // units
- , PLUGIN_STATSD_NAME // plugin
- , chart->module // module
- , chart->priority // priority
- , statsd.update_every // update every
- , chart->chart_type // chart type
- , app->rrd_memory_mode // memory mode
- , app->rrd_history_entries // history
- );
-
- rrdset_flag_set(chart->st, RRDSET_FLAG_STORE_FIRST);
- // rrdset_flag_set(chart->st, RRDSET_FLAG_DEBUG);
- }
-
- STATSD_APP_CHART_DIM *dim;
- for(dim = chart->dimensions; dim ;dim = dim->next) {
- if(likely(!dim->metric_pattern)) {
- if (unlikely(!dim->rd))
- statsd_add_dim_to_app_chart(app, chart, dim);
-
- if (unlikely(dim->value_ptr)) {
- netdata_log_debug(D_STATSD, "updating dimension '%s' (%s) of chart '%s' (%s) for app '%s' with value " COLLECTED_NUMBER_FORMAT, dim->name, rrddim_id(dim->rd), chart->id, rrdset_id(chart->st), app->name, *dim->value_ptr);
- rrddim_set_by_pointer(chart->st, dim->rd, *dim->value_ptr);
- }
- }
- }
-
- rrdset_done(chart->st);
- netdata_log_debug(D_STATSD, "completed update of chart '%s' for app '%s'", chart->id, app->name);
-}
-
-static inline void statsd_update_all_app_charts(void) {
- // netdata_log_debug(D_STATSD, "updating app charts");
-
- STATSD_APP *app;
- for(app = statsd.apps; app ;app = app->next) {
- // netdata_log_debug(D_STATSD, "updating charts for app '%s'", app->name);
-
- STATSD_APP_CHART *chart;
- for(chart = app->charts; chart ;chart = chart->next) {
- if(unlikely(chart->dimensions_linked_count)) {
- statsd_update_app_chart(app, chart);
- }
- }
- }
-
- // netdata_log_debug(D_STATSD, "completed update of app charts");
-}
-
-const char *statsd_metric_type_string(STATSD_METRIC_TYPE type) {
- switch(type) {
- case STATSD_METRIC_TYPE_COUNTER: return "counter";
- case STATSD_METRIC_TYPE_GAUGE: return "gauge";
- case STATSD_METRIC_TYPE_HISTOGRAM: return "histogram";
- case STATSD_METRIC_TYPE_METER: return "meter";
- case STATSD_METRIC_TYPE_SET: return "set";
- case STATSD_METRIC_TYPE_DICTIONARY: return "dictionary";
- case STATSD_METRIC_TYPE_TIMER: return "timer";
- default: return "unknown";
- }
-}
-
-static inline void statsd_flush_index_metrics(STATSD_INDEX *index, void (*flush_metric)(STATSD_METRIC *)) {
- STATSD_METRIC *m;
-
- // find the useful metrics (incremental = each time we are called, we check the new metrics only)
- dfe_start_read(index->dict, m) {
-        // since we add new metrics at the beginning of the index,
-        // we only need to check the new metrics, up to the point we last checked
- if(unlikely(is_metric_checked(m))) break;
-
- if(unlikely(!(m->options & STATSD_METRIC_OPTION_CHECKED_IN_APPS))) {
- nd_log(NDLS_ACCESS, NDLP_DEBUG, "NEW STATSD METRIC '%s': '%s'", statsd_metric_type_string(m->type), m->name);
- check_if_metric_is_for_app(index, m);
- m->options |= STATSD_METRIC_OPTION_CHECKED_IN_APPS;
- }
-
- if(unlikely(!(m->options & STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED))) {
- if(unlikely(statsd.private_charts >= statsd.max_private_charts_hard)) {
- netdata_log_debug(D_STATSD, "STATSD: metric '%s' will not be charted, because the hard limit of the maximum number "
- "of charts has been reached.", m->name);
-
- collector_info("STATSD: metric '%s' will not be charted, because the hard limit of the maximum number "
- "of charts (%u) has been reached. Increase the number of charts by editing netdata.conf, "
- "[statsd] section.", m->name, statsd.max_private_charts_hard);
-
- m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
- }
- else {
- if (simple_pattern_matches(statsd.charts_for, m->name)) {
- netdata_log_debug(D_STATSD, "STATSD: metric '%s' will be charted.", m->name);
- m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
- } else {
- netdata_log_debug(D_STATSD, "STATSD: metric '%s' will not be charted.", m->name);
- m->options &= ~STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED;
- }
- }
-
- m->options |= STATSD_METRIC_OPTION_PRIVATE_CHART_CHECKED;
- }
-
- // mark it as checked
- m->options |= STATSD_METRIC_OPTION_CHECKED;
-
- // check if it is used in charts
- if((m->options & (STATSD_METRIC_OPTION_PRIVATE_CHART_ENABLED|STATSD_METRIC_OPTION_USED_IN_APPS)) && !(m->options & STATSD_METRIC_OPTION_USEFUL)) {
- m->options |= STATSD_METRIC_OPTION_USEFUL;
- index->useful++;
- m->next_useful = index->first_useful;
- index->first_useful = m;
- }
- }
- dfe_done(m);
-
- // flush all the useful metrics
- STATSD_METRIC *m_prev;
- for(m_prev = m = index->first_useful; m ; m = m->next_useful) {
- flush_metric(m);
- if (m->options & STATSD_METRIC_OPTION_OBSOLETE) {
- if (m == index->first_useful)
- index->first_useful = m->next_useful;
- else
- m_prev->next_useful = m->next_useful;
- dictionary_del(index->dict, m->name);
- index->useful--;
- index->metrics--;
- statsd.private_charts--;
- } else
- m_prev = m;
- }
-}
-
-
-// --------------------------------------------------------------------------------------
-// statsd main thread
-
-static int statsd_listen_sockets_setup(void) {
- return listen_sockets_setup(&statsd.sockets);
-}
-
-static void statsd_main_cleanup(void *data) {
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)data;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
- collector_info("cleaning up...");
-
- if (statsd.collection_threads_status) {
- int i;
- for (i = 0; i < statsd.threads; i++) {
- spinlock_lock(&statsd.collection_threads_status[i].spinlock);
- if(statsd.collection_threads_status[i].running) {
- collector_info("STATSD: stopping data collection thread %d...", i + 1);
- netdata_thread_cancel(statsd.collection_threads_status[i].thread);
- }
- else {
-                collector_info("STATSD: data collection thread %d is already stopped.", i + 1);
- }
- spinlock_unlock(&statsd.collection_threads_status[i].spinlock);
- }
- }
-
- collector_info("STATSD: closing sockets...");
- listen_sockets_close(&statsd.sockets);
-
- // destroy the dictionaries
- dictionary_destroy(statsd.gauges.dict);
- dictionary_destroy(statsd.meters.dict);
- dictionary_destroy(statsd.counters.dict);
- dictionary_destroy(statsd.histograms.dict);
- dictionary_destroy(statsd.dictionaries.dict);
- dictionary_destroy(statsd.sets.dict);
- dictionary_destroy(statsd.timers.dict);
-
- collector_info("STATSD: cleanup completed.");
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-
- worker_unregister();
-}
-
-#define WORKER_STATSD_FLUSH_GAUGES 0
-#define WORKER_STATSD_FLUSH_COUNTERS 1
-#define WORKER_STATSD_FLUSH_METERS 2
-#define WORKER_STATSD_FLUSH_TIMERS 3
-#define WORKER_STATSD_FLUSH_HISTOGRAMS 4
-#define WORKER_STATSD_FLUSH_SETS 5
-#define WORKER_STATSD_FLUSH_DICTIONARIES 6
-#define WORKER_STATSD_FLUSH_STATS 7
-
-#if WORKER_UTILIZATION_MAX_JOB_TYPES < 8
-#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 8
-#endif
-
-void *statsd_main(void *ptr) {
- worker_register("STATSDFLUSH");
- worker_register_job_name(WORKER_STATSD_FLUSH_GAUGES, "gauges");
- worker_register_job_name(WORKER_STATSD_FLUSH_COUNTERS, "counters");
- worker_register_job_name(WORKER_STATSD_FLUSH_METERS, "meters");
- worker_register_job_name(WORKER_STATSD_FLUSH_TIMERS, "timers");
- worker_register_job_name(WORKER_STATSD_FLUSH_HISTOGRAMS, "histograms");
- worker_register_job_name(WORKER_STATSD_FLUSH_SETS, "sets");
- worker_register_job_name(WORKER_STATSD_FLUSH_DICTIONARIES, "dictionaries");
- worker_register_job_name(WORKER_STATSD_FLUSH_STATS, "statistics");
-
- netdata_thread_cleanup_push(statsd_main_cleanup, ptr);
-
- statsd.gauges.dict = dictionary_create_advanced(STATSD_DICTIONARY_OPTIONS, &dictionary_stats_category_collectors, 0);
- statsd.meters.dict = dictionary_create_advanced(STATSD_DICTIONARY_OPTIONS, &dictionary_stats_category_collectors, 0);
- statsd.counters.dict = dictionary_create_advanced(STATSD_DICTIONARY_OPTIONS, &dictionary_stats_category_collectors, 0);
- statsd.histograms.dict = dictionary_create_advanced(STATSD_DICTIONARY_OPTIONS, &dictionary_stats_category_collectors, 0);
- statsd.dictionaries.dict = dictionary_create_advanced(STATSD_DICTIONARY_OPTIONS, &dictionary_stats_category_collectors, 0);
- statsd.sets.dict = dictionary_create_advanced(STATSD_DICTIONARY_OPTIONS, &dictionary_stats_category_collectors, 0);
- statsd.timers.dict = dictionary_create_advanced(STATSD_DICTIONARY_OPTIONS, &dictionary_stats_category_collectors, 0);
-
- dictionary_register_insert_callback(statsd.gauges.dict, dictionary_metric_insert_callback, &statsd.gauges);
- dictionary_register_insert_callback(statsd.meters.dict, dictionary_metric_insert_callback, &statsd.meters);
- dictionary_register_insert_callback(statsd.counters.dict, dictionary_metric_insert_callback, &statsd.counters);
- dictionary_register_insert_callback(statsd.histograms.dict, dictionary_metric_insert_callback, &statsd.histograms);
- dictionary_register_insert_callback(statsd.dictionaries.dict, dictionary_metric_insert_callback, &statsd.dictionaries);
- dictionary_register_insert_callback(statsd.sets.dict, dictionary_metric_insert_callback, &statsd.sets);
- dictionary_register_insert_callback(statsd.timers.dict, dictionary_metric_insert_callback, &statsd.timers);
-
- dictionary_register_delete_callback(statsd.gauges.dict, dictionary_metric_delete_callback, &statsd.gauges);
- dictionary_register_delete_callback(statsd.meters.dict, dictionary_metric_delete_callback, &statsd.meters);
- dictionary_register_delete_callback(statsd.counters.dict, dictionary_metric_delete_callback, &statsd.counters);
- dictionary_register_delete_callback(statsd.histograms.dict, dictionary_metric_delete_callback, &statsd.histograms);
- dictionary_register_delete_callback(statsd.dictionaries.dict, dictionary_metric_delete_callback, &statsd.dictionaries);
- dictionary_register_delete_callback(statsd.sets.dict, dictionary_metric_delete_callback, &statsd.sets);
- dictionary_register_delete_callback(statsd.timers.dict, dictionary_metric_delete_callback, &statsd.timers);
-
- // ----------------------------------------------------------------------------------------------------------------
- // statsd configuration
-
- statsd.enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, "statsd", statsd.enabled);
-
- statsd.update_every = default_rrd_update_every;
- statsd.update_every = (int)config_get_number(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every);
- if(statsd.update_every < default_rrd_update_every) {
-        collector_error("STATSD: a flush interval of %d was given, but it cannot be less than netdata's update every. Using %d", statsd.update_every, default_rrd_update_every);
- statsd.update_every = default_rrd_update_every;
- }
-
-#ifdef HAVE_RECVMMSG
- statsd.recvmmsg_size = (size_t)config_get_number(CONFIG_SECTION_STATSD, "udp messages to process at once", (long long)statsd.recvmmsg_size);
-#endif
-
- statsd.charts_for = simple_pattern_create(
- config_get(CONFIG_SECTION_STATSD, "create private charts for metrics matching", "*"), NULL,
- SIMPLE_PATTERN_EXACT, true);
- statsd.max_private_charts_hard = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts_hard);
- statsd.set_obsolete_after = (size_t)config_get_number(CONFIG_SECTION_STATSD, "set charts as obsolete after secs", (long long)statsd.set_obsolete_after);
- statsd.decimal_detail = (collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail);
- statsd.tcp_idle_timeout = (size_t) config_get_number(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds", (long long int)statsd.tcp_idle_timeout);
- statsd.private_charts_hidden = (unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden);
-
- statsd.histogram_percentile = (double)config_get_float(CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile);
- if(isless(statsd.histogram_percentile, 0) || isgreater(statsd.histogram_percentile, 100)) {
- collector_error("STATSD: invalid histograms and timers percentile %0.5f given", statsd.histogram_percentile);
- statsd.histogram_percentile = 95.0;
- }
- {
- char buffer[314 + 1];
- snprintfz(buffer, sizeof(buffer) - 1, "%0.1f%%", statsd.histogram_percentile);
- statsd.histogram_percentile_str = strdupz(buffer);
- }
-
- statsd.dictionary_max_unique = config_get_number(CONFIG_SECTION_STATSD, "dictionaries max unique dimensions", statsd.dictionary_max_unique);
-
- if(config_get_boolean(CONFIG_SECTION_STATSD, "add dimension for number of events received", 0)) {
- statsd.gauges.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
- statsd.counters.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
- statsd.meters.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
- statsd.sets.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
- statsd.histograms.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
- statsd.timers.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
- statsd.dictionaries.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
- }
-
- if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on gauges (deleteGauges)", 0))
- statsd.gauges.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
-
- if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on counters (deleteCounters)", 0))
- statsd.counters.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
-
- if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on meters (deleteMeters)", 0))
- statsd.meters.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
-
- if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on sets (deleteSets)", 0))
- statsd.sets.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
-
- if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on histograms (deleteHistograms)", 0))
- statsd.histograms.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
-
- if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on timers (deleteTimers)", 0))
- statsd.timers.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
-
- if(config_get_boolean(CONFIG_SECTION_STATSD, "gaps on dictionaries (deleteDictionaries)", 0))
- statsd.dictionaries.default_options |= STATSD_METRIC_OPTION_SHOW_GAPS_WHEN_NOT_COLLECTED;
-
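-    // For reference, a hypothetical netdata.conf [statsd] section using the keys
-    // read above (values are illustrative, not necessarily the compiled-in defaults):
-    //
-    //    [statsd]
-    //        update every (flushInterval) = 1
-    //        create private charts for metrics matching = *
-    //        max private charts hard limit = 1000
-    //        histograms and timers percentile (percentThreshold) = 95.0
-    //        gaps on gauges (deleteGauges) = no
-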
- size_t max_sockets = (size_t)config_get_number(CONFIG_SECTION_STATSD, "statsd server max TCP sockets", (long long int)(rlimit_nofile.rlim_cur / 4));
-
-#ifdef STATSD_MULTITHREADED
- statsd.threads = (int)config_get_number(CONFIG_SECTION_STATSD, "threads", processors);
- if(statsd.threads < 1) {
-        collector_error("STATSD: Invalid number of threads %d, using %d", statsd.threads, processors);
-        statsd.threads = processors;
-        config_set_number(CONFIG_SECTION_STATSD, "threads", statsd.threads);
- }
-#else
- statsd.threads = 1;
-#endif
-
- // read custom application definitions
- statsd_readdir(netdata_configured_user_config_dir, netdata_configured_stock_config_dir, "statsd.d");
-
- // ----------------------------------------------------------------------------------------------------------------
- // statsd setup
-
- if(!statsd.enabled) goto cleanup;
-
- statsd_listen_sockets_setup();
- if(!statsd.sockets.opened) {
- collector_error("STATSD: No statsd sockets to listen to. statsd will be disabled.");
- goto cleanup;
- }
-
- statsd.collection_threads_status = callocz((size_t)statsd.threads, sizeof(struct collection_thread_status));
-
- int i;
- for(i = 0; i < statsd.threads ;i++) {
- statsd.collection_threads_status[i].max_sockets = max_sockets / statsd.threads;
- char tag[NETDATA_THREAD_TAG_MAX + 1];
- snprintfz(tag, NETDATA_THREAD_TAG_MAX, "STATSD_IN[%d]", i + 1);
- spinlock_init(&statsd.collection_threads_status[i].spinlock);
- netdata_thread_create(&statsd.collection_threads_status[i].thread, tag, NETDATA_THREAD_OPTION_DEFAULT, statsd_collector_thread, &statsd.collection_threads_status[i]);
- }
-
- // ----------------------------------------------------------------------------------------------------------------
- // statsd monitoring charts
-
- RRDSET *st_metrics = NULL;
- RRDDIM *rd_metrics_gauge = NULL;
- RRDDIM *rd_metrics_counter = NULL;
- RRDDIM *rd_metrics_timer = NULL;
- RRDDIM *rd_metrics_meter = NULL;
- RRDDIM *rd_metrics_histogram = NULL;
- RRDDIM *rd_metrics_set = NULL;
- RRDDIM *rd_metrics_dictionary = NULL;
- RRDSET *st_useful_metrics = NULL;
- RRDDIM *rd_useful_metrics_gauge = NULL;
- RRDDIM *rd_useful_metrics_counter = NULL;
- RRDDIM *rd_useful_metrics_timer = NULL;
- RRDDIM *rd_useful_metrics_meter = NULL;
- RRDDIM *rd_useful_metrics_histogram = NULL;
- RRDDIM *rd_useful_metrics_set = NULL;
- RRDDIM *rd_useful_metrics_dictionary = NULL;
- RRDSET *st_events = NULL;
- RRDDIM *rd_events_gauge = NULL;
- RRDDIM *rd_events_counter = NULL;
- RRDDIM *rd_events_timer = NULL;
- RRDDIM *rd_events_meter = NULL;
- RRDDIM *rd_events_histogram = NULL;
- RRDDIM *rd_events_set = NULL;
- RRDDIM *rd_events_dictionary = NULL;
- RRDDIM *rd_events_unknown = NULL;
- RRDDIM *rd_events_errors = NULL;
- RRDSET *st_reads = NULL;
- RRDDIM *rd_reads_tcp = NULL;
- RRDDIM *rd_reads_udp = NULL;
- RRDSET *st_bytes = NULL;
- RRDDIM *rd_bytes_tcp = NULL;
- RRDDIM *rd_bytes_udp = NULL;
- RRDSET *st_packets = NULL;
- RRDDIM *rd_packets_tcp = NULL;
- RRDDIM *rd_packets_udp = NULL;
- RRDSET *st_tcp_connects = NULL;
- RRDDIM *rd_tcp_connects = NULL;
- RRDDIM *rd_tcp_disconnects = NULL;
- RRDSET *st_tcp_connected = NULL;
- RRDDIM *rd_tcp_connected = NULL;
- RRDSET *st_pcharts = NULL;
- RRDDIM *rd_pcharts = NULL;
-
- if(global_statistics_enabled) {
- st_metrics = rrdset_create_localhost(
- "netdata",
- "statsd_metrics",
- NULL,
- "statsd",
- NULL,
- "Metrics in the netdata statsd database",
- "metrics",
- PLUGIN_STATSD_NAME,
- "stats",
- 132010,
- statsd.update_every,
- RRDSET_TYPE_STACKED);
- rd_metrics_gauge = rrddim_add(st_metrics, "gauges", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_metrics_counter = rrddim_add(st_metrics, "counters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_metrics_timer = rrddim_add(st_metrics, "timers", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_metrics_meter = rrddim_add(st_metrics, "meters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_metrics_histogram = rrddim_add(st_metrics, "histograms", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_metrics_set = rrddim_add(st_metrics, "sets", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_metrics_dictionary = rrddim_add(st_metrics, "dictionaries", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- st_useful_metrics = rrdset_create_localhost(
- "netdata",
- "statsd_useful_metrics",
- NULL,
- "statsd",
- NULL,
- "Useful metrics in the netdata statsd database",
- "metrics",
- PLUGIN_STATSD_NAME,
- "stats",
- 132010,
- statsd.update_every,
- RRDSET_TYPE_STACKED);
- rd_useful_metrics_gauge = rrddim_add(st_useful_metrics, "gauges", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_useful_metrics_counter = rrddim_add(st_useful_metrics, "counters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_useful_metrics_timer = rrddim_add(st_useful_metrics, "timers", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_useful_metrics_meter = rrddim_add(st_useful_metrics, "meters", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_useful_metrics_histogram = rrddim_add(st_useful_metrics, "histograms", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_useful_metrics_set = rrddim_add(st_useful_metrics, "sets", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- rd_useful_metrics_dictionary = rrddim_add(st_useful_metrics, "dictionaries", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- st_events = rrdset_create_localhost(
- "netdata",
- "statsd_events",
- NULL,
- "statsd",
- NULL,
- "Events processed by the netdata statsd server",
- "events/s",
- PLUGIN_STATSD_NAME,
- "stats",
- 132011,
- statsd.update_every,
- RRDSET_TYPE_STACKED);
- rd_events_gauge = rrddim_add(st_events, "gauges", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_events_counter = rrddim_add(st_events, "counters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_events_timer = rrddim_add(st_events, "timers", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_events_meter = rrddim_add(st_events, "meters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_events_histogram = rrddim_add(st_events, "histograms", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_events_set = rrddim_add(st_events, "sets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_events_dictionary = rrddim_add(st_events, "dictionaries", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_events_unknown = rrddim_add(st_events, "unknown", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_events_errors = rrddim_add(st_events, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- st_reads = rrdset_create_localhost(
- "netdata",
- "statsd_reads",
- NULL,
- "statsd",
- NULL,
- "Read operations made by the netdata statsd server",
- "reads/s",
- PLUGIN_STATSD_NAME,
- "stats",
- 132012,
- statsd.update_every,
- RRDSET_TYPE_STACKED);
- rd_reads_tcp = rrddim_add(st_reads, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_reads_udp = rrddim_add(st_reads, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- st_bytes = rrdset_create_localhost(
- "netdata",
- "statsd_bytes",
- NULL,
- "statsd",
- NULL,
- "Bytes read by the netdata statsd server",
- "kilobits/s",
- PLUGIN_STATSD_NAME,
- "stats",
- 132013,
- statsd.update_every,
- RRDSET_TYPE_STACKED);
- rd_bytes_tcp = rrddim_add(st_bytes, "tcp", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rd_bytes_udp = rrddim_add(st_bytes, "udp", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
-
- st_packets = rrdset_create_localhost(
- "netdata",
- "statsd_packets",
- NULL,
- "statsd",
- NULL,
- "Network packets processed by the netdata statsd server",
- "packets/s",
- PLUGIN_STATSD_NAME,
- "stats",
- 132014,
- statsd.update_every,
- RRDSET_TYPE_STACKED);
- rd_packets_tcp = rrddim_add(st_packets, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_packets_udp = rrddim_add(st_packets, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- st_tcp_connects = rrdset_create_localhost(
- "netdata",
- "tcp_connects",
- NULL,
- "statsd",
- NULL,
- "statsd server TCP connects and disconnects",
- "events",
- PLUGIN_STATSD_NAME,
- "stats",
- 132015,
- statsd.update_every,
- RRDSET_TYPE_LINE);
- rd_tcp_connects = rrddim_add(st_tcp_connects, "connects", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rd_tcp_disconnects = rrddim_add(st_tcp_connects, "disconnects", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- st_tcp_connected = rrdset_create_localhost(
- "netdata",
- "tcp_connected",
- NULL,
- "statsd",
- NULL,
- "statsd server TCP connected sockets",
- "sockets",
- PLUGIN_STATSD_NAME,
- "stats",
- 132016,
- statsd.update_every,
- RRDSET_TYPE_LINE);
- rd_tcp_connected = rrddim_add(st_tcp_connected, "connected", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- st_pcharts = rrdset_create_localhost(
- "netdata",
- "private_charts",
- NULL,
- "statsd",
- NULL,
- "Private metric charts created by the netdata statsd server",
- "charts",
- PLUGIN_STATSD_NAME,
- "stats",
- 132020,
- statsd.update_every,
- RRDSET_TYPE_AREA);
- rd_pcharts = rrddim_add(st_pcharts, "charts", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- // ----------------------------------------------------------------------------------------------------------------
- // statsd thread to turn metrics into charts
-
- usec_t step = statsd.update_every * USEC_PER_SEC;
- heartbeat_t hb;
- heartbeat_init(&hb);
- while(service_running(SERVICE_COLLECTORS)) {
- worker_is_idle();
- heartbeat_next(&hb, step);
-
- worker_is_busy(WORKER_STATSD_FLUSH_GAUGES);
- statsd_flush_index_metrics(&statsd.gauges, statsd_flush_gauge);
-
- worker_is_busy(WORKER_STATSD_FLUSH_COUNTERS);
- statsd_flush_index_metrics(&statsd.counters, statsd_flush_counter);
-
- worker_is_busy(WORKER_STATSD_FLUSH_METERS);
- statsd_flush_index_metrics(&statsd.meters, statsd_flush_meter);
-
- worker_is_busy(WORKER_STATSD_FLUSH_TIMERS);
- statsd_flush_index_metrics(&statsd.timers, statsd_flush_timer);
-
- worker_is_busy(WORKER_STATSD_FLUSH_HISTOGRAMS);
- statsd_flush_index_metrics(&statsd.histograms, statsd_flush_histogram);
-
- worker_is_busy(WORKER_STATSD_FLUSH_SETS);
- statsd_flush_index_metrics(&statsd.sets, statsd_flush_set);
-
- worker_is_busy(WORKER_STATSD_FLUSH_DICTIONARIES);
- statsd_flush_index_metrics(&statsd.dictionaries,statsd_flush_dictionary);
-
- worker_is_busy(WORKER_STATSD_FLUSH_STATS);
- statsd_update_all_app_charts();
-
- if(unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
-
- if(global_statistics_enabled) {
- rrddim_set_by_pointer(st_metrics, rd_metrics_gauge, (collected_number)statsd.gauges.metrics);
- rrddim_set_by_pointer(st_metrics, rd_metrics_counter, (collected_number)statsd.counters.metrics);
- rrddim_set_by_pointer(st_metrics, rd_metrics_timer, (collected_number)statsd.timers.metrics);
- rrddim_set_by_pointer(st_metrics, rd_metrics_meter, (collected_number)statsd.meters.metrics);
- rrddim_set_by_pointer(st_metrics, rd_metrics_histogram, (collected_number)statsd.histograms.metrics);
- rrddim_set_by_pointer(st_metrics, rd_metrics_set, (collected_number)statsd.sets.metrics);
- rrddim_set_by_pointer(st_metrics, rd_metrics_dictionary, (collected_number)statsd.dictionaries.metrics);
- rrdset_done(st_metrics);
-
- rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_gauge, (collected_number)statsd.gauges.useful);
- rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_counter, (collected_number)statsd.counters.useful);
- rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_timer, (collected_number)statsd.timers.useful);
- rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_meter, (collected_number)statsd.meters.useful);
- rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_histogram, (collected_number)statsd.histograms.useful);
- rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_set, (collected_number)statsd.sets.useful);
- rrddim_set_by_pointer(st_useful_metrics, rd_useful_metrics_dictionary, (collected_number)statsd.dictionaries.useful);
- rrdset_done(st_useful_metrics);
-
- rrddim_set_by_pointer(st_events, rd_events_gauge, (collected_number)statsd.gauges.events);
- rrddim_set_by_pointer(st_events, rd_events_counter, (collected_number)statsd.counters.events);
- rrddim_set_by_pointer(st_events, rd_events_timer, (collected_number)statsd.timers.events);
- rrddim_set_by_pointer(st_events, rd_events_meter, (collected_number)statsd.meters.events);
- rrddim_set_by_pointer(st_events, rd_events_histogram, (collected_number)statsd.histograms.events);
- rrddim_set_by_pointer(st_events, rd_events_set, (collected_number)statsd.sets.events);
- rrddim_set_by_pointer(st_events, rd_events_dictionary, (collected_number)statsd.dictionaries.events);
- rrddim_set_by_pointer(st_events, rd_events_unknown, (collected_number)statsd.unknown_types);
- rrddim_set_by_pointer(st_events, rd_events_errors, (collected_number)statsd.socket_errors);
- rrdset_done(st_events);
-
- rrddim_set_by_pointer(st_reads, rd_reads_tcp, (collected_number)statsd.tcp_socket_reads);
- rrddim_set_by_pointer(st_reads, rd_reads_udp, (collected_number)statsd.udp_socket_reads);
- rrdset_done(st_reads);
-
- rrddim_set_by_pointer(st_bytes, rd_bytes_tcp, (collected_number)statsd.tcp_bytes_read);
- rrddim_set_by_pointer(st_bytes, rd_bytes_udp, (collected_number)statsd.udp_bytes_read);
- rrdset_done(st_bytes);
-
- rrddim_set_by_pointer(st_packets, rd_packets_tcp, (collected_number)statsd.tcp_packets_received);
- rrddim_set_by_pointer(st_packets, rd_packets_udp, (collected_number)statsd.udp_packets_received);
- rrdset_done(st_packets);
-
- rrddim_set_by_pointer(st_tcp_connects, rd_tcp_connects, (collected_number)statsd.tcp_socket_connects);
- rrddim_set_by_pointer(st_tcp_connects, rd_tcp_disconnects, (collected_number)statsd.tcp_socket_disconnects);
- rrdset_done(st_tcp_connects);
-
- rrddim_set_by_pointer(st_tcp_connected, rd_tcp_connected, (collected_number)statsd.tcp_socket_connected);
- rrdset_done(st_tcp_connected);
-
- rrddim_set_by_pointer(st_pcharts, rd_pcharts, (collected_number)statsd.private_charts);
- rrdset_done(st_pcharts);
- }
- }
-
-cleanup: ; // added semi-colon to prevent older gcc error: label at end of compound statement
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/systemd-journal.plugin/Makefile.am b/collectors/systemd-journal.plugin/Makefile.am
deleted file mode 100644
index 48f667c1b..000000000
--- a/collectors/systemd-journal.plugin/Makefile.am
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- systemd-journal-self-signed-certs.sh \
- forward_secure_sealing.md \
- active_journal_centralization_guide_no_encryption.md \
- passive_journal_centralization_guide_no_encryption.md \
- passive_journal_centralization_guide_self_signed_certs.md \
- $(NULL)
-
-dist_libconfig_DATA = \
- $(NULL)
diff --git a/collectors/systemd-journal.plugin/README.md b/collectors/systemd-journal.plugin/README.md
deleted file mode 100644
index c3c639045..000000000
--- a/collectors/systemd-journal.plugin/README.md
+++ /dev/null
@@ -1,472 +0,0 @@
-
-# `systemd` journal plugin
-
-[KEY FEATURES](#key-features) | [JOURNAL SOURCES](#journal-sources) | [JOURNAL FIELDS](#journal-fields) |
-[PLAY MODE](#play-mode) | [FULL TEXT SEARCH](#full-text-search) | [PERFORMANCE](#query-performance) |
-[CONFIGURATION](#configuration-and-maintenance) | [FAQ](#faq)
-
-The `systemd` journal plugin by Netdata makes viewing, exploring and analyzing `systemd` journal logs simple and
-efficient.
-It automatically discovers available journal sources, allows advanced filtering, offers interactive visual
-representations, and supports exploring the logs of individual servers as well as infrastructure-wide
-journal centralization servers.
-
-![image](https://github.com/netdata/netdata/assets/2662304/691b7470-ec56-430c-8b81-0c9e49012679)
-
-## Key features
-
-- Works on both **individual servers** and **journal centralization servers**.
-- Supports `persistent` and `volatile` journals.
-- Supports `system`, `user`, `namespaces` and `remote` journals.
-- Allows filtering on **any journal field** or **field value**, for any time-frame.
-- Allows **full text search** (`grep`) on all journal fields, for any time-frame.
-- Provides a **histogram** for log entries over time, with a breakdown per field-value, for any field and any
-  time-frame.
-- Works directly on journal files, without any other third-party components.
-- Supports coloring log entries, the same way `journalctl` does.
-- In PLAY mode provides the same experience as `journalctl -f`, showing new log entries immediately after they are
- received.
-
-### Prerequisites
-
-`systemd-journal.plugin` is a Netdata Function Plugin.
-
-To protect your privacy, as with all Netdata Functions, a free Netdata Cloud user account is required to access it.
-For more information check [this discussion](https://github.com/netdata/netdata/discussions/16136).
-
-### Limitations
-
-#### Plugin availability
-
-The following are limitations related to the availability of the plugin:
-
-- Netdata versions prior to 1.44, when shipped in a docker container, do not include this plugin.
-  The problem is that `libsystemd` is not available in Alpine Linux (there is a `libsystemd`, but it is a dummy that
-  returns failure on all calls). Starting with Netdata version 1.44, Netdata containers use a Debian base image,
-  making this plugin available when Netdata is running in a container.
-- For the same reason (lack of `systemd` support for Alpine Linux), the plugin is not available on `static` builds of
-  Netdata (which are based on `musl`, not `glibc`). If your Netdata is installed in `/opt/netdata`, you most likely
-  have a static build of Netdata.
-- On old systemd systems (like CentOS 7), the plugin always runs in "full data query" mode, which makes it slower. The
-  reason is that the systemd API is missing some important calls the plugin needs in order to use the field indexes of
-  the `systemd` journal. However, when running in this mode, the plugin also offers negative matches on the data (like
-  filtering for all logs that do not have some field set), and this is why "full data query" mode is also offered as an
-  option even on newer versions of `systemd`.
-
-#### `systemd` journal features
-
-The following are limitations related to the features of `systemd` journal:
-
-- This plugin assumes that binary field values are text fields with newlines in them. `systemd-journal` has the ability
-  to support binary fields, without specifying the nature of the binary data. However, binary fields are commonly used
-  to store log entries that include multiple lines of text. The plugin treats all binary fields as multi-line text.
-- This plugin does not support multiple values per field for any given log entry. `systemd` journal has the ability to
- accept the same field key, multiple times, with multiple values on a single log entry. This plugin will present the
- last value and ignore the others for this log entry.
-- This plugin will only read journal files located in `/var/log/journal` or `/run/log/journal`. `systemd-journal-remote` has the
- ability to store journal files anywhere (user configured). If journal files are not located in `/var/log/journal`
-  or `/run/log/journal` (and any of their subdirectories), the plugin will not find them. A simple solution is to link
-  the other directories somewhere inside `/var/log/journal`, as shown in the example below. The plugin will pick them
-  up, even if a sub-directory of `/var/log/journal` is a link to a directory outside `/var/log/journal`.
-
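-For example, if journals are stored outside the standard locations, a symbolic link makes them visible to the plugin
-(paths are illustrative):
-
-```bash
-# hypothetical example: expose journals stored outside /var/log/journal
-sudo ln -s /srv/journals/remote /var/log/journal/remote-srv
-```
-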
-Other than the above, this plugin supports all features of `systemd` journals.
-
-## Journal Sources
-
-The plugin automatically detects the available journal sources, based on the journal files available in
-`/var/log/journal` (persistent logs) and `/run/log/journal` (volatile logs).
-
-![journal-sources](https://github.com/netdata/netdata/assets/2662304/28e63a3e-6809-4586-b3b0-80755f340e31)
-
-The plugin, by default, merges all journal sources together, to provide a unified view of all log messages available.
-
-> To improve query performance, we recommend selecting the relevant journal source, before doing more analysis on the
-> logs.
-
-### `system` journals
-
-`system` journals are the default journals available on all `systemd` based systems.
-
-`system` journals contain:
-
-- kernel log messages (via `kmsg`),
-- audit records, originating from the kernel audit subsystem,
-- messages received by `systemd-journald` via `syslog`,
-- messages received via the standard output and error of service units,
-- structured messages received via the native journal API.
-
-### `user` journals
-
-Unlike `journalctl`, the Netdata plugin allows viewing, exploring and querying the journal files of **all users**.
-
-By default, each user with a UID outside the range of system users (0 - 999), dynamic service users,
-and the nobody user (65534), gets their own set of `user` journal files. For more information about
-this policy check [Users, Groups, UIDs and GIDs on systemd Systems](https://systemd.io/UIDS-GIDS/).
-
-Keep in mind that `user` journals are merged with the `system` journals when they are propagated to a journal
-centralization server. So, at the centralization server, the `remote` journals contain both the `system` and `user`
-journals of the sender.
-
-### `namespaces` journals
-
-The plugin auto-detects the namespaces available and provides a list of all namespaces at the "sources" list on the UI.
-
-Journal namespaces are both a mechanism for logically isolating the log stream of projects consisting
-of one or more services from the rest of the system and a mechanism for improving performance.
-
-`systemd` service units may be assigned to a specific journal namespace through the `LogNamespace=` unit file setting.
-
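-For example, a service could be placed in its own namespace with a drop-in like the following (the namespace name
-`myproject` is illustrative):
-
-```
-[Service]
-LogNamespace=myproject
-```
-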
-Keep in mind that namespaces require special configuration to be propagated to a journal centralization server.
-This makes them a little more difficult to handle, from the administration perspective.
-
-### `remote` journals
-
-Remote journals are created by `systemd-journal-remote`. This `systemd` feature allows creating logs centralization
-points within your infrastructure, based exclusively on `systemd`.
-
-Usually `remote` journals are named by the IP of the server sending these logs. The Netdata plugin automatically
-extracts these IPs and performs a reverse DNS lookup to find their hostnames. When this is successful,
-`remote` journals are named by the hostnames of the origin servers.
-
-For information about configuring a journal centralization server,
-check [this FAQ item](#how-do-i-configure-a-journal-centralization-server).
-
-## Journal Fields
-
-`systemd` journals are designed to support multiple fields per log entry. The power of `systemd` journals is that,
-unlike other log management systems, it supports dynamic and variable fields for each log message,
-while all fields and their values are indexed for fast querying.
-
-This means that each application can log messages annotated with its own unique fields and values, and `systemd`
-journals will automatically index all of them, without any configuration or manual action.
-
-For a description of the most frequent fields found in `systemd` journals, check `man systemd.journal-fields`.
-
-Fields found in the journal files are automatically added to the UI in multiple places to help you explore
-and filter the data.
-
-The plugin automatically enriches certain fields to make them more user-friendly:
-
-- `_BOOT_ID`: the hex value is annotated with the timestamp of the first message encountered for this boot id.
-- `PRIORITY`: the numeric value is replaced with the human-readable name of each priority.
-- `SYSLOG_FACILITY`: the encoded value is replaced with the human-readable name of each facility.
-- `ERRNO`: the numeric value is annotated with the short name of each value.
-- `_UID` `_AUDIT_LOGINUID`, `_SYSTEMD_OWNER_UID`, `OBJECT_UID`, `OBJECT_SYSTEMD_OWNER_UID`, `OBJECT_AUDIT_LOGINUID`:
- the local user database is consulted to annotate them with usernames.
-- `_GID`, `OBJECT_GID`: the local group database is consulted to annotate them with group names.
-- `_CAP_EFFECTIVE`: the encoded value is annotated with a human-readable list of the linux capabilities.
-- `_SOURCE_REALTIME_TIMESTAMP`: the numeric value is annotated with human-readable datetime in UTC.
-- `MESSAGE_ID`: for the known `MESSAGE_ID`s, the value is replaced with the well known name of the event.
-
-The values of all other fields are presented as found in the journals.
-
-> IMPORTANT:
-> The UID and GID annotations are added during presentation and are taken from the server running the plugin.
-> For `remote` sources, the names presented may not reflect the actual user and group names on the origin server.
-> The numeric value will still be visible though, as-is on the origin server.
-
-The annotations are not searchable with full-text search. They are only added for the presentation of the fields.
-
-### Journal fields as columns in the table
-
-All journal fields available in the journal files are offered as columns on the UI. Use the gear button above the table:
-
-![image](https://github.com/netdata/netdata/assets/2662304/cd75fb55-6821-43d4-a2aa-033792c7f7ac)
-
-### Journal fields as additional info to each log entry
-
-When you click a log line, the `info` sidebar will open on the right of the screen, to provide the full list of fields
-related to this log line. You can close this `info` sidebar, by selecting the filter icon at its top.
-
-![image](https://github.com/netdata/netdata/assets/2662304/3207794c-a61b-444c-8ffe-6c07cbc90ae2)
-
-### Journal fields as filters
-
-The plugin presents a select list of fields as filters to the query, with counters for each of the possible values
-for the field. This list can be used to quickly check which fields and values are available for the entire time-frame
-of the query.
-
-Internally the plugin has:
-
-1. A white-list of fields, to be presented as filters.
-2. A black-list of fields, to prevent them from becoming filters. This list includes fields with a very high
-   cardinality, like timestamps, unique message ids, etc. This is mainly for protecting the server's performance,
-   to avoid building in-memory indexes for fields whose values are almost all unique.
-
-Keep in mind that the values presented in the filters, and their sorting, are affected by the "full data queries"
-setting:
-
-![image](https://github.com/netdata/netdata/assets/2662304/ac710d46-07c2-487b-8ce3-e7f767b9ae0f)
-
-When "full data queries" is off, empty values are hidden and cannot be selected. This is due to a limitation of
-`libsystemd` that does not allow negative or empty matches. Also, values with zero counters may appear in the list.
-
-When "full data queries" is on, Netdata applies all filtering to the data (not `libsystemd`), but this means
-that all the data of the entire time-frame, without any filtering applied, has to be read by the plugin to prepare
-the response. So, "full data queries" can be significantly slower over long time-frames.
-
-### Journal fields as histogram sources
-
-The plugin presents a histogram of the number of log entries across time.
-
-The data source of this histogram can be any of the fields that are available as filters.
-For each of the values this field has, across the entire time-frame of the query, the histogram will get corresponding
-dimensions, showing the number of log entries, per value, over time.
-
-The granularity of the histogram is adjusted automatically to have about 150 columns visible on screen.
-
-The histogram presented by the plugin is interactive:
-
-- **Zoom**, either with the global date-time picker, or the zoom tool in the histogram's toolbox.
-- **Pan**, either with global date-time picker, or by dragging with the mouse the chart to the left or the right.
-- **Click**, to quickly jump to the highlighted point in time in the log entries.
-
-![image](https://github.com/netdata/netdata/assets/2662304/d3dcb1d1-daf4-49cf-9663-91b5b3099c2d)
-
-## PLAY mode
-
-The plugin supports PLAY mode, to continuously update the screen with new log entries found in the journal files.
-Just hit the "play" button at the top of the Netdata dashboard screen.
-
-On centralized log servers, PLAY mode provides a unified view of all the new logs encountered across the entire
-infrastructure, from all hosts sending logs to the central logs server via `systemd-journal-remote`.
-
-## Full-text search
-
-The plugin supports searching for any text on all fields of the log entries.
-
-Full text search is combined with the selected filters.
-
-The text box accepts asterisks `*` as wildcards. So, `a*b*c` means match anything that contains `a`, then `b` and
-then `c` with anything between them.
-
-Spaces are treated as OR expressions. So that `a*b c*d` means `a*b OR c*d`.
-
-Negative expressions are supported, by prefixing any string with `!`. Example: `!systemd *` means match anything that
-does not contain `systemd` on any of its fields.
-
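-A few illustrative expressions, following the rules above:
-
-```
-error         # entries containing "error" in any field
-a*b*c         # entries containing "a", then "b", then "c", in this order
-a*b c*d       # entries matching "a*b" OR "c*d"
-!systemd *    # entries that do not contain "systemd" in any field
-```
-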
-## Query performance
-
-Journal files are designed to be accessed by multiple readers and one writer, concurrently.
-
-Readers (like this Netdata plugin) open the journal files, and `libsystemd`, behind the scenes, maps regions
-of the files into memory to satisfy each query.
-
-On logs aggregation servers, the performance of the queries depends on the following factors:
-
-1. The **number of files** involved in each query.
-
-   This is why we suggest selecting a source when possible.
-
-2. The **speed of the disks** hosting the journal files.
-
-   Querying journal files requires a lot of reading, so the faster the disks, the faster the query will finish.
-
-3. The **memory available** for caching parts of the files.
-
- Increased memory will help the kernel cache the most frequently used parts of the journal files, avoiding disk I/O
- and speeding up queries.
-
-4. The **number of filters** applied.
-
- Queries are significantly faster when just a few filters are selected.
-
-In general, for a faster experience, **keep a low number of rows within the visible timeframe**.
-
-Even on long timeframes, selecting a couple of filters that will result in a **few dozen thousand** log entries
-will provide fast responses, usually less than a second. In contrast, viewing timeframes with **millions
-of entries** may result in longer delays.
-
-The plugin aborts journal queries when your browser cancels inflight requests. This allows you to work on the UI
-while there are background queries running.
-
-At the time of this writing, this Netdata plugin is about 25-30 times faster than `journalctl` on queries that access
-multiple journal files, over long time-frames.
-
-During the development of this plugin, we submitted a number of patches to `systemd`, improving `journalctl`
-performance by a factor of 14:
-
-- <https://github.com/systemd/systemd/pull/29365>
-- <https://github.com/systemd/systemd/pull/29366>
-- <https://github.com/systemd/systemd/pull/29261>
-
-However, even after these patches are merged, `journalctl` will still be 2x slower than this Netdata plugin,
-on multi-journal queries.
-
-The problem lies in the way `libsystemd` handles multi-journal file queries. To overcome this problem,
-the Netdata plugin queries each file individually and then merges the results to be returned.
-This is transparent, thanks to the `facets` library in `libnetdata` that handles on-the-fly indexing, filtering,
-and searching of any dataset, independently of its source.
-
-## Performance at scale
-
-On busy logs servers, or when querying long timeframes that match millions of log entries, the plugin uses a sampling
-algorithm to allow it to respond promptly. It works like this:
-
-1. The latest 500k log entries are queried in full, evaluating all the fields of every single log entry. This evaluation
- allows counting the unique values per field, updating the counters next to each value at the filters section of the
- dashboard.
-2. When the latest 500k log entries have been processed and there is more data to read, the plugin divides another 500k
-   log entries evenly across the journal files matched by the query. So, it will continue to evaluate all the fields
-   of all log entries, up to the budget per file, aiming to fully query 1 million log entries in total.
-3. When the budget is hit for a given file, the plugin continues to scan log entries, but this time it does not evaluate
- the fields and their values, so the counters per field and value are not updated. These unsampled log entries are
- shown in the histogram with the label `[unsampled]`.
-4. The plugin continues to count `[unsampled]` entries until it has counted as many of them as the sampled entries,
-   and at least 1% of the journal file has been processed.
-5. When the `[unsampled]` budget is exhausted, the plugin stops processing the journal file and based on the processing
- completed so far and the number of entries in the journal file, it estimates the remaining number of log entries in
- that file. This is shown as `[estimated]` at the histogram.
-6. In systemd versions 254 or later, the plugin fetches the unique sequence number of each log entry and calculates
-   the percentage of the file matched by the query, versus the total number of log entries in the journal file.
-7. In systemd versions prior to 254, the plugin estimates the number of entries the journal file contributes to the
-   query, using the number of log entries it matched vs. the total duration the journal file covers.
-
-The above allows the plugin to respond promptly even when the journal files contain several dozen million log entries,
-while providing accurate estimations of the log entries over time at the histogram and enough counters at the fields
-filtering section to help users get an overview of the whole timeframe. A rough sketch of the per-file decision logic
-is shown below.
-
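-A minimal sketch in C of the per-file sampling decision described above (names, fields and thresholds are
-illustrative; this is not the plugin's actual code):
-
-```c
-#include <stddef.h>
-#include <stdbool.h>
-
-typedef enum {
-    SAMPLE_FULL,        // evaluate all fields and update the filter counters
-    SAMPLE_UNSAMPLED,   // count the entry as [unsampled], do not evaluate fields
-    SAMPLE_ESTIMATE     // stop reading; extrapolate the rest as [estimated]
-} sampling_action;
-
-typedef struct {
-    size_t sampled;         // entries fully evaluated in this file
-    size_t unsampled;       // entries counted but not evaluated
-    size_t budget;          // this file's share of the sampling budget
-    size_t estimated_total; // total entries this file is believed to contain
-} file_sampling;
-
-static sampling_action next_action(const file_sampling *f, size_t entries_seen) {
-    if (f->sampled < f->budget)
-        return SAMPLE_FULL;
-
-    // keep counting unsampled entries until we have counted as many as we
-    // sampled AND at least 1% of the file has been processed
-    bool enough_unsampled = f->unsampled >= f->sampled;
-    bool one_percent_done = entries_seen * 100 >= f->estimated_total;
-
-    return (enough_unsampled && one_percent_done) ? SAMPLE_ESTIMATE : SAMPLE_UNSAMPLED;
-}
-```
-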
-Because the latest 500k log entries, and at least 1% of all journal files (which are spread over time), are fully
-evaluated, including counting the number of appearances of each field value, the plugin usually provides an accurate
-representation of the whole timeframe.
-
-Keep in mind that although the plugin is quite effective and responds promptly when there are hundreds of journal files
-matching a query, response times may be longer when there are several thousands of smaller files. systemd versions 254+
-attempt to solve this problem by allowing `systemd-journal-remote` to create larger files. However, for systemd
-versions prior to 254, `systemd-journal-remote` creates files of up to 32MB each; on very busy journal centralization
-servers, aggregating several thousands of log entries per second, the number of files can quickly grow to several tens
-of thousands. In such setups, the plugin should ideally skip processing journal files entirely, relying solely on
-estimations based on the sequence of files each file is part of. However, this has not been implemented yet. To improve
-query performance in such setups, the user has to query smaller timeframes.
-
-Another optimization, taking place in huge journal centralization points, is the initial scan of the database. The
-plugin needs to know the list of all journal files available, including the details of the first and the last message
-in each of them. When there are several thousands of files in a directory (as usually happens in
-`/var/log/journal/remote`), listing the directory and examining each file can take a considerable amount of time (even
-`ls -l` takes minutes). To work around this problem, the plugin uses `inotify` to receive file updates immediately,
-and scans the journal files from the newest to the oldest, allowing the user interface to work immediately after
-startup for the most recent timeframes.
-
-### Best practices for better performance
-
-systemd-journal has been designed **first to be reliable** and then to be fast. It includes several mechanisms to ensure
-minimal data loss under all conditions (e.g. disk corruption, tampering, forward secure sealing) and, despite the fact
-that it utilizes several techniques to minimize its disk footprint (like deduplication of log entries, linking of
-values and fields, and compression), the disk footprint of journal files remains significantly higher compared to
-other log management solutions.
-
-The higher disk footprint results in higher disk I/O during querying, since a lot more data has to be read from disk to
-evaluate a query. Query performance at scale can greatly benefit from using a filesystem with transparent compression
-(e.g. btrfs, zfs) to store systemd-journal files.
-
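-For example, on btrfs, transparent compression can be enabled with a mount option (device and mount point are
-illustrative):
-
-```bash
-# hypothetical example: mount the journal storage with zstd compression
-mount -o compress=zstd /dev/sdb1 /var/log/journal
-```
-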
-systemd-journal files are cached by the operating system. There is no database server to serve queries. Each file is
-opened and the query runs by directly accessing the data in it.
-
-Therefore systemd-journal relies on the caching layer of the operating system to optimize query performance. The more
-RAM the system has, the faster the queries will get, even though this memory will not be reported as `used` (it will
-be reported as `cache`). The first time a timeframe is accessed query performance will be slower, but further queries
-on the same timeframe will be significantly faster, since the journal data are then cached in memory.
-
-So, on busy logs centralization systems, query performance can be improved significantly by using a compressed
-filesystem for storing the journal files, and higher amounts of RAM.
-
-## Configuration and maintenance
-
-This Netdata plugin does not require any configuration or maintenance.
-
-## FAQ
-
-### Can I use this plugin on journal centralization servers?
-
-Yes. You can centralize your logs using `systemd-journal-remote`, and then install Netdata
-on this logs centralization server to explore the logs of all your infrastructure.
-
-This plugin will automatically provide multi-node views of your logs and also give you the ability to combine the logs
-of multiple servers, as you see fit.
-
-Check [configuring a logs centralization server](#how-do-i-configure-a-journal-centralization-server).
-
-### Can I use this plugin from a parent Netdata?
-
-Yes. When your nodes are connected to a Netdata parent, all their functions are available
-via the parent's UI. So, from the parent UI, you can access the functions of all your nodes.
-
-Keep in mind that, to protect your privacy, you need a free Netdata Cloud account in order to
-access Netdata Functions.
-
-### Is any of my data exposed to Netdata Cloud from this plugin?
-
-No. When you access the agent directly, none of your data passes through Netdata Cloud.
-You need a free Netdata Cloud account only to verify your identity and enable the use of
-Netdata Functions. Once this is done, all the data flow directly from your Netdata agent
-to your web browser.
-
-Also check [this discussion](https://github.com/netdata/netdata/discussions/16136).
-
-When you access Netdata via `https://app.netdata.cloud`, your data travels via Netdata Cloud,
-but it is not stored in Netdata Cloud. This is to allow you to access your Netdata agents from
-anywhere. All communication from/to Netdata Cloud is encrypted.
-
-### What are `volatile` and `persistent` journals?
-
-`systemd` `journald` allows creating both `volatile` journals in a `tmpfs` ram drive,
-and `persistent` journals stored on disk.
-
-`volatile` journals are particularly useful when the system monitored is sensitive to
-disk I/O, or does not have any writable disks at all.
-
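-The journal type is controlled by the `Storage=` setting in `/etc/systemd/journald.conf`, for example:
-
-```
-[Journal]
-Storage=persistent
-```
-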
-For more information check `man systemd-journald`.
-
-### I centralize my logs with Loki. Why use Netdata for my journals?
-
-`systemd` journals have almost infinite cardinality at their labels and all of them are indexed,
-even if every single message has unique fields and values.
-
-When you send `systemd` journal logs to Loki, even if you use the `relabel_rules` argument to
-`loki.source.journal` with a JSON format, you need to specify which of the fields from journald
-you want inherited by Loki. This means you need to know the most important fields beforehand.
-At the same time you lose all the flexibility `systemd` journal provides:
-**indexing on all fields and all their values**.
-
-Loki generally assumes that all logs are like a table. All entries in a stream share the same
-fields. But journald does exactly the opposite. Each log entry is unique and may have its own unique fields.
-
-So, Loki and `systemd-journal` are good for different use cases.
-
-`systemd-journal` already runs in your systems. You use it today. It is there inside all your systems
-collecting the system and applications logs. And for its use case, it has advantages over other
-centralization solutions. So, why not use it?
-
-### Is it worth building a `systemd` logs centralization server?
-
-Yes. It is simple, fast and the software to do it is already in your systems.
-
-For application and system logs, `systemd` journal is ideal, and the visibility you can get by centralizing your
-system logs, combined with the use of this Netdata plugin, is unparalleled.
-
-### How do I configure a journal centralization server?
-
-A short summary of getting a journal centralization server running can be found below.
-There are two strategies you can apply when it comes to a centralized server for `systemd` journal logs.
-
-1. _Active sources_, where the centralized server fetches the logs from each individual server
-2. _Passive sources_, where the centralized server accepts a log stream from an individual server.
-
-For more options and reference to documentation, check `man systemd-journal-remote` and `man systemd-journal-upload`.
-
-#### _passive_ journal centralization without encryption
-
-If you want to set up your own passive journal centralization without encryption, [check out our guide on it](https://github.com/netdata/netdata/blob/master/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md).
-
-#### _passive_ journal centralization with encryption using self-signed certificates
-
-If you want to set up your own passive journal centralization using self-signed certificates for encryption, [check out our guide on it](https://github.com/netdata/netdata/blob/master/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md).
-
-#### Limitations when using a logs centralization server
-
-As of this writing, `namespaces` support in `systemd` is limited:
-
-- Docker containers cannot log to namespaces. Check [this issue](https://github.com/moby/moby/issues/41879).
-- `systemd-journal-upload` automatically uploads `system` and `user` journals, but not `namespaces` journals. For this
-  you need to spawn a `systemd-journal-upload` per namespace, as sketched below.
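-
-A minimal sketch of a per-namespace uploader, assuming a namespace called `myns` and that
-`journald` keeps its journal files under `/var/log/journal/<machine-id>.myns` (verify the
-exact directory on your system with `ls /var/log/journal`):
-
-```bash
-# copy the stock unit, so each namespace gets its own uploader
-sudo cp /lib/systemd/system/systemd-journal-upload.service \
-        /etc/systemd/system/systemd-journal-upload-myns.service
-
-# edit ExecStart to append:
-#   --directory=/var/log/journal/MACHINE_ID.myns
-# replacing MACHINE_ID with the contents of /etc/machine-id
-sudo nano /etc/systemd/system/systemd-journal-upload-myns.service
-
-sudo systemctl daemon-reload
-sudo systemctl enable --now systemd-journal-upload-myns.service
-```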
diff --git a/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md b/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md
deleted file mode 100644
index cbed1e81e..000000000
--- a/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md
+++ /dev/null
@@ -1,126 +0,0 @@
-# Active journal source without encryption
-
-This page will guide you through creating an active journal source without the use of encryption.
-
-Once you enable an active journal source on a server, `systemd-journal-gatewayd` will expose a REST API on TCP port 19531. This API can be used for querying the logs, exporting the logs, or monitoring new log entries, remotely.
-
-> ⚠️ **IMPORTANT**<br/>
-> These instructions will expose your logs to the network, without any encryption or authorization.<br/>
-> DO NOT USE THIS ON NON-TRUSTED NETWORKS.
-
-## Configuring an active journal source
-
-On the server whose logs you want to expose, install `systemd-journal-gateway`.
-
-```bash
-# change this according to your distro
-sudo apt-get install systemd-journal-gateway
-```
-
-Optionally, if you want to change the port (the default is `19531`), edit `systemd-journal-gatewayd.socket`
-
-```bash
-# edit the socket file
-sudo systemctl edit systemd-journal-gatewayd.socket
-```
-
-and add the following lines in the place indicated, choose your desired port, then save and exit.
-
-```bash
-[Socket]
-ListenStream=<DESIRED_PORT>
-```
-
-Finally, enable it, so that it will start automatically upon receiving a connection:
-
-```bash
-# enable systemd-journal-gatewayd
-sudo systemctl daemon-reload
-sudo systemctl enable --now systemd-journal-gatewayd.socket
-```
-
-## Using the active journal source
-
-### Simple Logs Explorer
-
-`systemd-journal-gateway` provides a simple HTML5 application to browse the logs.
-
-To use it, open your web browser and navigate to:
-
-```
-http://server.ip:19531/browse
-```
-
-A simple page like this will be presented:
-
-![image](https://github.com/netdata/netdata/assets/2662304/4da88bf8-6398-468b-a359-68db0c9ad419)
-
-### Use it with `curl`
-
-`man systemd-journal-gatewayd` documents the supported API methods and provides examples to query the API using `curl` commands.
-
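-As a quick sketch (see the man page for the full `Range` header syntax):
-
-```bash
-# fetch the last few entries as JSON
-curl --silent -H 'Accept: application/json' \
-     -H 'Range: entries=:-3:3' 'http://server.ip:19531/entries'
-
-# stream new entries as they arrive
-curl --silent -H 'Accept: application/vnd.fdo.journal' \
-     'http://server.ip:19531/entries?follow'
-```
-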
-### Copying the logs to a central journals server
-
-`systemd-journal-remote` has the ability to query instances of `systemd-journal-gatewayd` to fetch their logs, so that the central server fetches the logs instead of waiting for the individual servers to push their logs to it.
-
-However, this kind of log centralization has a key problem: **there is no guarantee that there will be no gaps in the replicated logs**. Theoretically, the REST API of `systemd-journal-gatewayd` supports querying past data, and `systemd-journal-remote` could keep track of the replication state and automatically continue from the point it stopped last time. But it does not. So, currently the best log centralization option is to use a **passive** centralization, where the clients push their logs to the server.
-
-Given these limitations, if you still want to configure an **active** journal centralization, this is what you need to do:
-
-On the centralization server install `systemd-journal-remote`:
-
-```bash
-# change this according to your distro
-sudo apt-get install systemd-journal-remote
-```
-
-Then, copy `systemd-journal-remote.service` to configure it for querying the active source:
-
-```bash
-# replace "clientX" with the name of the active client node
-sudo cp /lib/systemd/system/systemd-journal-remote.service /etc/systemd/system/systemd-journal-remote-clientX.service
-
-# edit it to make sure the ExecStart line is like this:
-# ExecStart=/usr/lib/systemd/systemd-journal-remote --url http://clientX:19531/entries?follow
-sudo nano /etc/systemd/system/systemd-journal-remote-clientX.service
-
-# reload systemd
-sudo systemctl daemon-reload
-```
-
-```bash
-# enable systemd-journal-remote
-sudo systemctl enable --now systemd-journal-remote-clientX.service
-```
-
-You can repeat this process to create as many `systemd-journal-remote` services as the active sources you have.
-
-## Verify it works
-
-To verify the central server is receiving logs, run this on the central server:
-
-```bash
-sudo ls -l /var/log/journal/remote/
-```
-
-You should see new files named after the client's hostname or IP.
-
-Also, any of the new services (`systemctl status systemd-journal-remote-clientX`) should show something like this:
-
-```bash
-● systemd-journal-remote-clientX.service - Fetching systemd journal logs from 192.168.2.146
-     Loaded: loaded (/etc/systemd/system/systemd-journal-remote-clientX.service; enabled; preset: disabled)
-    Drop-In: /usr/lib/systemd/system/service.d
-             └─10-timeout-abort.conf
-     Active: active (running) since Wed 2023-10-18 07:35:52 EEST; 23min ago
-   Main PID: 77959 (systemd-journal)
-      Tasks: 2 (limit: 6928)
-     Memory: 7.7M
-        CPU: 518ms
-     CGroup: /system.slice/systemd-journal-remote-clientX.service
-             ├─77959 /usr/lib/systemd/systemd-journal-remote --url "http://192.168.2.146:19531/entries?follow"
-             └─77962 curl "-HAccept: application/vnd.fdo.journal" --silent --show-error "http://192.168.2.146:19531/entries?follow"
-
-Oct 18 07:35:52 systemd-journal-server systemd[1]: Started systemd-journal-remote-clientX.service - Fetching systemd journal logs from 192.168.2.146.
-Oct 18 07:35:52 systemd-journal-server systemd-journal-remote[77959]: Spawning curl http://192.168.2.146:19531/entries?follow...
-```
diff --git a/collectors/systemd-journal.plugin/forward_secure_sealing.md b/collectors/systemd-journal.plugin/forward_secure_sealing.md
deleted file mode 100644
index b41570d68..000000000
--- a/collectors/systemd-journal.plugin/forward_secure_sealing.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Forward Secure Sealing (FSS) in Systemd-Journal
-
-Forward Secure Sealing (FSS) is a feature in the systemd journal designed to detect log file tampering.
-Given that attackers often try to hide their actions by modifying or deleting log file entries,
-FSS provides administrators with a mechanism to identify any such unauthorized alterations.
-
-## Importance
-Logs are a crucial component of system monitoring and auditing. Ensuring their integrity means administrators can trust
-the data, detect potential breaches, and trace actions back to their origins. Traditional methods to maintain this
-integrity involve writing logs to external systems or printing them out. While these methods are effective, they are
-not foolproof. FSS offers a more streamlined approach, allowing for log verification directly on the local system.
-
-## How FSS Works
-FSS operates by "sealing" binary logs at regular intervals. This seal is a cryptographic operation, ensuring that any
-tampering with the logs prior to the sealing can be detected. If an attacker modifies logs before they are sealed,
-these changes become a permanent part of the sealed record, highlighting any malicious activity.
-
-The technology behind FSS is based on "Forward Secure Pseudo Random Generators" (FSPRG), a concept stemming from
-academic research.
-
-Two keys are central to FSS:
-
-- **Sealing Key**: Kept on the system, used to seal the logs.
-- **Verification Key**: Stored securely off-system, used to verify the sealed logs.
-
-Every so often, the sealing key is regenerated in a non-reversible process, ensuring that old keys are obsolete and the
-latest logs are sealed with a fresh key. The off-site verification key can regenerate any past sealing key, allowing
-administrators to verify older seals. If logs are tampered with, verification will fail, alerting administrators to the
-breach.
-
-## Enabling FSS
-To enable FSS, use the following command:
-
-```bash
-journalctl --setup-keys
-```
-
-By default, systemd will seal the logs every 15 minutes. However, this interval can be adjusted using a flag during key
-generation. For example, to seal logs every 10 seconds:
-
-```bash
-journalctl --setup-keys --interval=10s
-```
-
-## Verifying Journals
-After enabling FSS, you can verify the integrity of your logs using the verification key:
-
-```bash
-journalctl --verify
-```
-
-If any discrepancies are found, you'll be alerted, indicating potential tampering.
-
-## Disabling FSS
-Should you wish to disable FSS:
-
-**Delete the Sealing Key**: This stops new log entries from being sealed. The sealing key was generated by `journalctl --setup-keys` and is stored inside your journal directory (typically `/var/log/journal/<machine-id>/fss`); remove it from there.
-
-**Rotate and Prune the Journals**: This will start a new unsealed journal and can remove the old sealed journals:
-
-```bash
-journalctl --rotate
-journalctl --vacuum-time=1s
-```
-
-
-**Adjust Systemd Configuration (Optional)**: If you've made changes to facilitate FSS in `/etc/systemd/journald.conf`,
-consider reverting or adjusting those. Restart the systemd-journald service afterward:
-
-```bash
-systemctl restart systemd-journald
-```
-
-## Conclusion
-FSS is a significant advancement in maintaining log integrity. While not a replacement for all traditional integrity
-methods, it offers a valuable tool in the battle against unauthorized log tampering. By integrating FSS into your log
-management strategy, you ensure a more transparent, reliable, and tamper-evident logging system.
diff --git a/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md b/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md
deleted file mode 100644
index b70c22033..000000000
--- a/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md
+++ /dev/null
@@ -1,150 +0,0 @@
-# Passive journal centralization without encryption
-
-This page will guide you through creating a passive journal centralization setup without the use of encryption.
-
-Once you centralize your infrastructure logs to a server, Netdata will automatically detect all the logs from all servers and organize them into sources.
-With the setup described in this document, journal files are identified by the IPs of the clients sending the logs. Netdata will automatically do
-reverse DNS lookups to find the names of the servers and name the sources on the dashboard accordingly.
-
-A _passive_ journal server waits for clients to push their logs to it, so in this setup we will:
-
-1. configure `systemd-journal-remote` on the server, to listen for incoming connections.
-2. configure `systemd-journal-upload` on the clients, to push their logs to the server.
-
-> ⚠️ **IMPORTANT**<br/>
-> These instructions will copy your logs to a central server, without any encryption or authorization.<br/>
-> DO NOT USE THIS ON NON-TRUSTED NETWORKS.
-
-## Server configuration
-
-On the centralization server install `systemd-journal-remote`:
-
-```bash
-# change this according to your distro
-sudo apt-get install systemd-journal-remote
-```
-
-Make sure the journal transfer protocol is `http`:
-
-```bash
-sudo cp /lib/systemd/system/systemd-journal-remote.service /etc/systemd/system/
-
-# edit it to make sure it says:
-# --listen-http=-3
-# not:
-# --listen-https=-3
-sudo nano /etc/systemd/system/systemd-journal-remote.service
-
-# reload systemd
-sudo systemctl daemon-reload
-```
-
-Optionally, if you want to change the port (the default is `19532`), edit `systemd-journal-remote.socket`
-
-```bash
-# edit the socket file
-sudo systemctl edit systemd-journal-remote.socket
-```
-
-and add the following lines in the place indicated, choose your desired port, then save and exit.
-
-```bash
-[Socket]
-ListenStream=<DESIRED_PORT>
-```
-
-Finally, enable it, so that it will start automatically upon receiving a connection:
-
-```bash
-# enable systemd-journal-remote
-sudo systemctl enable --now systemd-journal-remote.socket
-sudo systemctl enable systemd-journal-remote.service
-```
-
-`systemd-journal-remote` is now listening for incoming journals from remote hosts.
-
-## Client configuration
-
-On the clients, install `systemd-journal-remote` (it includes `systemd-journal-upload`):
-
-```bash
-# change this according to your distro
-sudo apt-get install systemd-journal-remote
-```
-
-Edit `/etc/systemd/journal-upload.conf` and set the IP address and the port of the server, like so:
-
-```conf
-[Upload]
-URL=http://centralization.server.ip:19532
-```
-
-Edit `systemd-journal-upload.service` and add `Restart=always`, so the client keeps trying to push logs even if the server is temporarily unavailable, like this:
-
-```bash
-sudo systemctl edit systemd-journal-upload
-```
-
-At the top, add:
-
-```conf
-[Service]
-Restart=always
-```
-
-Enable and start `systemd-journal-upload`, like this:
-
-```bash
-sudo systemctl enable systemd-journal-upload
-sudo systemctl start systemd-journal-upload
-```
-
-## Verify it works
-
-To verify the central server is receiving logs, run this on the central server:
-
-```bash
-sudo ls -l /var/log/journal/remote/
-```
-
-You should see new files named after the client's IP.
-
-Also, `systemctl status systemd-journal-remote` should show something like this:
-
-```bash
-systemd-journal-remote.service - Journal Remote Sink Service
- Loaded: loaded (/etc/systemd/system/systemd-journal-remote.service; indirect; preset: disabled)
- Active: active (running) since Sun 2023-10-15 14:29:46 EEST; 2h 24min ago
-TriggeredBy: ● systemd-journal-remote.socket
- Docs: man:systemd-journal-remote(8)
- man:journal-remote.conf(5)
- Main PID: 2118153 (systemd-journal)
- Status: "Processing requests..."
- Tasks: 1 (limit: 154152)
- Memory: 2.2M
- CPU: 71ms
- CGroup: /system.slice/systemd-journal-remote.service
- └─2118153 /usr/lib/systemd/systemd-journal-remote --listen-http=-3 --output=/var/log/journal/remote/
-```
-
-Note the `Status: "Processing requests..."` and the PID under `CGroup`.
-
-On the client `systemctl status systemd-journal-upload` should show something like this:
-
-```bash
-● systemd-journal-upload.service - Journal Remote Upload Service
- Loaded: loaded (/lib/systemd/system/systemd-journal-upload.service; enabled; vendor preset: disabled)
- Drop-In: /etc/systemd/system/systemd-journal-upload.service.d
- └─override.conf
- Active: active (running) since Sun 2023-10-15 10:39:04 UTC; 3h 17min ago
- Docs: man:systemd-journal-upload(8)
- Main PID: 4169 (systemd-journal)
- Status: "Processing input..."
- Tasks: 1 (limit: 13868)
- Memory: 3.5M
- CPU: 1.081s
- CGroup: /system.slice/systemd-journal-upload.service
- └─4169 /lib/systemd/systemd-journal-upload --save-state
-```
-
-Note the `Status: "Processing input..."` and the PID under `CGroup`.
diff --git a/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md b/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md
deleted file mode 100644
index 722d1ceae..000000000
--- a/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md
+++ /dev/null
@@ -1,250 +0,0 @@
-# Passive journal centralization with encryption using self-signed certificates
-
-This page will guide you through creating a **passive** journal centralization setup using **self-signed certificates** for encryption and authorization.
-
-Once you centralize your infrastructure logs to a server, Netdata will automatically detect all the logs from all servers and organize them into sources. With the setup described in this document, on recent systemd versions, Netdata will automatically name all remote sources using the names of the clients, as described in their certificates (on older versions, the names will be IPs or reverse DNS lookups of the IPs).
-
-A **passive** journal server waits for clients to push their logs to it, so in this setup we will:
-
-1. configure a certificates authority and issue self-signed certificates for your servers.
-2. configure `systemd-journal-remote` on the server, to listen for incoming connections.
-3. configure `systemd-journal-upload` on the clients, to push their logs to the server.
-
-Keep in mind that the authorization involved works like this:
-
-1. The server (`systemd-journal-remote`) validates that the client (`systemd-journal-upload`) uses a trusted certificate (a certificate issued by the same certificate authority as its own).
- So, **the server will accept logs from any client having a valid certificate**.
-2. The client (`systemd-journal-upload`) validates that the receiver (`systemd-journal-remote`) uses a trusted certificate (like the server does), and also checks that the hostname or IP of the URL specified in its configuration matches one of the names or IPs on the certificate of the server it connects to. So, **the client validates that it connected to the right server**, by checking the URL hostname against the names and IPs on the server's certificate.
-
-This means that if both certificates are issued by the same certificate authority, only the client can potentially reject the server.
-
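-As a quick sanity check (the file names below are hypothetical; use the ones the script in
-the next section generates), you can confirm that a certificate chains to your certificate
-authority with `openssl`:
-
-```bash
-# verify that clientX's certificate was issued by the same CA
-openssl verify -CAfile /etc/ssl/systemd-journal/ca.pem /etc/ssl/systemd-journal/clientX.pem
-```
-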
-## Self-signed certificates
-
-To simplify the process of creating and managing self-signed certificates, we have created [this bash script](https://github.com/netdata/netdata/blob/master/collectors/systemd-journal.plugin/systemd-journal-self-signed-certs.sh).
-
-This also helps automate the distribution of the certificates to your servers (it generates a new bash script for each of your servers, which includes everything required, including the certificates).
-
-We suggest keeping this script and all the involved certificates on the journals centralization server, in the directory `/etc/ssl/systemd-journal`, so that you can make future changes as required. If you prefer to keep the certificate authority and all the certificates in a more secure location, just use the script at that location.
-
-On the server that will issue the certificates (usually the centralization server), do the following:
-
-```bash
-# install systemd-journal-remote to add the users and groups required and openssl for the certs
-# change this according to your distro
-sudo apt-get install systemd-journal-remote openssl
-
-# download the script and make it executable
-curl >systemd-journal-self-signed-certs.sh "https://raw.githubusercontent.com/netdata/netdata/master/collectors/systemd-journal.plugin/systemd-journal-self-signed-certs.sh"
-chmod 750 systemd-journal-self-signed-certs.sh
-```
-
-To create certificates for your servers, run this:
-
-```bash
-sudo ./systemd-journal-self-signed-certs.sh "server1" "DNS:hostname1" "IP:10.0.0.1"
-```
-
-Where:
-
- - `server1` is the canonical name of the server. On newer systemd versions, this name will be used by `systemd-journal-remote` and Netdata when you view the logs on the dashboard.
- - `DNS:hostname1` is a DNS name that the server is reachable at. Add `"DNS:xyz"` multiple times to define multiple DNS names for the server.
- - `IP:10.0.0.1` is an IP that the server is reachable at. Add `"IP:xyz"` multiple times to define multiple IPs for the server.
-
-Repeat this process to create the certificates for all your servers. You can add servers as required, at any time in the future.
-
-Existing certificates are never re-generated. Typically, certificates need to be revoked and new ones issued, but the `systemd-journal-remote` tools do not support handling revocations. So, the only way to re-issue a certificate is to delete its files in `/etc/ssl/systemd-journal` and run the script again to create a new one.
-
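-A minimal sketch of re-issuing the certificate of `server1` (the file names are hypothetical;
-check what the script actually created in `/etc/ssl/systemd-journal`):
-
-```bash
-# remove the old certificate files for this server, then run the script again
-sudo rm /etc/ssl/systemd-journal/server1.pem /etc/ssl/systemd-journal/server1.key
-sudo ./systemd-journal-self-signed-certs.sh "server1" "DNS:hostname1" "IP:10.0.0.1"
-```
-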
-Once you run the script for each of your servers, in `/etc/ssl/systemd-journal` you will find shell scripts named `runme-on-XXX.sh`, where `XXX` are the canonical names of your servers.
-
-These `runme-on-XXX.sh` scripts include everything needed to install the certificates, fix their file permissions to be accessible by `systemd-journal-remote` and `systemd-journal-upload`, and update `/etc/systemd/journal-remote.conf` and `/etc/systemd/journal-upload.conf`.
-
-You can copy and paste (or `scp`) these scripts to your server and each of your clients:
-
-```bash
-sudo scp /etc/ssl/systemd-journal/runme-on-XXX.sh XXX:/tmp/
-```
-
-For the rest of this guide, we assume that you have copied the right `runme-on-XXX.sh` to `/tmp` on all the servers for which you issued certificates.
-
-### Note about certificate file permissions
-
-It is worth noting that `systemd-journal` certificates need to be owned by `systemd-journal-remote:systemd-journal`.
-
-Both the user `systemd-journal-remote` and the group `systemd-journal` are automatically added by the `systemd-journal-remote` package. However, `systemd-journal-upload` (and `systemd-journal-gatewayd`, which is not used in this guide) use dynamic users. Thankfully, they are automatically added to the `systemd-journal` group.
-
-So, having the certificates owned by `systemd-journal-remote:systemd-journal` satisfies both `systemd-journal-remote`, which is not in the `systemd-journal` group, and `systemd-journal-upload` (and `systemd-journal-gatewayd`), which use dynamic users.
-
-You don't need to do anything about it (the scripts take care of everything), but it is worth noting how this works.
-
-## Server configuration
-
-On the centralization server install `systemd-journal-remote`:
-
-```bash
-# change this according to your distro
-sudo apt-get install systemd-journal-remote
-```
-
-Make sure the journal transfer protocol is `https`:
-
-```bash
-sudo cp /lib/systemd/system/systemd-journal-remote.service /etc/systemd/system/
-
-# edit it to make sure it says:
-# --listen-https=-3
-# not:
-# --listen-http=-3
-sudo nano /etc/systemd/system/systemd-journal-remote.service
-
-# reload systemd
-sudo systemctl daemon-reload
-```
-
-Optionally, if you want to change the port (the default is `19532`), edit `systemd-journal-remote.socket`
-
-```bash
-# edit the socket file
-sudo systemctl edit systemd-journal-remote.socket
-```
-
-and add the following lines in the place indicated, choose your desired port, then save and exit.
-
-```bash
-[Socket]
-ListenStream=<DESIRED_PORT>
-```
-
-Next, run the `runme-on-XXX.sh` script on the server:
-
-```bash
-# if you run the certificate authority on the server:
-sudo /etc/ssl/systemd-journal/runme-on-XXX.sh
-
-# if you run the certificate authority elsewhere,
-# assuming you have copied the runme-on-XXX.sh script (as described above):
-sudo bash /tmp/runme-on-XXX.sh
-```
-
-This will install the certificates in `/etc/ssl/systemd-journal`, set the right file permissions, and update `/etc/systemd/journal-remote.conf` and `/etc/systemd/journal-upload.conf` to use the right certificate files.
-
-Finally, enable it, so that it will start automatically upon receiving a connection:
-
-```bash
-# enable systemd-journal-remote
-sudo systemctl enable --now systemd-journal-remote.socket
-sudo systemctl enable systemd-journal-remote.service
-```
-
-`systemd-journal-remote` is now listening for incoming journals from remote hosts.
-
-> When done, remember to `rm /tmp/runme-on-*.sh` to make sure your certificates are secure.
-
-## Client configuration
-
-On the clients, install `systemd-journal-remote` (it includes `systemd-journal-upload`):
-
-```bash
-# change this according to your distro
-sudo apt-get install systemd-journal-remote
-```
-
-Edit `/etc/systemd/journal-upload.conf` and set the IP address and the port of the server, like so:
-
-```conf
-[Upload]
-URL=https://centralization.server.ip:19532
-```
-
-Make sure that `centralization.server.ip` is one of the `DNS:` or `IP:` parameters you defined when you created the centralization server certificates. If it is not, the client may refuse to connect.
-
-Next, edit `systemd-journal-upload.service` and add `Restart=always`, so the client keeps trying to push logs even if the server is temporarily unavailable, like this:
-
-```bash
-sudo systemctl edit systemd-journal-upload.service
-```
-
-At the top, add:
-
-```conf
-[Service]
-Restart=always
-```
-
-Enable `systemd-journal-upload.service`, like this:
-
-```bash
-sudo systemctl enable systemd-journal-upload.service
-```
-
-Assuming the relevant `runme-on-XXX.sh` script for this client is in `/tmp`, run:
-
-```bash
-sudo bash /tmp/runme-on-XXX.sh
-```
-
-This will install the certificates in `/etc/ssl/systemd-journal`, set the right file permissions, and update `/etc/systemd/journal-remote.conf` and `/etc/systemd/journal-upload.conf` to use the right certificate files.
-
-Finally, restart `systemd-journal-upload.service`:
-
-```bash
-sudo systemctl restart systemd-journal-upload.service
-```
-
-The client should now be pushing logs to the central server.
-
-> When done, remember to `rm /tmp/runme-on-*.sh` to make sure your certificates are secure.
-
-Here it is in action, in Netdata:
-
-![2023-10-18 16-23-05](https://github.com/netdata/netdata/assets/2662304/83bec232-4770-455b-8f1c-46b5de5f93a2)
-
-
-## Verify it works
-
-To verify the central server is receiving logs, run this on the central server:
-
-```bash
-sudo ls -l /var/log/journal/remote/
-```
-
-Depending on the `systemd` version you use, you should see new files named after the clients' canonical names (as defined in their certificates) or their IPs.
-
-Also, `systemctl status systemd-journal-remote` should show something like this:
-
-```bash
-systemd-journal-remote.service - Journal Remote Sink Service
- Loaded: loaded (/etc/systemd/system/systemd-journal-remote.service; indirect; preset: disabled)
- Active: active (running) since Sun 2023-10-15 14:29:46 EEST; 2h 24min ago
-TriggeredBy: ● systemd-journal-remote.socket
- Docs: man:systemd-journal-remote(8)
- man:journal-remote.conf(5)
- Main PID: 2118153 (systemd-journal)
- Status: "Processing requests..."
- Tasks: 1 (limit: 154152)
- Memory: 2.2M
- CPU: 71ms
- CGroup: /system.slice/systemd-journal-remote.service
- └─2118153 /usr/lib/systemd/systemd-journal-remote --listen-https=-3 --output=/var/log/journal/remote/
-```
-
-Note the `Status: "Processing requests..."` and the PID under `CGroup`.
-
-On the client `systemctl status systemd-journal-upload` should show something like this:
-
-```bash
-● systemd-journal-upload.service - Journal Remote Upload Service
- Loaded: loaded (/lib/systemd/system/systemd-journal-upload.service; enabled; vendor preset: disabled)
- Drop-In: /etc/systemd/system/systemd-journal-upload.service.d
- └─override.conf
- Active: active (running) since Sun 2023-10-15 10:39:04 UTC; 3h 17min ago
- Docs: man:systemd-journal-upload(8)
- Main PID: 4169 (systemd-journal)
- Status: "Processing input..."
- Tasks: 1 (limit: 13868)
- Memory: 3.5M
- CPU: 1.081s
- CGroup: /system.slice/systemd-journal-upload.service
- └─4169 /lib/systemd/systemd-journal-upload --save-state
-```
-
-Note the `Status: "Processing input..."` and the PID under `CGroup`.
diff --git a/collectors/systemd-journal.plugin/systemd-internals.h b/collectors/systemd-journal.plugin/systemd-internals.h
deleted file mode 100644
index e1ae44d4f..000000000
--- a/collectors/systemd-journal.plugin/systemd-internals.h
+++ /dev/null
@@ -1,162 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_COLLECTORS_SYSTEMD_INTERNALS_H
-#define NETDATA_COLLECTORS_SYSTEMD_INTERNALS_H
-
-#include "collectors/all.h"
-#include "libnetdata/libnetdata.h"
-
-#include <linux/capability.h>
-#include <systemd/sd-journal.h>
-#include <syslog.h>
-
-#define SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION "View, search and analyze systemd journal entries."
-#define SYSTEMD_JOURNAL_FUNCTION_NAME "systemd-journal"
-#define SYSTEMD_JOURNAL_DEFAULT_TIMEOUT 60
-#define SYSTEMD_JOURNAL_ENABLE_ESTIMATIONS_FILE_PERCENTAGE 0.01
-#define SYSTEMD_JOURNAL_EXECUTE_WATCHER_PENDING_EVERY_MS 250
-#define SYSTEMD_JOURNAL_ALL_FILES_SCAN_EVERY_USEC (5 * 60 * USEC_PER_SEC)
-
-#define SYSTEMD_UNITS_FUNCTION_DESCRIPTION "View the status of systemd units"
-#define SYSTEMD_UNITS_FUNCTION_NAME "systemd-list-units"
-#define SYSTEMD_UNITS_DEFAULT_TIMEOUT 30
-
-extern __thread size_t fstat_thread_calls;
-extern __thread size_t fstat_thread_cached_responses;
-void fstat_cache_enable_on_thread(void);
-void fstat_cache_disable_on_thread(void);
-
-extern netdata_mutex_t stdout_mutex;
-
-typedef enum {
- ND_SD_JOURNAL_NO_FILE_MATCHED,
- ND_SD_JOURNAL_FAILED_TO_OPEN,
- ND_SD_JOURNAL_FAILED_TO_SEEK,
- ND_SD_JOURNAL_TIMED_OUT,
- ND_SD_JOURNAL_OK,
- ND_SD_JOURNAL_NOT_MODIFIED,
- ND_SD_JOURNAL_CANCELLED,
-} ND_SD_JOURNAL_STATUS;
-
-typedef enum {
- SDJF_NONE = 0,
- SDJF_ALL = (1 << 0),
- SDJF_LOCAL_ALL = (1 << 1),
- SDJF_REMOTE_ALL = (1 << 2),
- SDJF_LOCAL_SYSTEM = (1 << 3),
- SDJF_LOCAL_USER = (1 << 4),
- SDJF_LOCAL_NAMESPACE = (1 << 5),
- SDJF_LOCAL_OTHER = (1 << 6),
-} SD_JOURNAL_FILE_SOURCE_TYPE;
-
-struct journal_file {
- const char *filename;
- size_t filename_len;
- STRING *source;
- SD_JOURNAL_FILE_SOURCE_TYPE source_type;
- usec_t file_last_modified_ut;
- usec_t msg_first_ut;
- usec_t msg_last_ut;
- size_t size;
- bool logged_failure;
- bool logged_journalctl_failure;
- usec_t max_journal_vs_realtime_delta_ut;
-
- usec_t last_scan_monotonic_ut;
- usec_t last_scan_header_vs_last_modified_ut;
-
- uint64_t first_seqnum;
- uint64_t last_seqnum;
- sd_id128_t first_writer_id;
- sd_id128_t last_writer_id;
-
- uint64_t messages_in_file;
-};
-
-#define SDJF_SOURCE_ALL_NAME "all"
-#define SDJF_SOURCE_LOCAL_NAME "all-local-logs"
-#define SDJF_SOURCE_LOCAL_SYSTEM_NAME "all-local-system-logs"
-#define SDJF_SOURCE_LOCAL_USERS_NAME "all-local-user-logs"
-#define SDJF_SOURCE_LOCAL_OTHER_NAME "all-uncategorized"
-#define SDJF_SOURCE_NAMESPACES_NAME "all-local-namespaces"
-#define SDJF_SOURCE_REMOTES_NAME "all-remote-systems"
-
-#define ND_SD_JOURNAL_OPEN_FLAGS (0)
-
-#define JOURNAL_VS_REALTIME_DELTA_DEFAULT_UT (5 * USEC_PER_SEC) // assume always 5 seconds latency
-#define JOURNAL_VS_REALTIME_DELTA_MAX_UT (2 * 60 * USEC_PER_SEC) // up to 2 minutes latency
-
-extern DICTIONARY *journal_files_registry;
-extern DICTIONARY *used_hashes_registry;
-extern DICTIONARY *function_query_status_dict;
-extern DICTIONARY *boot_ids_to_first_ut;
-
-int journal_file_dict_items_backward_compar(const void *a, const void *b);
-int journal_file_dict_items_forward_compar(const void *a, const void *b);
-void buffer_json_journal_versions(BUFFER *wb);
-void available_journal_file_sources_to_json_array(BUFFER *wb);
-bool journal_files_completed_once(void);
-void journal_files_registry_update(void);
-void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, const char *dirname, int depth);
-
-FACET_ROW_SEVERITY syslog_priority_to_facet_severity(FACETS *facets, FACET_ROW *row, void *data);
-
-void netdata_systemd_journal_dynamic_row_id(FACETS *facets, BUFFER *json_array, FACET_ROW_KEY_VALUE *rkv, FACET_ROW *row, void *data);
-void netdata_systemd_journal_transform_priority(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data);
-void netdata_systemd_journal_transform_syslog_facility(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data);
-void netdata_systemd_journal_transform_errno(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data);
-void netdata_systemd_journal_transform_boot_id(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data);
-void netdata_systemd_journal_transform_uid(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data);
-void netdata_systemd_journal_transform_gid(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data);
-void netdata_systemd_journal_transform_cap_effective(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data);
-void netdata_systemd_journal_transform_timestamp_usec(FACETS *facets, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data);
-
-usec_t journal_file_update_annotation_boot_id(sd_journal *j, struct journal_file *jf, const char *boot_id);
-
-#define MAX_JOURNAL_DIRECTORIES 100
-struct journal_directory {
- char *path;
-};
-extern struct journal_directory journal_directories[MAX_JOURNAL_DIRECTORIES];
-
-void journal_init_files_and_directories(void);
-void journal_init_query_status(void);
-void function_systemd_journal(const char *transaction, char *function, int timeout, bool *cancelled);
-void journal_file_update_header(const char *filename, struct journal_file *jf);
-
-void netdata_systemd_journal_message_ids_init(void);
-void netdata_systemd_journal_transform_message_id(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused);
-
-void *journal_watcher_main(void *arg);
-
-#ifdef ENABLE_SYSTEMD_DBUS
-void function_systemd_units(const char *transaction, char *function, int timeout, bool *cancelled);
-#endif
-
-static inline void send_newline_and_flush(void) {
- netdata_mutex_lock(&stdout_mutex);
- fprintf(stdout, "\n");
- fflush(stdout);
- netdata_mutex_unlock(&stdout_mutex);
-}
-
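-// Split a journal field of the form "KEY=value" into key/value pointers and
-// lengths without copying; returns false when no '=' is present.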
-static inline bool parse_journal_field(const char *data, size_t data_length, const char **key, size_t *key_length, const char **value, size_t *value_length) {
- const char *k = data;
- const char *equal = strchr(k, '=');
- if(unlikely(!equal))
- return false;
-
- size_t kl = equal - k;
-
- const char *v = ++equal;
- size_t vl = data_length - kl - 1;
-
- *key = k;
- *key_length = kl;
- *value = v;
- *value_length = vl;
-
- return true;
-}
-
-#endif //NETDATA_COLLECTORS_SYSTEMD_INTERNALS_H
diff --git a/collectors/systemd-journal.plugin/systemd-journal-annotations.c b/collectors/systemd-journal.plugin/systemd-journal-annotations.c
deleted file mode 100644
index b12356110..000000000
--- a/collectors/systemd-journal.plugin/systemd-journal-annotations.c
+++ /dev/null
@@ -1,719 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "systemd-internals.h"
-
-const char *errno_map[] = {
- [1] = "1 (EPERM)", // "Operation not permitted",
- [2] = "2 (ENOENT)", // "No such file or directory",
- [3] = "3 (ESRCH)", // "No such process",
- [4] = "4 (EINTR)", // "Interrupted system call",
- [5] = "5 (EIO)", // "Input/output error",
- [6] = "6 (ENXIO)", // "No such device or address",
- [7] = "7 (E2BIG)", // "Argument list too long",
- [8] = "8 (ENOEXEC)", // "Exec format error",
- [9] = "9 (EBADF)", // "Bad file descriptor",
- [10] = "10 (ECHILD)", // "No child processes",
- [11] = "11 (EAGAIN)", // "Resource temporarily unavailable",
- [12] = "12 (ENOMEM)", // "Cannot allocate memory",
- [13] = "13 (EACCES)", // "Permission denied",
- [14] = "14 (EFAULT)", // "Bad address",
- [15] = "15 (ENOTBLK)", // "Block device required",
- [16] = "16 (EBUSY)", // "Device or resource busy",
- [17] = "17 (EEXIST)", // "File exists",
- [18] = "18 (EXDEV)", // "Invalid cross-device link",
- [19] = "19 (ENODEV)", // "No such device",
- [20] = "20 (ENOTDIR)", // "Not a directory",
- [21] = "21 (EISDIR)", // "Is a directory",
- [22] = "22 (EINVAL)", // "Invalid argument",
- [23] = "23 (ENFILE)", // "Too many open files in system",
- [24] = "24 (EMFILE)", // "Too many open files",
- [25] = "25 (ENOTTY)", // "Inappropriate ioctl for device",
- [26] = "26 (ETXTBSY)", // "Text file busy",
- [27] = "27 (EFBIG)", // "File too large",
- [28] = "28 (ENOSPC)", // "No space left on device",
- [29] = "29 (ESPIPE)", // "Illegal seek",
- [30] = "30 (EROFS)", // "Read-only file system",
- [31] = "31 (EMLINK)", // "Too many links",
- [32] = "32 (EPIPE)", // "Broken pipe",
- [33] = "33 (EDOM)", // "Numerical argument out of domain",
- [34] = "34 (ERANGE)", // "Numerical result out of range",
- [35] = "35 (EDEADLK)", // "Resource deadlock avoided",
- [36] = "36 (ENAMETOOLONG)", // "File name too long",
- [37] = "37 (ENOLCK)", // "No locks available",
- [38] = "38 (ENOSYS)", // "Function not implemented",
- [39] = "39 (ENOTEMPTY)", // "Directory not empty",
- [40] = "40 (ELOOP)", // "Too many levels of symbolic links",
- [42] = "42 (ENOMSG)", // "No message of desired type",
- [43] = "43 (EIDRM)", // "Identifier removed",
- [44] = "44 (ECHRNG)", // "Channel number out of range",
- [45] = "45 (EL2NSYNC)", // "Level 2 not synchronized",
- [46] = "46 (EL3HLT)", // "Level 3 halted",
- [47] = "47 (EL3RST)", // "Level 3 reset",
- [48] = "48 (ELNRNG)", // "Link number out of range",
- [49] = "49 (EUNATCH)", // "Protocol driver not attached",
- [50] = "50 (ENOCSI)", // "No CSI structure available",
- [51] = "51 (EL2HLT)", // "Level 2 halted",
- [52] = "52 (EBADE)", // "Invalid exchange",
- [53] = "53 (EBADR)", // "Invalid request descriptor",
- [54] = "54 (EXFULL)", // "Exchange full",
- [55] = "55 (ENOANO)", // "No anode",
- [56] = "56 (EBADRQC)", // "Invalid request code",
- [57] = "57 (EBADSLT)", // "Invalid slot",
- [59] = "59 (EBFONT)", // "Bad font file format",
- [60] = "60 (ENOSTR)", // "Device not a stream",
- [61] = "61 (ENODATA)", // "No data available",
- [62] = "62 (ETIME)", // "Timer expired",
- [63] = "63 (ENOSR)", // "Out of streams resources",
- [64] = "64 (ENONET)", // "Machine is not on the network",
- [65] = "65 (ENOPKG)", // "Package not installed",
- [66] = "66 (EREMOTE)", // "Object is remote",
- [67] = "67 (ENOLINK)", // "Link has been severed",
- [68] = "68 (EADV)", // "Advertise error",
- [69] = "69 (ESRMNT)", // "Srmount error",
- [70] = "70 (ECOMM)", // "Communication error on send",
- [71] = "71 (EPROTO)", // "Protocol error",
- [72] = "72 (EMULTIHOP)", // "Multihop attempted",
- [73] = "73 (EDOTDOT)", // "RFS specific error",
- [74] = "74 (EBADMSG)", // "Bad message",
- [75] = "75 (EOVERFLOW)", // "Value too large for defined data type",
- [76] = "76 (ENOTUNIQ)", // "Name not unique on network",
- [77] = "77 (EBADFD)", // "File descriptor in bad state",
- [78] = "78 (EREMCHG)", // "Remote address changed",
- [79] = "79 (ELIBACC)", // "Can not access a needed shared library",
- [80] = "80 (ELIBBAD)", // "Accessing a corrupted shared library",
- [81] = "81 (ELIBSCN)", // ".lib section in a.out corrupted",
- [82] = "82 (ELIBMAX)", // "Attempting to link in too many shared libraries",
- [83] = "83 (ELIBEXEC)", // "Cannot exec a shared library directly",
- [84] = "84 (EILSEQ)", // "Invalid or incomplete multibyte or wide character",
- [85] = "85 (ERESTART)", // "Interrupted system call should be restarted",
- [86] = "86 (ESTRPIPE)", // "Streams pipe error",
- [87] = "87 (EUSERS)", // "Too many users",
- [88] = "88 (ENOTSOCK)", // "Socket operation on non-socket",
- [89] = "89 (EDESTADDRREQ)", // "Destination address required",
- [90] = "90 (EMSGSIZE)", // "Message too long",
- [91] = "91 (EPROTOTYPE)", // "Protocol wrong type for socket",
- [92] = "92 (ENOPROTOOPT)", // "Protocol not available",
- [93] = "93 (EPROTONOSUPPORT)", // "Protocol not supported",
- [94] = "94 (ESOCKTNOSUPPORT)", // "Socket type not supported",
- [95] = "95 (ENOTSUP)", // "Operation not supported",
- [96] = "96 (EPFNOSUPPORT)", // "Protocol family not supported",
- [97] = "97 (EAFNOSUPPORT)", // "Address family not supported by protocol",
- [98] = "98 (EADDRINUSE)", // "Address already in use",
- [99] = "99 (EADDRNOTAVAIL)", // "Cannot assign requested address",
- [100] = "100 (ENETDOWN)", // "Network is down",
- [101] = "101 (ENETUNREACH)", // "Network is unreachable",
- [102] = "102 (ENETRESET)", // "Network dropped connection on reset",
- [103] = "103 (ECONNABORTED)", // "Software caused connection abort",
- [104] = "104 (ECONNRESET)", // "Connection reset by peer",
- [105] = "105 (ENOBUFS)", // "No buffer space available",
- [106] = "106 (EISCONN)", // "Transport endpoint is already connected",
- [107] = "107 (ENOTCONN)", // "Transport endpoint is not connected",
- [108] = "108 (ESHUTDOWN)", // "Cannot send after transport endpoint shutdown",
- [109] = "109 (ETOOMANYREFS)", // "Too many references: cannot splice",
- [110] = "110 (ETIMEDOUT)", // "Connection timed out",
- [111] = "111 (ECONNREFUSED)", // "Connection refused",
- [112] = "112 (EHOSTDOWN)", // "Host is down",
- [113] = "113 (EHOSTUNREACH)", // "No route to host",
- [114] = "114 (EALREADY)", // "Operation already in progress",
- [115] = "115 (EINPROGRESS)", // "Operation now in progress",
- [116] = "116 (ESTALE)", // "Stale file handle",
- [117] = "117 (EUCLEAN)", // "Structure needs cleaning",
- [118] = "118 (ENOTNAM)", // "Not a XENIX named type file",
- [119] = "119 (ENAVAIL)", // "No XENIX semaphores available",
- [120] = "120 (EISNAM)", // "Is a named type file",
- [121] = "121 (EREMOTEIO)", // "Remote I/O error",
- [122] = "122 (EDQUOT)", // "Disk quota exceeded",
- [123] = "123 (ENOMEDIUM)", // "No medium found",
- [124] = "124 (EMEDIUMTYPE)", // "Wrong medium type",
- [125] = "125 (ECANCELED)", // "Operation canceled",
- [126] = "126 (ENOKEY)", // "Required key not available",
- [127] = "127 (EKEYEXPIRED)", // "Key has expired",
- [128] = "128 (EKEYREVOKED)", // "Key has been revoked",
- [129] = "129 (EKEYREJECTED)", // "Key was rejected by service",
- [130] = "130 (EOWNERDEAD)", // "Owner died",
- [131] = "131 (ENOTRECOVERABLE)", // "State not recoverable",
- [132] = "132 (ERFKILL)", // "Operation not possible due to RF-kill",
- [133] = "133 (EHWPOISON)", // "Memory page has hardware error",
-};
-
-const char *linux_capabilities[] = {
- [CAP_CHOWN] = "CHOWN",
- [CAP_DAC_OVERRIDE] = "DAC_OVERRIDE",
- [CAP_DAC_READ_SEARCH] = "DAC_READ_SEARCH",
- [CAP_FOWNER] = "FOWNER",
- [CAP_FSETID] = "FSETID",
- [CAP_KILL] = "KILL",
- [CAP_SETGID] = "SETGID",
- [CAP_SETUID] = "SETUID",
- [CAP_SETPCAP] = "SETPCAP",
- [CAP_LINUX_IMMUTABLE] = "LINUX_IMMUTABLE",
- [CAP_NET_BIND_SERVICE] = "NET_BIND_SERVICE",
- [CAP_NET_BROADCAST] = "NET_BROADCAST",
- [CAP_NET_ADMIN] = "NET_ADMIN",
- [CAP_NET_RAW] = "NET_RAW",
- [CAP_IPC_LOCK] = "IPC_LOCK",
- [CAP_IPC_OWNER] = "IPC_OWNER",
- [CAP_SYS_MODULE] = "SYS_MODULE",
- [CAP_SYS_RAWIO] = "SYS_RAWIO",
- [CAP_SYS_CHROOT] = "SYS_CHROOT",
- [CAP_SYS_PTRACE] = "SYS_PTRACE",
- [CAP_SYS_PACCT] = "SYS_PACCT",
- [CAP_SYS_ADMIN] = "SYS_ADMIN",
- [CAP_SYS_BOOT] = "SYS_BOOT",
- [CAP_SYS_NICE] = "SYS_NICE",
- [CAP_SYS_RESOURCE] = "SYS_RESOURCE",
- [CAP_SYS_TIME] = "SYS_TIME",
- [CAP_SYS_TTY_CONFIG] = "SYS_TTY_CONFIG",
- [CAP_MKNOD] = "MKNOD",
- [CAP_LEASE] = "LEASE",
- [CAP_AUDIT_WRITE] = "AUDIT_WRITE",
- [CAP_AUDIT_CONTROL] = "AUDIT_CONTROL",
- [CAP_SETFCAP] = "SETFCAP",
- [CAP_MAC_OVERRIDE] = "MAC_OVERRIDE",
- [CAP_MAC_ADMIN] = "MAC_ADMIN",
- [CAP_SYSLOG] = "SYSLOG",
- [CAP_WAKE_ALARM] = "WAKE_ALARM",
- [CAP_BLOCK_SUSPEND] = "BLOCK_SUSPEND",
- [37 /*CAP_AUDIT_READ*/] = "AUDIT_READ",
- [38 /*CAP_PERFMON*/] = "PERFMON",
- [39 /*CAP_BPF*/] = "BPF",
- [40 /* CAP_CHECKPOINT_RESTORE */] = "CHECKPOINT_RESTORE",
-};
-
-static const char *syslog_facility_to_name(int facility) {
- switch (facility) {
- case LOG_FAC(LOG_KERN): return "kern";
- case LOG_FAC(LOG_USER): return "user";
- case LOG_FAC(LOG_MAIL): return "mail";
- case LOG_FAC(LOG_DAEMON): return "daemon";
- case LOG_FAC(LOG_AUTH): return "auth";
- case LOG_FAC(LOG_SYSLOG): return "syslog";
- case LOG_FAC(LOG_LPR): return "lpr";
- case LOG_FAC(LOG_NEWS): return "news";
- case LOG_FAC(LOG_UUCP): return "uucp";
- case LOG_FAC(LOG_CRON): return "cron";
- case LOG_FAC(LOG_AUTHPRIV): return "authpriv";
- case LOG_FAC(LOG_FTP): return "ftp";
- case LOG_FAC(LOG_LOCAL0): return "local0";
- case LOG_FAC(LOG_LOCAL1): return "local1";
- case LOG_FAC(LOG_LOCAL2): return "local2";
- case LOG_FAC(LOG_LOCAL3): return "local3";
- case LOG_FAC(LOG_LOCAL4): return "local4";
- case LOG_FAC(LOG_LOCAL5): return "local5";
- case LOG_FAC(LOG_LOCAL6): return "local6";
- case LOG_FAC(LOG_LOCAL7): return "local7";
- default: return NULL;
- }
-}
-
-static const char *syslog_priority_to_name(int priority) {
- switch (priority) {
- case LOG_ALERT: return "alert";
- case LOG_CRIT: return "critical";
- case LOG_DEBUG: return "debug";
- case LOG_EMERG: return "panic";
- case LOG_ERR: return "error";
- case LOG_INFO: return "info";
- case LOG_NOTICE: return "notice";
- case LOG_WARNING: return "warning";
- default: return NULL;
- }
-}
-
-FACET_ROW_SEVERITY syslog_priority_to_facet_severity(FACETS *facets __maybe_unused, FACET_ROW *row, void *data __maybe_unused) {
-    // same as
- // https://github.com/systemd/systemd/blob/aab9e4b2b86905a15944a1ac81e471b5b7075932/src/basic/terminal-util.c#L1501
- // function get_log_colors()
-
- FACET_ROW_KEY_VALUE *priority_rkv = dictionary_get(row->dict, "PRIORITY");
- if(!priority_rkv || priority_rkv->empty)
- return FACET_ROW_SEVERITY_NORMAL;
-
- int priority = str2i(buffer_tostring(priority_rkv->wb));
-
- if(priority <= LOG_ERR)
- return FACET_ROW_SEVERITY_CRITICAL;
-
- else if (priority <= LOG_WARNING)
- return FACET_ROW_SEVERITY_WARNING;
-
- else if(priority <= LOG_NOTICE)
- return FACET_ROW_SEVERITY_NOTICE;
-
- else if(priority >= LOG_DEBUG)
- return FACET_ROW_SEVERITY_DEBUG;
-
- return FACET_ROW_SEVERITY_NORMAL;
-}
-
-static char *uid_to_username(uid_t uid, char *buffer, size_t buffer_size) {
- static __thread char tmp[1024 + 1];
- struct passwd pw, *result = NULL;
-
- if (getpwuid_r(uid, &pw, tmp, sizeof(tmp), &result) != 0 || !result || !pw.pw_name || !(*pw.pw_name))
- snprintfz(buffer, buffer_size - 1, "%u", uid);
- else
- snprintfz(buffer, buffer_size - 1, "%u (%s)", uid, pw.pw_name);
-
- return buffer;
-}
-
-static char *gid_to_groupname(gid_t gid, char* buffer, size_t buffer_size) {
- static __thread char tmp[1024];
- struct group grp, *result = NULL;
-
- if (getgrgid_r(gid, &grp, tmp, sizeof(tmp), &result) != 0 || !result || !grp.gr_name || !(*grp.gr_name))
- snprintfz(buffer, buffer_size - 1, "%u", gid);
- else
- snprintfz(buffer, buffer_size - 1, "%u (%s)", gid, grp.gr_name);
-
- return buffer;
-}
-
-void netdata_systemd_journal_transform_syslog_facility(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) {
- const char *v = buffer_tostring(wb);
- if(*v && isdigit(*v)) {
- int facility = str2i(buffer_tostring(wb));
- const char *name = syslog_facility_to_name(facility);
- if (name) {
- buffer_flush(wb);
- buffer_strcat(wb, name);
- }
- }
-}
-
-void netdata_systemd_journal_transform_priority(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) {
- if(scope == FACETS_TRANSFORM_FACET_SORT)
- return;
-
- const char *v = buffer_tostring(wb);
- if(*v && isdigit(*v)) {
- int priority = str2i(buffer_tostring(wb));
- const char *name = syslog_priority_to_name(priority);
- if (name) {
- buffer_flush(wb);
- buffer_strcat(wb, name);
- }
- }
-}
-
-void netdata_systemd_journal_transform_errno(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) {
- if(scope == FACETS_TRANSFORM_FACET_SORT)
- return;
-
- const char *v = buffer_tostring(wb);
- if(*v && isdigit(*v)) {
- unsigned err_no = str2u(buffer_tostring(wb));
- if(err_no > 0 && err_no < sizeof(errno_map) / sizeof(*errno_map)) {
- const char *name = errno_map[err_no];
- if(name) {
- buffer_flush(wb);
- buffer_strcat(wb, name);
- }
- }
- }
-}
-
-// ----------------------------------------------------------------------------
-// UID and GID transformation
-
-#define UID_GID_HASHTABLE_SIZE 10000
-
-struct word_t2str_hashtable_entry {
- struct word_t2str_hashtable_entry *next;
- Word_t hash;
- size_t len;
- char str[];
-};
-
-struct word_t2str_hashtable {
- SPINLOCK spinlock;
- size_t size;
- struct word_t2str_hashtable_entry *hashtable[UID_GID_HASHTABLE_SIZE];
-};
-
-struct word_t2str_hashtable uid_hashtable = {
- .size = UID_GID_HASHTABLE_SIZE,
-};
-
-struct word_t2str_hashtable gid_hashtable = {
- .size = UID_GID_HASHTABLE_SIZE,
-};
-
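-// Return the address of the chain link where the entry with this hash lives,
-// or where it should be linked if absent, so callers can look up and insert in one pass.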
-struct word_t2str_hashtable_entry **word_t2str_hashtable_slot(struct word_t2str_hashtable *ht, Word_t hash) {
- size_t slot = hash % ht->size;
- struct word_t2str_hashtable_entry **e = &ht->hashtable[slot];
-
- while(*e && (*e)->hash != hash)
- e = &((*e)->next);
-
- return e;
-}
-
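-// Resolve a uid to a "uid (username)" string and cache it indefinitely,
-// so repeated journal rows do not call getpwuid_r() again.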
-const char *uid_to_username_cached(uid_t uid, size_t *length) {
- spinlock_lock(&uid_hashtable.spinlock);
-
- struct word_t2str_hashtable_entry **e = word_t2str_hashtable_slot(&uid_hashtable, uid);
- if(!(*e)) {
- static __thread char buf[1024];
- const char *name = uid_to_username(uid, buf, sizeof(buf));
- size_t size = strlen(name) + 1;
-
- *e = callocz(1, sizeof(struct word_t2str_hashtable_entry) + size);
- (*e)->len = size - 1;
- (*e)->hash = uid;
- memcpy((*e)->str, name, size);
- }
-
- spinlock_unlock(&uid_hashtable.spinlock);
-
- *length = (*e)->len;
- return (*e)->str;
-}
-
-const char *gid_to_groupname_cached(gid_t gid, size_t *length) {
- spinlock_lock(&gid_hashtable.spinlock);
-
- struct word_t2str_hashtable_entry **e = word_t2str_hashtable_slot(&gid_hashtable, gid);
- if(!(*e)) {
- static __thread char buf[1024];
- const char *name = gid_to_groupname(gid, buf, sizeof(buf));
- size_t size = strlen(name) + 1;
-
- *e = callocz(1, sizeof(struct word_t2str_hashtable_entry) + size);
- (*e)->len = size - 1;
- (*e)->hash = gid;
- memcpy((*e)->str, name, size);
- }
-
- spinlock_unlock(&gid_hashtable.spinlock);
-
- *length = (*e)->len;
- return (*e)->str;
-}
-
-DICTIONARY *boot_ids_to_first_ut = NULL;
-
-void netdata_systemd_journal_transform_boot_id(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) {
- const char *boot_id = buffer_tostring(wb);
- if(*boot_id && isxdigit(*boot_id)) {
- usec_t ut = UINT64_MAX;
- usec_t *p_ut = dictionary_get(boot_ids_to_first_ut, boot_id);
- if(!p_ut) {
-#ifndef HAVE_SD_JOURNAL_RESTART_FIELDS
- struct journal_file *jf;
- dfe_start_read(journal_files_registry, jf) {
- const char *files[2] = {
- [0] = jf_dfe.name,
- [1] = NULL,
- };
-
- sd_journal *j = NULL;
- int r = sd_journal_open_files(&j, files, ND_SD_JOURNAL_OPEN_FLAGS);
- if(r < 0 || !j) {
- internal_error(true, "JOURNAL: while looking for the first timestamp of boot_id '%s', "
- "sd_journal_open_files('%s') returned %d",
- boot_id, jf_dfe.name, r);
- continue;
- }
-
- ut = journal_file_update_annotation_boot_id(j, jf, boot_id);
- sd_journal_close(j);
- }
- dfe_done(jf);
-#endif
- }
- else
- ut = *p_ut;
-
- if(ut && ut != UINT64_MAX) {
- char buffer[RFC3339_MAX_LENGTH];
- rfc3339_datetime_ut(buffer, sizeof(buffer), ut, 0, true);
-
- switch(scope) {
- default:
- case FACETS_TRANSFORM_DATA:
- case FACETS_TRANSFORM_VALUE:
- buffer_sprintf(wb, " (%s) ", buffer);
- break;
-
- case FACETS_TRANSFORM_FACET:
- case FACETS_TRANSFORM_FACET_SORT:
- case FACETS_TRANSFORM_HISTOGRAM:
- buffer_flush(wb);
- buffer_sprintf(wb, "%s", buffer);
- break;
- }
- }
- }
-}
-
-void netdata_systemd_journal_transform_uid(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) {
- if(scope == FACETS_TRANSFORM_FACET_SORT)
- return;
-
- const char *v = buffer_tostring(wb);
- if(*v && isdigit(*v)) {
- uid_t uid = str2i(buffer_tostring(wb));
- size_t len;
- const char *name = uid_to_username_cached(uid, &len);
- buffer_contents_replace(wb, name, len);
- }
-}
-
-void netdata_systemd_journal_transform_gid(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) {
- if(scope == FACETS_TRANSFORM_FACET_SORT)
- return;
-
- const char *v = buffer_tostring(wb);
- if(*v && isdigit(*v)) {
- gid_t gid = str2i(buffer_tostring(wb));
- size_t len;
- const char *name = gid_to_groupname_cached(gid, &len);
- buffer_contents_replace(wb, name, len);
- }
-}
-
-void netdata_systemd_journal_transform_cap_effective(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) {
- if(scope == FACETS_TRANSFORM_FACET_SORT)
- return;
-
- const char *v = buffer_tostring(wb);
- if(*v && isdigit(*v)) {
- uint64_t cap = strtoul(buffer_tostring(wb), NULL, 16);
- if(cap) {
- buffer_fast_strcat(wb, " (", 2);
- for (size_t i = 0, added = 0; i < sizeof(linux_capabilities) / sizeof(linux_capabilities[0]); i++) {
- if (linux_capabilities[i] && (cap & (1ULL << i))) {
-
- if (added)
- buffer_fast_strcat(wb, " | ", 3);
-
- buffer_strcat(wb, linux_capabilities[i]);
- added++;
- }
- }
- buffer_fast_strcat(wb, ")", 1);
- }
- }
-}
-
-void netdata_systemd_journal_transform_timestamp_usec(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope __maybe_unused, void *data __maybe_unused) {
- if(scope == FACETS_TRANSFORM_FACET_SORT)
- return;
-
- const char *v = buffer_tostring(wb);
- if(*v && isdigit(*v)) {
- uint64_t ut = str2ull(buffer_tostring(wb), NULL);
- if(ut) {
- char buffer[RFC3339_MAX_LENGTH];
- rfc3339_datetime_ut(buffer, sizeof(buffer), ut, 6, true);
- buffer_sprintf(wb, " (%s)", buffer);
- }
- }
-}
-
-// ----------------------------------------------------------------------------
-
-void netdata_systemd_journal_dynamic_row_id(FACETS *facets __maybe_unused, BUFFER *json_array, FACET_ROW_KEY_VALUE *rkv, FACET_ROW *row, void *data __maybe_unused) {
- FACET_ROW_KEY_VALUE *pid_rkv = dictionary_get(row->dict, "_PID");
- const char *pid = pid_rkv ? buffer_tostring(pid_rkv->wb) : FACET_VALUE_UNSET;
-
- const char *identifier = NULL;
- FACET_ROW_KEY_VALUE *container_name_rkv = dictionary_get(row->dict, "CONTAINER_NAME");
- if(container_name_rkv && !container_name_rkv->empty)
- identifier = buffer_tostring(container_name_rkv->wb);
-
- if(!identifier) {
- FACET_ROW_KEY_VALUE *syslog_identifier_rkv = dictionary_get(row->dict, "SYSLOG_IDENTIFIER");
- if(syslog_identifier_rkv && !syslog_identifier_rkv->empty)
- identifier = buffer_tostring(syslog_identifier_rkv->wb);
-
- if(!identifier) {
- FACET_ROW_KEY_VALUE *comm_rkv = dictionary_get(row->dict, "_COMM");
- if(comm_rkv && !comm_rkv->empty)
- identifier = buffer_tostring(comm_rkv->wb);
- }
- }
-
- buffer_flush(rkv->wb);
-
- if(!identifier || !*identifier)
- buffer_strcat(rkv->wb, FACET_VALUE_UNSET);
- else if(!pid || !*pid)
- buffer_sprintf(rkv->wb, "%s", identifier);
- else
- buffer_sprintf(rkv->wb, "%s[%s]", identifier, pid);
-
- buffer_json_add_array_item_string(json_array, buffer_tostring(rkv->wb));
-}
-
-
-// ----------------------------------------------------------------------------
-
-struct message_id_info {
- const char *msg;
-};
-
-static DICTIONARY *known_journal_messages_ids = NULL;
-
-void netdata_systemd_journal_message_ids_init(void) {
- known_journal_messages_ids = dictionary_create(DICT_OPTION_DONT_OVERWRITE_VALUE);
-
- struct message_id_info i = { 0 };
- i.msg = "Journal start"; dictionary_set(known_journal_messages_ids, "f77379a8490b408bbe5f6940505a777b", &i, sizeof(i));
- i.msg = "Journal stop"; dictionary_set(known_journal_messages_ids, "d93fb3c9c24d451a97cea615ce59c00b", &i, sizeof(i));
- i.msg = "Journal dropped"; dictionary_set(known_journal_messages_ids, "a596d6fe7bfa4994828e72309e95d61e", &i, sizeof(i));
- i.msg = "Journal missed"; dictionary_set(known_journal_messages_ids, "e9bf28e6e834481bb6f48f548ad13606", &i, sizeof(i));
- i.msg = "Journal usage"; dictionary_set(known_journal_messages_ids, "ec387f577b844b8fa948f33cad9a75e6", &i, sizeof(i));
- i.msg = "Coredump"; dictionary_set(known_journal_messages_ids, "fc2e22bc6ee647b6b90729ab34a250b1", &i, sizeof(i));
- i.msg = "Truncated core"; dictionary_set(known_journal_messages_ids, "5aadd8e954dc4b1a8c954d63fd9e1137", &i, sizeof(i));
- i.msg = "Backtrace"; dictionary_set(known_journal_messages_ids, "1f4e0a44a88649939aaea34fc6da8c95", &i, sizeof(i));
- i.msg = "Session start"; dictionary_set(known_journal_messages_ids, "8d45620c1a4348dbb17410da57c60c66", &i, sizeof(i));
- i.msg = "Session stop"; dictionary_set(known_journal_messages_ids, "3354939424b4456d9802ca8333ed424a", &i, sizeof(i));
- i.msg = "Seat start"; dictionary_set(known_journal_messages_ids, "fcbefc5da23d428093f97c82a9290f7b", &i, sizeof(i));
- i.msg = "Seat stop"; dictionary_set(known_journal_messages_ids, "e7852bfe46784ed0accde04bc864c2d5", &i, sizeof(i));
- i.msg = "Machine start"; dictionary_set(known_journal_messages_ids, "24d8d4452573402496068381a6312df2", &i, sizeof(i));
- i.msg = "Machine stop"; dictionary_set(known_journal_messages_ids, "58432bd3bace477cb514b56381b8a758", &i, sizeof(i));
- i.msg = "Time change"; dictionary_set(known_journal_messages_ids, "c7a787079b354eaaa9e77b371893cd27", &i, sizeof(i));
- i.msg = "Timezone change"; dictionary_set(known_journal_messages_ids, "45f82f4aef7a4bbf942ce861d1f20990", &i, sizeof(i));
- i.msg = "Tainted"; dictionary_set(known_journal_messages_ids, "50876a9db00f4c40bde1a2ad381c3a1b", &i, sizeof(i));
- i.msg = "Startup finished"; dictionary_set(known_journal_messages_ids, "b07a249cd024414a82dd00cd181378ff", &i, sizeof(i));
- i.msg = "User startup finished"; dictionary_set(known_journal_messages_ids, "eed00a68ffd84e31882105fd973abdd1", &i, sizeof(i));
- i.msg = "Sleep start"; dictionary_set(known_journal_messages_ids, "6bbd95ee977941e497c48be27c254128", &i, sizeof(i));
- i.msg = "Sleep stop"; dictionary_set(known_journal_messages_ids, "8811e6df2a8e40f58a94cea26f8ebf14", &i, sizeof(i));
- i.msg = "Shutdown"; dictionary_set(known_journal_messages_ids, "98268866d1d54a499c4e98921d93bc40", &i, sizeof(i));
- i.msg = "Factory reset"; dictionary_set(known_journal_messages_ids, "c14aaf76ec284a5fa1f105f88dfb061c", &i, sizeof(i));
- i.msg = "Crash exit"; dictionary_set(known_journal_messages_ids, "d9ec5e95e4b646aaaea2fd05214edbda", &i, sizeof(i));
- i.msg = "Crash failed"; dictionary_set(known_journal_messages_ids, "3ed0163e868a4417ab8b9e210407a96c", &i, sizeof(i));
- i.msg = "Crash freeze"; dictionary_set(known_journal_messages_ids, "645c735537634ae0a32b15a7c6cba7d4", &i, sizeof(i));
- i.msg = "Crash no coredump"; dictionary_set(known_journal_messages_ids, "5addb3a06a734d3396b794bf98fb2d01", &i, sizeof(i));
- i.msg = "Crash no fork"; dictionary_set(known_journal_messages_ids, "5c9e98de4ab94c6a9d04d0ad793bd903", &i, sizeof(i));
- i.msg = "Crash unknown signal"; dictionary_set(known_journal_messages_ids, "5e6f1f5e4db64a0eaee3368249d20b94", &i, sizeof(i));
- i.msg = "Crash systemd signal"; dictionary_set(known_journal_messages_ids, "83f84b35ee264f74a3896a9717af34cb", &i, sizeof(i));
- i.msg = "Crash process signal"; dictionary_set(known_journal_messages_ids, "3a73a98baf5b4b199929e3226c0be783", &i, sizeof(i));
- i.msg = "Crash waitpid failed"; dictionary_set(known_journal_messages_ids, "2ed18d4f78ca47f0a9bc25271c26adb4", &i, sizeof(i));
- i.msg = "Crash coredump failed"; dictionary_set(known_journal_messages_ids, "56b1cd96f24246c5b607666fda952356", &i, sizeof(i));
- i.msg = "Crash coredump pid"; dictionary_set(known_journal_messages_ids, "4ac7566d4d7548f4981f629a28f0f829", &i, sizeof(i));
- i.msg = "Crash shell fork failed"; dictionary_set(known_journal_messages_ids, "38e8b1e039ad469291b18b44c553a5b7", &i, sizeof(i));
- i.msg = "Crash execle failed"; dictionary_set(known_journal_messages_ids, "872729b47dbe473eb768ccecd477beda", &i, sizeof(i));
- i.msg = "Selinux failed"; dictionary_set(known_journal_messages_ids, "658a67adc1c940b3b3316e7e8628834a", &i, sizeof(i));
- i.msg = "Battery low warning"; dictionary_set(known_journal_messages_ids, "e6f456bd92004d9580160b2207555186", &i, sizeof(i));
- i.msg = "Battery low poweroff"; dictionary_set(known_journal_messages_ids, "267437d33fdd41099ad76221cc24a335", &i, sizeof(i));
- i.msg = "Core mainloop failed"; dictionary_set(known_journal_messages_ids, "79e05b67bc4545d1922fe47107ee60c5", &i, sizeof(i));
- i.msg = "Core no xdgdir path"; dictionary_set(known_journal_messages_ids, "dbb136b10ef4457ba47a795d62f108c9", &i, sizeof(i));
- i.msg = "Core capability bounding user"; dictionary_set(known_journal_messages_ids, "ed158c2df8884fa584eead2d902c1032", &i, sizeof(i));
- i.msg = "Core capability bounding"; dictionary_set(known_journal_messages_ids, "42695b500df048298bee37159caa9f2e", &i, sizeof(i));
- i.msg = "Core disable privileges"; dictionary_set(known_journal_messages_ids, "bfc2430724ab44499735b4f94cca9295", &i, sizeof(i));
- i.msg = "Core start target failed"; dictionary_set(known_journal_messages_ids, "59288af523be43a28d494e41e26e4510", &i, sizeof(i));
- i.msg = "Core isolate target failed"; dictionary_set(known_journal_messages_ids, "689b4fcc97b4486ea5da92db69c9e314", &i, sizeof(i));
- i.msg = "Core fd set failed"; dictionary_set(known_journal_messages_ids, "5ed836f1766f4a8a9fc5da45aae23b29", &i, sizeof(i));
- i.msg = "Core pid1 environment"; dictionary_set(known_journal_messages_ids, "6a40fbfbd2ba4b8db02fb40c9cd090d7", &i, sizeof(i));
- i.msg = "Core manager allocate"; dictionary_set(known_journal_messages_ids, "0e54470984ac419689743d957a119e2e", &i, sizeof(i));
- i.msg = "Smack failed write"; dictionary_set(known_journal_messages_ids, "d67fa9f847aa4b048a2ae33535331adb", &i, sizeof(i));
- i.msg = "Shutdown error"; dictionary_set(known_journal_messages_ids, "af55a6f75b544431b72649f36ff6d62c", &i, sizeof(i));
- i.msg = "Valgrind helper fork"; dictionary_set(known_journal_messages_ids, "d18e0339efb24a068d9c1060221048c2", &i, sizeof(i));
- i.msg = "Unit starting"; dictionary_set(known_journal_messages_ids, "7d4958e842da4a758f6c1cdc7b36dcc5", &i, sizeof(i));
- i.msg = "Unit started"; dictionary_set(known_journal_messages_ids, "39f53479d3a045ac8e11786248231fbf", &i, sizeof(i));
- i.msg = "Unit failed"; dictionary_set(known_journal_messages_ids, "be02cf6855d2428ba40df7e9d022f03d", &i, sizeof(i));
- i.msg = "Unit stopping"; dictionary_set(known_journal_messages_ids, "de5b426a63be47a7b6ac3eaac82e2f6f", &i, sizeof(i));
- i.msg = "Unit stopped"; dictionary_set(known_journal_messages_ids, "9d1aaa27d60140bd96365438aad20286", &i, sizeof(i));
- i.msg = "Unit reloading"; dictionary_set(known_journal_messages_ids, "d34d037fff1847e6ae669a370e694725", &i, sizeof(i));
- i.msg = "Unit reloaded"; dictionary_set(known_journal_messages_ids, "7b05ebc668384222baa8881179cfda54", &i, sizeof(i));
- i.msg = "Unit restart scheduled"; dictionary_set(known_journal_messages_ids, "5eb03494b6584870a536b337290809b3", &i, sizeof(i));
- i.msg = "Unit resources"; dictionary_set(known_journal_messages_ids, "ae8f7b866b0347b9af31fe1c80b127c0", &i, sizeof(i));
- i.msg = "Unit success"; dictionary_set(known_journal_messages_ids, "7ad2d189f7e94e70a38c781354912448", &i, sizeof(i));
- i.msg = "Unit skipped"; dictionary_set(known_journal_messages_ids, "0e4284a0caca4bfc81c0bb6786972673", &i, sizeof(i));
- i.msg = "Unit failure result"; dictionary_set(known_journal_messages_ids, "d9b373ed55a64feb8242e02dbe79a49c", &i, sizeof(i));
- i.msg = "Spawn failed"; dictionary_set(known_journal_messages_ids, "641257651c1b4ec9a8624d7a40a9e1e7", &i, sizeof(i));
- i.msg = "Unit process exit"; dictionary_set(known_journal_messages_ids, "98e322203f7a4ed290d09fe03c09fe15", &i, sizeof(i));
- i.msg = "Forward syslog missed"; dictionary_set(known_journal_messages_ids, "0027229ca0644181a76c4e92458afa2e", &i, sizeof(i));
- i.msg = "Overmounting"; dictionary_set(known_journal_messages_ids, "1dee0369c7fc4736b7099b38ecb46ee7", &i, sizeof(i));
- i.msg = "Unit oomd kill"; dictionary_set(known_journal_messages_ids, "d989611b15e44c9dbf31e3c81256e4ed", &i, sizeof(i));
- i.msg = "Unit out of memory"; dictionary_set(known_journal_messages_ids, "fe6faa94e7774663a0da52717891d8ef", &i, sizeof(i));
- i.msg = "Lid opened"; dictionary_set(known_journal_messages_ids, "b72ea4a2881545a0b50e200e55b9b06f", &i, sizeof(i));
- i.msg = "Lid closed"; dictionary_set(known_journal_messages_ids, "b72ea4a2881545a0b50e200e55b9b070", &i, sizeof(i));
- i.msg = "System docked"; dictionary_set(known_journal_messages_ids, "f5f416b862074b28927a48c3ba7d51ff", &i, sizeof(i));
- i.msg = "System undocked"; dictionary_set(known_journal_messages_ids, "51e171bd585248568110144c517cca53", &i, sizeof(i));
- i.msg = "Power key"; dictionary_set(known_journal_messages_ids, "b72ea4a2881545a0b50e200e55b9b071", &i, sizeof(i));
- i.msg = "Power key long press"; dictionary_set(known_journal_messages_ids, "3e0117101eb243c1b9a50db3494ab10b", &i, sizeof(i));
- i.msg = "Reboot key"; dictionary_set(known_journal_messages_ids, "9fa9d2c012134ec385451ffe316f97d0", &i, sizeof(i));
- i.msg = "Reboot key long press"; dictionary_set(known_journal_messages_ids, "f1c59a58c9d943668965c337caec5975", &i, sizeof(i));
- i.msg = "Suspend key"; dictionary_set(known_journal_messages_ids, "b72ea4a2881545a0b50e200e55b9b072", &i, sizeof(i));
- i.msg = "Suspend key long press"; dictionary_set(known_journal_messages_ids, "bfdaf6d312ab4007bc1fe40a15df78e8", &i, sizeof(i));
- i.msg = "Hibernate key"; dictionary_set(known_journal_messages_ids, "b72ea4a2881545a0b50e200e55b9b073", &i, sizeof(i));
- i.msg = "Hibernate key long press"; dictionary_set(known_journal_messages_ids, "167836df6f7f428e98147227b2dc8945", &i, sizeof(i));
- i.msg = "Invalid configuration"; dictionary_set(known_journal_messages_ids, "c772d24e9a884cbeb9ea12625c306c01", &i, sizeof(i));
- i.msg = "Dnssec failure"; dictionary_set(known_journal_messages_ids, "1675d7f172174098b1108bf8c7dc8f5d", &i, sizeof(i));
- i.msg = "Dnssec trust anchor revoked"; dictionary_set(known_journal_messages_ids, "4d4408cfd0d144859184d1e65d7c8a65", &i, sizeof(i));
- i.msg = "Dnssec downgrade"; dictionary_set(known_journal_messages_ids, "36db2dfa5a9045e1bd4af5f93e1cf057", &i, sizeof(i));
- i.msg = "Unsafe user name"; dictionary_set(known_journal_messages_ids, "b61fdac612e94b9182285b998843061f", &i, sizeof(i));
- i.msg = "Mount point path not suitable"; dictionary_set(known_journal_messages_ids, "1b3bb94037f04bbf81028e135a12d293", &i, sizeof(i));
- i.msg = "Device path not suitable"; dictionary_set(known_journal_messages_ids, "010190138f494e29a0ef6669749531aa", &i, sizeof(i));
- i.msg = "Nobody user unsuitable"; dictionary_set(known_journal_messages_ids, "b480325f9c394a7b802c231e51a2752c", &i, sizeof(i));
- i.msg = "Systemd udev settle deprecated"; dictionary_set(known_journal_messages_ids, "1c0454c1bd2241e0ac6fefb4bc631433", &i, sizeof(i));
- i.msg = "Time sync"; dictionary_set(known_journal_messages_ids, "7c8a41f37b764941a0e1780b1be2f037", &i, sizeof(i));
- i.msg = "Time bump"; dictionary_set(known_journal_messages_ids, "7db73c8af0d94eeb822ae04323fe6ab6", &i, sizeof(i));
- i.msg = "Shutdown scheduled"; dictionary_set(known_journal_messages_ids, "9e7066279dc8403da79ce4b1a69064b2", &i, sizeof(i));
- i.msg = "Shutdown canceled"; dictionary_set(known_journal_messages_ids, "249f6fb9e6e2428c96f3f0875681ffa3", &i, sizeof(i));
- i.msg = "TPM pcr extend"; dictionary_set(known_journal_messages_ids, "3f7d5ef3e54f4302b4f0b143bb270cab", &i, sizeof(i));
- i.msg = "Memory trim"; dictionary_set(known_journal_messages_ids, "f9b0be465ad540d0850ad32172d57c21", &i, sizeof(i));
- i.msg = "Sysv generator deprecated"; dictionary_set(known_journal_messages_ids, "a8fa8dacdb1d443e9503b8be367a6adb", &i, sizeof(i));
-
- // gnome
- // https://gitlab.gnome.org/GNOME/gnome-session/-/blob/main/gnome-session/gsm-manager.c
- i.msg = "Gnome SM startup succeeded"; dictionary_set(known_journal_messages_ids, "0ce153587afa4095832d233c17a88001", &i, sizeof(i));
- i.msg = "Gnome SM unrecoverable failure"; dictionary_set(known_journal_messages_ids, "10dd2dc188b54a5e98970f56499d1f73", &i, sizeof(i));
-
- // gnome-shell
- // https://gitlab.gnome.org/GNOME/gnome-shell/-/blob/main/js/ui/main.js#L56
- i.msg = "Gnome shell started";dictionary_set(known_journal_messages_ids, "f3ea493c22934e26811cd62abe8e203a", &i, sizeof(i));
-
- // flathub
- // https://docs.flatpak.org/de/latest/flatpak-command-reference.html
- i.msg = "Flatpak cache"; dictionary_set(known_journal_messages_ids, "c7b39b1e006b464599465e105b361485", &i, sizeof(i));
-
- // ???
- i.msg = "Flathub pulls"; dictionary_set(known_journal_messages_ids, "75ba3deb0af041a9a46272ff85d9e73e", &i, sizeof(i));
- i.msg = "Flathub pull errors"; dictionary_set(known_journal_messages_ids, "f02bce89a54e4efab3a94a797d26204a", &i, sizeof(i));
-
- // ??
- i.msg = "Boltd starting"; dictionary_set(known_journal_messages_ids, "dd11929c788e48bdbb6276fb5f26b08a", &i, sizeof(i));
-
- // Netdata
- i.msg = "Netdata connection from child"; dictionary_set(known_journal_messages_ids, "ed4cdb8f1beb4ad3b57cb3cae2d162fa", &i, sizeof(i));
- i.msg = "Netdata connection to parent"; dictionary_set(known_journal_messages_ids, "6e2e3839067648968b646045dbf28d66", &i, sizeof(i));
- i.msg = "Netdata alert transition"; dictionary_set(known_journal_messages_ids, "9ce0cb58ab8b44df82c4bf1ad9ee22de", &i, sizeof(i));
- i.msg = "Netdata alert notification"; dictionary_set(known_journal_messages_ids, "6db0018e83e34320ae2a659d78019fb7", &i, sizeof(i));
-}
-
-void netdata_systemd_journal_transform_message_id(FACETS *facets __maybe_unused, BUFFER *wb, FACETS_TRANSFORMATION_SCOPE scope, void *data __maybe_unused) {
- const char *message_id = buffer_tostring(wb);
- struct message_id_info *i = dictionary_get(known_journal_messages_ids, message_id);
-
- if(!i)
- return;
-
- switch(scope) {
- default:
- case FACETS_TRANSFORM_DATA:
- case FACETS_TRANSFORM_VALUE:
- buffer_sprintf(wb, " (%s)", i->msg);
- break;
-
- case FACETS_TRANSFORM_FACET:
- case FACETS_TRANSFORM_FACET_SORT:
- case FACETS_TRANSFORM_HISTOGRAM:
- buffer_flush(wb);
- buffer_strcat(wb, i->msg);
- break;
- }
-}
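-
-// Illustrative example (values taken from the table above): for the value
-// "f77379a8490b408bbe5f6940505a777b", registered as "Journal start",
-// FACETS_TRANSFORM_VALUE appends the annotation, yielding
-// "f77379a8490b408bbe5f6940505a777b (Journal start)", while
-// FACETS_TRANSFORM_FACET and the histogram scopes replace the buffer
-// with just "Journal start".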
-
-// ----------------------------------------------------------------------------
-
-static void netdata_systemd_journal_rich_message(FACETS *facets __maybe_unused, BUFFER *json_array, FACET_ROW_KEY_VALUE *rkv, FACET_ROW *row __maybe_unused, void *data __maybe_unused) {
- buffer_json_add_array_item_object(json_array);
- buffer_json_member_add_string(json_array, "value", buffer_tostring(rkv->wb));
- buffer_json_object_close(json_array);
-}
diff --git a/collectors/systemd-journal.plugin/systemd-journal-files.c b/collectors/systemd-journal.plugin/systemd-journal-files.c
deleted file mode 100644
index 56496df22..000000000
--- a/collectors/systemd-journal.plugin/systemd-journal-files.c
+++ /dev/null
@@ -1,857 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "systemd-internals.h"
-
-#define SYSTEMD_JOURNAL_MAX_SOURCE_LEN 64
-#define VAR_LOG_JOURNAL_MAX_DEPTH 10
-
-struct journal_directory journal_directories[MAX_JOURNAL_DIRECTORIES] = { 0 };
-DICTIONARY *journal_files_registry = NULL;
-DICTIONARY *used_hashes_registry = NULL;
-
-static usec_t systemd_journal_session = 0;
-
-void buffer_json_journal_versions(BUFFER *wb) {
- buffer_json_member_add_object(wb, "versions");
- {
- buffer_json_member_add_uint64(wb, "sources",
- systemd_journal_session + dictionary_version(journal_files_registry));
- }
- buffer_json_object_close(wb);
-}
-
-static bool journal_sd_id128_parse(const char *in, sd_id128_t *ret) {
- while(isspace(*in))
- in++;
-
- char uuid[33];
- strncpyz(uuid, in, 32);
- uuid[32] = '\0';
-
- if(strlen(uuid) == 32) {
- sd_id128_t read;
- if(sd_id128_from_string(uuid, &read) == 0) {
- *ret = read;
- return true;
- }
- }
-
- return false;
-}
-
-static void journal_file_get_header_from_journalctl(const char *filename, struct journal_file *jf) {
- // unfortunately, our capabilities are not inherited by journalctl,
- // so it fails to give us the information we need.
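-
- // For reference, example `journalctl --header` output lines this parser
- // looks for (the field names are the ones matched below; the values are
- // invented):
- //
- //   Sequential Number ID: 3a9c5b1e8d2f4a6c9e0b7d5f1a3c8e2b
- //   Head sequential number: 4096
- //   Tail sequential number: 8192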
-
- bool read_writer = false, read_head = false, read_tail = false;
-
- char cmd[FILENAME_MAX * 2];
- snprintfz(cmd, sizeof(cmd), "journalctl --header --file '%s'", filename);
- CLEAN_BUFFER *wb = run_command_and_get_output_to_buffer(cmd, 1024);
- if(wb) {
- const char *s = buffer_tostring(wb);
-
- const char *sequential_id_header = "Sequential Number ID:";
- const char *sequential_id_data = strcasestr(s, sequential_id_header);
- if(sequential_id_data) {
- sequential_id_data += strlen(sequential_id_header);
- if(journal_sd_id128_parse(sequential_id_data, &jf->first_writer_id))
- read_writer = true;
- }
-
- const char *head_sequential_number_header = "Head sequential number:";
- const char *head_sequential_number_data = strcasestr(s, head_sequential_number_header);
- if(head_sequential_number_data) {
- head_sequential_number_data += strlen(head_sequential_number_header);
-
- while(isspace(*head_sequential_number_data))
- head_sequential_number_data++;
-
- if(isdigit(*head_sequential_number_data)) {
- jf->first_seqnum = strtoul(head_sequential_number_data, NULL, 10);
- if(jf->first_seqnum)
- read_head = true;
- }
- }
-
- const char *tail_sequential_number_header = "Tail sequential number:";
- const char *tail_sequential_number_data = strcasestr(s, tail_sequential_number_header);
- if(tail_sequential_number_data) {
- tail_sequential_number_data += strlen(tail_sequential_number_header);
-
- while(isspace(*tail_sequential_number_data))
- tail_sequential_number_data++;
-
- if(isdigit(*tail_sequential_number_data)) {
- jf->last_seqnum = strtoul(tail_sequential_number_data, NULL, 10);
- if(jf->last_seqnum)
- read_tail = true;
- }
- }
-
- if(read_head && read_tail && jf->last_seqnum > jf->first_seqnum)
- jf->messages_in_file = jf->last_seqnum - jf->first_seqnum;
- }
-
- if(!jf->logged_journalctl_failure && (!read_writer || !read_head || !read_tail)) {
-
- nd_log(NDLS_COLLECTORS, NDLP_NOTICE,
- "Failed to read %s%s%s from journalctl's output on filename '%s', using the command: %s",
- read_writer?"":"writer id,",
- read_head?"":"head id,",
- read_tail?"":"tail id,",
- filename, cmd);
-
- jf->logged_journalctl_failure = true;
- }
-}
-
-usec_t journal_file_update_annotation_boot_id(sd_journal *j, struct journal_file *jf, const char *boot_id) {
- usec_t ut = UINT64_MAX;
- int r;
-
- char m[100];
- size_t len = snprintfz(m, sizeof(m), "_BOOT_ID=%s", boot_id);
-
- sd_journal_flush_matches(j);
-
- r = sd_journal_add_match(j, m, len);
- if(r < 0) {
- errno = -r;
- internal_error(true,
- "JOURNAL: while looking for the first timestamp of boot_id '%s', "
- "sd_journal_add_match('%s') on file '%s' returned %d",
- boot_id, m, jf->filename, r);
- return UINT64_MAX;
- }
-
- r = sd_journal_seek_head(j);
- if(r < 0) {
- errno = -r;
- internal_error(true,
- "JOURNAL: while looking for the first timestamp of boot_id '%s', "
- "sd_journal_seek_head() on file '%s' returned %d",
- boot_id, jf->filename, r);
- return UINT64_MAX;
- }
-
- r = sd_journal_next(j);
- if(r < 0) {
- errno = -r;
- internal_error(true,
- "JOURNAL: while looking for the first timestamp of boot_id '%s', "
- "sd_journal_next() on file '%s' returned %d",
- boot_id, jf->filename, r);
- return UINT64_MAX;
- }
-
- r = sd_journal_get_realtime_usec(j, &ut);
- if(r < 0 || !ut || ut == UINT64_MAX) {
- errno = -r;
- internal_error(r != -EADDRNOTAVAIL,
- "JOURNAL: while looking for the first timestamp of boot_id '%s', "
- "sd_journal_get_realtime_usec() on file '%s' returned %d",
- boot_id, jf->filename, r);
- return UINT64_MAX;
- }
-
- // the checks above guarantee ut is valid here
- dictionary_set(boot_ids_to_first_ut, boot_id, &ut, sizeof(ut));
- return ut;
-}
-
-static void journal_file_get_boot_id_annotations(sd_journal *j __maybe_unused, struct journal_file *jf __maybe_unused) {
-#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS
- sd_journal_flush_matches(j);
-
- int r = sd_journal_query_unique(j, "_BOOT_ID");
- if (r < 0) {
- errno = -r;
- internal_error(true,
- "JOURNAL: while querying for the unique _BOOT_ID values, "
- "sd_journal_query_unique() on file '%s' returned %d",
- jf->filename, r);
- return;
- }
-
- const void *data = NULL;
- size_t data_length;
-
- DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED);
-
- SD_JOURNAL_FOREACH_UNIQUE(j, data, data_length) {
- const char *key, *value;
- size_t key_length, value_length;
-
- if(!parse_journal_field(data, data_length, &key, &key_length, &value, &value_length))
- continue;
-
- if(value_length != 32)
- continue;
-
- char buf[33];
- memcpy(buf, value, 32);
- buf[32] = '\0';
-
- dictionary_set(dict, buf, NULL, 0);
- }
-
- void *nothing;
- dfe_start_read(dict, nothing){
- journal_file_update_annotation_boot_id(j, jf, nothing_dfe.name);
- }
- dfe_done(nothing);
-
- dictionary_destroy(dict);
-#endif
-}
-
-void journal_file_update_header(const char *filename, struct journal_file *jf) {
- if(jf->last_scan_header_vs_last_modified_ut == jf->file_last_modified_ut)
- return;
-
- fstat_cache_enable_on_thread();
-
- const char *files[2] = {
- [0] = filename,
- [1] = NULL,
- };
-
- sd_journal *j = NULL;
- if(sd_journal_open_files(&j, files, ND_SD_JOURNAL_OPEN_FLAGS) < 0 || !j) {
- netdata_log_error("JOURNAL: cannot open file '%s' to update msg_ut", filename);
- fstat_cache_disable_on_thread();
-
- if(!jf->logged_failure) {
- netdata_log_error("cannot open journal file '%s', using file timestamps to understand time-frame.", filename);
- jf->logged_failure = true;
- }
-
- jf->msg_first_ut = 0;
- jf->msg_last_ut = jf->file_last_modified_ut;
- jf->last_scan_header_vs_last_modified_ut = jf->file_last_modified_ut;
- return;
- }
-
- usec_t first_ut = 0, last_ut = 0;
- uint64_t first_seqnum = 0, last_seqnum = 0;
- sd_id128_t first_writer_id = SD_ID128_NULL, last_writer_id = SD_ID128_NULL;
-
- if(sd_journal_seek_head(j) < 0 || sd_journal_next(j) < 0 || sd_journal_get_realtime_usec(j, &first_ut) < 0 || !first_ut) {
- internal_error(true, "cannot find the timestamp of the first message in '%s'", filename);
- first_ut = 0;
- }
-#ifdef HAVE_SD_JOURNAL_GET_SEQNUM
- else {
- if(sd_journal_get_seqnum(j, &first_seqnum, &first_writer_id) < 0 || !first_seqnum) {
- internal_error(true, "cannot find the first seqnums of the first message in '%s'", filename);
- first_seqnum = 0;
- memset(&first_writer_id, 0, sizeof(first_writer_id));
- }
- }
-#endif
-
- if(sd_journal_seek_tail(j) < 0 || sd_journal_previous(j) < 0 || sd_journal_get_realtime_usec(j, &last_ut) < 0 || !last_ut) {
- internal_error(true, "cannot find the timestamp of the last message in '%s'", filename);
- last_ut = jf->file_last_modified_ut;
- }
-#ifdef HAVE_SD_JOURNAL_GET_SEQNUM
- else {
- if(sd_journal_get_seqnum(j, &last_seqnum, &last_writer_id) < 0 || !last_seqnum) {
- internal_error(true, "cannot find the last seqnums of the first message in '%s'", filename);
- last_seqnum = 0;
- memset(&last_writer_id, 0, sizeof(last_writer_id));
- }
- }
-#endif
-
- if(first_ut > last_ut) {
- internal_error(true, "timestamps are flipped in file '%s'", filename);
- usec_t t = first_ut;
- first_ut = last_ut;
- last_ut = t;
- }
-
- if(!first_seqnum || !first_ut) {
- // extract these from the filename - if possible
-
- const char *at = strchr(filename, '@');
- if(at) {
- const char *dash_seqnum = strchr(at + 1, '-');
- if(dash_seqnum) {
- const char *dash_first_msg_ut = strchr(dash_seqnum + 1, '-');
- if(dash_first_msg_ut) {
- const char *dot_journal = strstr(dash_first_msg_ut + 1, ".journal");
- if(dot_journal) {
- if(dash_seqnum - at - 1 == 32 &&
- dash_first_msg_ut - dash_seqnum - 1 == 16 &&
- dot_journal - dash_first_msg_ut - 1 == 16) {
- sd_id128_t writer;
- if(journal_sd_id128_parse(at + 1, &writer)) {
- char *endptr = NULL;
- uint64_t seqnum = strtoull(dash_seqnum + 1, &endptr, 16);
- if(endptr == dash_first_msg_ut) {
- uint64_t ts = strtoull(dash_first_msg_ut + 1, &endptr, 16);
- if(endptr == dot_journal) {
- first_seqnum = seqnum;
- first_writer_id = writer;
- first_ut = ts;
- }
- }
- }
- }
- }
- }
- }
- }
- }
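-
- // Illustrative example of the archived-file naming convention parsed above
- // (hypothetical values): rotated journals are named
- //   <prefix>@<writer id: 32 hex>-<seqnum: 16 hex>-<first msg ut: 16 hex>.journal
- // e.g. system@3a9c5b1e8d2f4a6c9e0b7d5f1a3c8e2b-000000000000a3f2-0005f2e1a3b4c5d6.journal
- // from which the first writer id, seqnum and timestamp are recovered.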
-
- jf->first_seqnum = first_seqnum;
- jf->last_seqnum = last_seqnum;
-
- jf->first_writer_id = first_writer_id;
- jf->last_writer_id = last_writer_id;
-
- jf->msg_first_ut = first_ut;
- jf->msg_last_ut = last_ut;
-
- if(!jf->msg_last_ut)
- jf->msg_last_ut = jf->file_last_modified_ut;
-
- if(last_seqnum > first_seqnum) {
- if(!sd_id128_equal(first_writer_id, last_writer_id)) {
- jf->messages_in_file = 0;
- nd_log(NDLS_COLLECTORS, NDLP_NOTICE,
- "The writers of the first and the last message in file '%s' differ."
- , filename);
- }
- else
- jf->messages_in_file = last_seqnum - first_seqnum + 1;
- }
- else
- jf->messages_in_file = 0;
-
-// if(!jf->messages_in_file)
-// journal_file_get_header_from_journalctl(filename, jf);
-
- journal_file_get_boot_id_annotations(j, jf);
- sd_journal_close(j);
- fstat_cache_disable_on_thread();
-
- jf->last_scan_header_vs_last_modified_ut = jf->file_last_modified_ut;
-
- nd_log(NDLS_COLLECTORS, NDLP_DEBUG,
- "Journal file header updated '%s'",
- jf->filename);
-}
-
-static STRING *string_strdupz_source(const char *s, const char *e, size_t max_len, const char *prefix) {
- char buf[max_len];
- size_t len;
- char *dst = buf;
-
- if(prefix) {
- len = strlen(prefix);
- memcpy(buf, prefix, len);
- dst = &buf[len];
- max_len -= len;
- }
-
- len = e - s;
- if(len >= max_len)
- len = max_len - 1;
- memcpy(dst, s, len);
- dst[len] = '\0';
- buf[max_len - 1] = '\0';
-
- for(size_t i = 0; buf[i] ;i++)
- if(!isalnum(buf[i]) && buf[i] != '-' && buf[i] != '.' && buf[i] != ':')
- buf[i] = '_';
-
- return string_strdupz(buf);
-}
-
-static void files_registry_insert_cb(const DICTIONARY_ITEM *item, void *value, void *data __maybe_unused) {
- struct journal_file *jf = value;
- jf->filename = dictionary_acquired_item_name(item);
- jf->filename_len = strlen(jf->filename);
- jf->source_type = SDJF_ALL;
-
- // based on the filename
- // decide the source to show to the user
- const char *s = strrchr(jf->filename, '/');
- if(s) {
- if(strstr(jf->filename, "/remote/")) {
- jf->source_type |= SDJF_REMOTE_ALL;
-
- if(strncmp(s, "/remote-", 8) == 0) {
- s = &s[8]; // skip "/remote-"
-
- char *e = strchr(s, '@');
- if(!e)
- e = strstr(s, ".journal");
-
- if(e) {
- const char *d = s;
- for(; d < e && (isdigit(*d) || *d == '.' || *d == ':') ; d++) ;
- if(d == e) {
- // a valid IP address
- char ip[e - s + 1];
- memcpy(ip, s, e - s);
- ip[e - s] = '\0';
- char buf[SYSTEMD_JOURNAL_MAX_SOURCE_LEN];
- if(ip_to_hostname(ip, buf, sizeof(buf)))
- jf->source = string_strdupz_source(buf, &buf[strlen(buf)], SYSTEMD_JOURNAL_MAX_SOURCE_LEN, "remote-");
- else {
- internal_error(true, "Cannot find the hostname for IP '%s'", ip);
- jf->source = string_strdupz_source(s, e, SYSTEMD_JOURNAL_MAX_SOURCE_LEN, "remote-");
- }
- }
- else
- jf->source = string_strdupz_source(s, e, SYSTEMD_JOURNAL_MAX_SOURCE_LEN, "remote-");
- }
- }
- }
- else {
- jf->source_type |= SDJF_LOCAL_ALL;
-
- const char *t = s - 1;
- while(t >= jf->filename && *t != '.' && *t != '/')
- t--;
-
- if(t >= jf->filename && *t == '.') {
- jf->source_type |= SDJF_LOCAL_NAMESPACE;
- jf->source = string_strdupz_source(t + 1, s, SYSTEMD_JOURNAL_MAX_SOURCE_LEN, "namespace-");
- }
- else if(strncmp(s, "/system", 7) == 0)
- jf->source_type |= SDJF_LOCAL_SYSTEM;
-
- else if(strncmp(s, "/user", 5) == 0)
- jf->source_type |= SDJF_LOCAL_USER;
-
- else
- jf->source_type |= SDJF_LOCAL_OTHER;
- }
- }
- else
- jf->source_type |= SDJF_LOCAL_ALL | SDJF_LOCAL_OTHER;
-
- jf->msg_last_ut = jf->file_last_modified_ut;
-
- nd_log(NDLS_COLLECTORS, NDLP_DEBUG,
- "Journal file added to the journal files registry: '%s'",
- jf->filename);
-}
-
-static bool files_registry_conflict_cb(const DICTIONARY_ITEM *item, void *old_value, void *new_value, void *data __maybe_unused) {
- struct journal_file *jf = old_value;
- struct journal_file *njf = new_value;
-
- if(njf->last_scan_monotonic_ut > jf->last_scan_monotonic_ut)
- jf->last_scan_monotonic_ut = njf->last_scan_monotonic_ut;
-
- if(njf->file_last_modified_ut > jf->file_last_modified_ut) {
- jf->file_last_modified_ut = njf->file_last_modified_ut;
- jf->size = njf->size;
-
- jf->msg_last_ut = jf->file_last_modified_ut;
-
- nd_log(NDLS_COLLECTORS, NDLP_DEBUG,
- "Journal file updated to the journal files registry '%s'",
- jf->filename);
- }
-
- return false;
-}
-
-struct journal_file_source {
- usec_t first_ut;
- usec_t last_ut;
- size_t count;
- uint64_t size;
-};
-
-static void human_readable_size_ib(uint64_t size, char *dst, size_t dst_len) {
- if(size > 1024ULL * 1024 * 1024 * 1024)
- snprintfz(dst, dst_len, "%0.2f TiB", (double)size / 1024.0 / 1024.0 / 1024.0 / 1024.0);
- else if(size > 1024ULL * 1024 * 1024)
- snprintfz(dst, dst_len, "%0.2f GiB", (double)size / 1024.0 / 1024.0 / 1024.0);
- else if(size > 1024ULL * 1024)
- snprintfz(dst, dst_len, "%0.2f MiB", (double)size / 1024.0 / 1024.0);
- else if(size > 1024ULL)
- snprintfz(dst, dst_len, "%0.2f KiB", (double)size / 1024.0);
- else
- snprintfz(dst, dst_len, "%"PRIu64" B", size);
-}
-
-#define print_duration(dst, dst_len, pos, remaining, duration, one, many, printed) do { \
- if((remaining) > (duration)) { \
- uint64_t _count = (remaining) / (duration); \
- uint64_t _rem = (remaining) - (_count * (duration)); \
- (pos) += snprintfz(&(dst)[pos], (dst_len) - (pos), "%s%s%"PRIu64" %s", (printed) ? ", " : "", _rem ? "" : "and ", _count, _count > 1 ? (many) : (one)); \
- (remaining) = _rem; \
- (printed) = true; \
- } \
-} while(0)
-
-static void human_readable_duration_s(time_t duration_s, char *dst, size_t dst_len) {
- if(duration_s < 0)
- duration_s = -duration_s;
-
- size_t pos = 0;
- dst[0] = '\0';
-
- bool printed = false;
- print_duration(dst, dst_len, pos, duration_s, 86400 * 365, "year", "years", printed);
- print_duration(dst, dst_len, pos, duration_s, 86400 * 30, "month", "months", printed);
- print_duration(dst, dst_len, pos, duration_s, 86400 * 1, "day", "days", printed);
- print_duration(dst, dst_len, pos, duration_s, 3600 * 1, "hour", "hours", printed);
- print_duration(dst, dst_len, pos, duration_s, 60 * 1, "min", "mins", printed);
- print_duration(dst, dst_len, pos, duration_s, 1, "sec", "secs", printed);
-}
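-
-// Usage example: for duration_s = 90061 (one day, one hour, one minute and
-// one second) the components above produce "1 day, 1 hour, 1 min, and 1 sec" -
-// the macro emits "and " before the final component, when the remainder
-// reaches zero.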
-
-static int journal_file_to_json_array_cb(const DICTIONARY_ITEM *item, void *entry, void *data) {
- struct journal_file_source *jfs = entry;
- BUFFER *wb = data;
-
- const char *name = dictionary_acquired_item_name(item);
-
- buffer_json_add_array_item_object(wb);
- {
- char size_for_humans[100];
- human_readable_size_ib(jfs->size, size_for_humans, sizeof(size_for_humans));
-
- char duration_for_humans[1024];
- human_readable_duration_s((time_t)((jfs->last_ut - jfs->first_ut) / USEC_PER_SEC),
- duration_for_humans, sizeof(duration_for_humans));
-
- char info[1024];
- snprintfz(info, sizeof(info), "%zu files, with a total size of %s, covering %s",
- jfs->count, size_for_humans, duration_for_humans);
-
- buffer_json_member_add_string(wb, "id", name);
- buffer_json_member_add_string(wb, "name", name);
- buffer_json_member_add_string(wb, "pill", size_for_humans);
- buffer_json_member_add_string(wb, "info", info);
- }
- buffer_json_object_close(wb); // options object
-
- return 1;
-}
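-
-// Illustrative array item emitted by the callback above (hypothetical values):
-//   { "id": "system", "name": "system", "pill": "1.20 GiB",
-//     "info": "3 files, with a total size of 1.20 GiB, covering 2 days, and 4 hours" }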
-
-static bool journal_file_merge_sizes(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value , void *data __maybe_unused) {
- struct journal_file_source *jfs = old_value, *njfs = new_value;
- jfs->count += njfs->count;
- jfs->size += njfs->size;
-
- if(njfs->first_ut && njfs->first_ut < jfs->first_ut)
- jfs->first_ut = njfs->first_ut;
-
- if(njfs->last_ut && njfs->last_ut > jfs->last_ut)
- jfs->last_ut = njfs->last_ut;
-
- return false;
-}
-
-void available_journal_file_sources_to_json_array(BUFFER *wb) {
- DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_NAME_LINK_DONT_CLONE|DICT_OPTION_DONT_OVERWRITE_VALUE);
- dictionary_register_conflict_callback(dict, journal_file_merge_sizes, NULL);
-
- struct journal_file_source t = { 0 };
-
- struct journal_file *jf;
- dfe_start_read(journal_files_registry, jf) {
- t.first_ut = jf->msg_first_ut;
- t.last_ut = jf->msg_last_ut;
- t.count = 1;
- t.size = jf->size;
-
- dictionary_set(dict, SDJF_SOURCE_ALL_NAME, &t, sizeof(t));
-
- if(jf->source_type & SDJF_LOCAL_ALL)
- dictionary_set(dict, SDJF_SOURCE_LOCAL_NAME, &t, sizeof(t));
- if(jf->source_type & SDJF_LOCAL_SYSTEM)
- dictionary_set(dict, SDJF_SOURCE_LOCAL_SYSTEM_NAME, &t, sizeof(t));
- if(jf->source_type & SDJF_LOCAL_USER)
- dictionary_set(dict, SDJF_SOURCE_LOCAL_USERS_NAME, &t, sizeof(t));
- if(jf->source_type & SDJF_LOCAL_OTHER)
- dictionary_set(dict, SDJF_SOURCE_LOCAL_OTHER_NAME, &t, sizeof(t));
- if(jf->source_type & SDJF_LOCAL_NAMESPACE)
- dictionary_set(dict, SDJF_SOURCE_NAMESPACES_NAME, &t, sizeof(t));
- if(jf->source_type & SDJF_REMOTE_ALL)
- dictionary_set(dict, SDJF_SOURCE_REMOTES_NAME, &t, sizeof(t));
- if(jf->source)
- dictionary_set(dict, string2str(jf->source), &t, sizeof(t));
- }
- dfe_done(jf);
-
- dictionary_sorted_walkthrough_read(dict, journal_file_to_json_array_cb, wb);
-
- dictionary_destroy(dict);
-}
-
-static void files_registry_delete_cb(const DICTIONARY_ITEM *item, void *value, void *data __maybe_unused) {
- struct journal_file *jf = value; (void)jf;
- const char *filename = dictionary_acquired_item_name(item); (void)filename;
-
- internal_error(true, "removed journal file '%s'", filename);
- string_freez(jf->source);
-}
-
-void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, const char *dirname, int depth) {
- static const char *ext = ".journal";
- static const ssize_t ext_len = sizeof(".journal") - 1;
-
- if (depth > VAR_LOG_JOURNAL_MAX_DEPTH)
- return;
-
- DIR *dir;
- struct dirent *entry;
- char full_path[FILENAME_MAX];
-
- // Skip directories we have already visited (this also protects against
- // symlink loops). Check before opendir() so we don't leak the DIR handle.
- bool existing = false;
- bool *found = dictionary_set(dirs, dirname, &existing, sizeof(existing));
- if(*found) return;
- *found = true;
-
- // Open the directory.
- if ((dir = opendir(dirname)) == NULL) {
- if(errno != ENOENT && errno != ENOTDIR)
- netdata_log_error("Cannot opendir() '%s'", dirname);
- return;
- }
-
- // Read each entry in the directory.
- while ((entry = readdir(dir)) != NULL) {
- if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0)
- continue;
-
- ssize_t len = snprintfz(full_path, sizeof(full_path), "%s/%s", dirname, entry->d_name);
-
- if (entry->d_type == DT_DIR) {
- journal_directory_scan_recursively(files, dirs, full_path, depth + 1);
- }
- else if (entry->d_type == DT_REG && len > ext_len && strcmp(full_path + len - ext_len, ext) == 0) {
- if(files)
- dictionary_set(files, full_path, NULL, 0);
-
- send_newline_and_flush();
- }
- else if (entry->d_type == DT_LNK) {
- struct stat info;
- if (stat(full_path, &info) == -1)
- continue;
-
- if (S_ISDIR(info.st_mode)) {
- // The symbolic link points to a directory
- char resolved_path[FILENAME_MAX + 1];
- if (realpath(full_path, resolved_path) != NULL) {
- journal_directory_scan_recursively(files, dirs, resolved_path, depth + 1);
- }
- }
- else if(S_ISREG(info.st_mode) && len > ext_len && strcmp(full_path + len - ext_len, ext) == 0) {
- if(files)
- dictionary_set(files, full_path, NULL, 0);
-
- send_newline_and_flush();
- }
- }
- }
-
- closedir(dir);
-}
-
-static size_t journal_files_scans = 0;
-bool journal_files_completed_once(void) {
- return journal_files_scans > 0;
-}
-
-int filenames_compar(const void *a, const void *b) {
- const char *p1 = *(const char **)a;
- const char *p2 = *(const char **)b;
-
- const char *at1 = strchr(p1, '@');
- const char *at2 = strchr(p2, '@');
-
- if(!at1 && at2)
- return -1;
-
- if(at1 && !at2)
- return 1;
-
- if(!at1 && !at2)
- return strcmp(p1, p2);
-
- const char *dash1 = strrchr(at1, '-');
- const char *dash2 = strrchr(at2, '-');
-
- if(!dash1 || !dash2)
- return strcmp(p1, p2);
-
- uint64_t ts1 = strtoull(dash1 + 1, NULL, 16);
- uint64_t ts2 = strtoull(dash2 + 1, NULL, 16);
-
- if(ts1 > ts2)
- return -1;
-
- if(ts1 < ts2)
- return 1;
-
- return -strcmp(p1, p2);
-}
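-
-// Illustrative ordering (hypothetical names): archives are compared by the
-// hex timestamp after the last '-', newest first, so
-//   "system@<writer>-<seq>-0005f2e200000000.journal" sorts before
-//   "system@<writer>-<seq>-0005f2e100000000.journal",
-// while active files without '@' (e.g. "system.journal") sort before all archives.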
-
-void journal_files_registry_update(void) {
- static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER;
-
- if(spinlock_trylock(&spinlock)) {
- usec_t scan_monotonic_ut = now_monotonic_usec();
-
- DICTIONARY *files = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
- DICTIONARY *dirs = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
-
- for(unsigned i = 0; i < MAX_JOURNAL_DIRECTORIES; i++) {
- if(!journal_directories[i].path) break;
- journal_directory_scan_recursively(files, dirs, journal_directories[i].path, 0);
- }
-
- const char **array = mallocz(sizeof(const char *) * dictionary_entries(files));
- size_t used = 0;
-
- void *x;
- dfe_start_read(files, x) {
- if(used >= dictionary_entries(files)) continue;
- array[used++] = x_dfe.name;
- }
- dfe_done(x);
-
- qsort(array, used, sizeof(const char *), filenames_compar);
-
- for(size_t i = 0; i < used ;i++) {
- const char *full_path = array[i];
-
- struct stat info;
- if (stat(full_path, &info) == -1)
- continue;
-
- struct journal_file t = {
- .file_last_modified_ut = info.st_mtim.tv_sec * USEC_PER_SEC + info.st_mtim.tv_nsec / NSEC_PER_USEC,
- .last_scan_monotonic_ut = scan_monotonic_ut,
- .size = info.st_size,
- .max_journal_vs_realtime_delta_ut = JOURNAL_VS_REALTIME_DELTA_DEFAULT_UT,
- };
- struct journal_file *jf = dictionary_set(journal_files_registry, full_path, &t, sizeof(t));
- journal_file_update_header(jf->filename, jf);
- }
- freez(array);
- dictionary_destroy(files);
- dictionary_destroy(dirs);
-
- struct journal_file *jf;
- dfe_start_write(journal_files_registry, jf){
- if(jf->last_scan_monotonic_ut < scan_monotonic_ut)
- dictionary_del(journal_files_registry, jf_dfe.name);
- }
- dfe_done(jf);
-
- journal_files_scans++;
- spinlock_unlock(&spinlock);
-
- internal_error(true,
- "Journal library scan completed in %.3f ms",
- (double)(now_monotonic_usec() - scan_monotonic_ut) / (double)USEC_PER_MS);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-int journal_file_dict_items_backward_compar(const void *a, const void *b) {
- const DICTIONARY_ITEM **ad = (const DICTIONARY_ITEM **)a, **bd = (const DICTIONARY_ITEM **)b;
- struct journal_file *jfa = dictionary_acquired_item_value(*ad);
- struct journal_file *jfb = dictionary_acquired_item_value(*bd);
-
- // compare the last message timestamps
- if(jfa->msg_last_ut < jfb->msg_last_ut)
- return 1;
-
- if(jfa->msg_last_ut > jfb->msg_last_ut)
- return -1;
-
- // compare the file last modification timestamps
- if(jfa->file_last_modified_ut < jfb->file_last_modified_ut)
- return 1;
-
- if(jfa->file_last_modified_ut > jfb->file_last_modified_ut)
- return -1;
-
- // compare the first message timestamps
- if(jfa->msg_first_ut < jfb->msg_first_ut)
- return 1;
-
- if(jfa->msg_first_ut > jfb->msg_first_ut)
- return -1;
-
- return 0;
-}
-
-int journal_file_dict_items_forward_compar(const void *a, const void *b) {
- return -journal_file_dict_items_backward_compar(a, b);
-}
-
-static bool boot_id_conflict_cb(const DICTIONARY_ITEM *item, void *old_value, void *new_value, void *data __maybe_unused) {
- usec_t *old_usec = old_value;
- usec_t *new_usec = new_value;
-
- if(*new_usec < *old_usec) {
- *old_usec = *new_usec;
- return true;
- }
-
- return false;
-}
-
-void journal_init_files_and_directories(void) {
- unsigned d = 0;
-
- // ------------------------------------------------------------------------
- // setup the journal directories
-
- journal_directories[d++].path = strdupz("/run/log/journal");
- journal_directories[d++].path = strdupz("/var/log/journal");
-
- if(*netdata_configured_host_prefix) {
- char path[PATH_MAX];
- snprintfz(path, sizeof(path), "%s/var/log/journal", netdata_configured_host_prefix);
- journal_directories[d++].path = strdupz(path);
- snprintfz(path, sizeof(path), "%s/run/log/journal", netdata_configured_host_prefix);
- journal_directories[d++].path = strdupz(path);
- }
-
- // terminate the list
- journal_directories[d].path = NULL;
-
- // ------------------------------------------------------------------------
- // initialize the used hashes files registry
-
- used_hashes_registry = dictionary_create(DICT_OPTION_DONT_OVERWRITE_VALUE);
-
- systemd_journal_session = (now_realtime_usec() / USEC_PER_SEC) * USEC_PER_SEC;
-
- journal_files_registry = dictionary_create_advanced(
- DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE,
- NULL, sizeof(struct journal_file));
-
- dictionary_register_insert_callback(journal_files_registry, files_registry_insert_cb, NULL);
- dictionary_register_delete_callback(journal_files_registry, files_registry_delete_cb, NULL);
- dictionary_register_conflict_callback(journal_files_registry, files_registry_conflict_cb, NULL);
-
- boot_ids_to_first_ut = dictionary_create_advanced(
- DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE,
- NULL, sizeof(usec_t));
-
- dictionary_register_conflict_callback(boot_ids_to_first_ut, boot_id_conflict_cb, NULL);
-
-}
diff --git a/collectors/systemd-journal.plugin/systemd-journal-fstat.c b/collectors/systemd-journal.plugin/systemd-journal-fstat.c
deleted file mode 100644
index 45ea78174..000000000
--- a/collectors/systemd-journal.plugin/systemd-journal-fstat.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "systemd-internals.h"
-
-
-// ----------------------------------------------------------------------------
-// fstat64 overloading to speed up libsystemd
-// https://github.com/systemd/systemd/pull/29261
-
-#include <dlfcn.h>
-#include <sys/stat.h>
-
-#define FSTAT_CACHE_MAX 1024
-struct fdstat64_cache_entry {
- bool enabled;
- bool updated;
- int err_no;
- struct stat64 stat;
- int ret;
- size_t cached_count;
- size_t session;
-};
-
-struct fdstat64_cache_entry fstat64_cache[FSTAT_CACHE_MAX] = { 0 };
-__thread size_t fstat_thread_calls = 0;
-__thread size_t fstat_thread_cached_responses = 0;
-static __thread bool enable_thread_fstat = false;
-static __thread size_t fstat_caching_thread_session = 0;
-static size_t fstat_caching_global_session = 0;
-
-void fstat_cache_enable_on_thread(void) {
- fstat_caching_thread_session = __atomic_add_fetch(&fstat_caching_global_session, 1, __ATOMIC_ACQUIRE);
- enable_thread_fstat = true;
-}
-
-void fstat_cache_disable_on_thread(void) {
- fstat_caching_thread_session = __atomic_add_fetch(&fstat_caching_global_session, 1, __ATOMIC_RELEASE);
- enable_thread_fstat = false;
-}
-
-int fstat64(int fd, struct stat64 *buf) {
- static int (*real_fstat)(int, struct stat64 *) = NULL;
- if (!real_fstat)
- real_fstat = dlsym(RTLD_NEXT, "fstat64");
-
- fstat_thread_calls++;
-
- if(fd >= 0 && fd < FSTAT_CACHE_MAX) {
- if(enable_thread_fstat && fstat64_cache[fd].session != fstat_caching_thread_session) {
- fstat64_cache[fd].session = fstat_caching_thread_session;
- fstat64_cache[fd].enabled = true;
- fstat64_cache[fd].updated = false;
- }
-
- if(fstat64_cache[fd].enabled && fstat64_cache[fd].updated && fstat64_cache[fd].session == fstat_caching_thread_session) {
- fstat_thread_cached_responses++;
- errno = fstat64_cache[fd].err_no;
- *buf = fstat64_cache[fd].stat;
- fstat64_cache[fd].cached_count++;
- return fstat64_cache[fd].ret;
- }
- }
-
- int ret = real_fstat(fd, buf);
-
- if(fd >= 0 && fd < FSTAT_CACHE_MAX && fstat64_cache[fd].enabled && fstat64_cache[fd].session == fstat_caching_thread_session) {
- fstat64_cache[fd].ret = ret;
- fstat64_cache[fd].updated = true;
- fstat64_cache[fd].err_no = errno;
- fstat64_cache[fd].stat = *buf;
- }
-
- return ret;
-}
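-
-// Usage sketch (mirrors the callers in this plugin): a thread that is about
-// to make libsystemd fstat64() the same journal fds repeatedly brackets the
-// work with the cache, e.g.:
-//
-//   fstat_cache_enable_on_thread();
-//   sd_journal_open_files(&j, files, ND_SD_JOURNAL_OPEN_FLAGS);
-//   // ... run queries; repeated fstat64() calls on the same fds are
-//   // served from the cache ...
-//   sd_journal_close(j);
-//   fstat_cache_disable_on_thread();
-//
-// Bumping the global session id on enable/disable invalidates all per-fd
-// entries, so stale stat results cannot leak across sessions.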
diff --git a/collectors/systemd-journal.plugin/systemd-journal-self-signed-certs.sh b/collectors/systemd-journal.plugin/systemd-journal-self-signed-certs.sh
deleted file mode 100755
index ada735f1f..000000000
--- a/collectors/systemd-journal.plugin/systemd-journal-self-signed-certs.sh
+++ /dev/null
@@ -1,267 +0,0 @@
-#!/usr/bin/env bash
-
-me="${0}"
-dst="/etc/ssl/systemd-journal"
-
-show_usage() {
- cat <<EOFUSAGE
-
-${me} [options] server_name alias1 alias2 ...
-
-server_name
- the canonical name of the server on the certificates
-
-aliasN
- a hostname or IP this server is reachable with
- DNS names should be like DNS:hostname
- IPs should be like IP:1.2.3.4
- Any number of aliases are accepted per server
-
-options can be:
-
- -h, --help
- show this message
-
- -d, --directory DIRECTORY
- change the default certificates install dir
- default: ${dst}
-
-EOFUSAGE
-}
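-
-# Example invocation (hypothetical names): generate certificates for a server
-# reachable as logs.example.com and 192.0.2.10:
-#
-#   sudo ./systemd-journal-self-signed-certs.sh logs.example.com \
-#        DNS:logs.example.com IP:192.0.2.10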
-
-while [ ! -z "${1}" ]; do
- case "${1}" in
- -h|--help)
- show_usage
- exit 0
- ;;
-
- -d|--directory)
- dst="${2}"
- echo >&2 "directory set to: ${dst}"
- shift
- ;;
-
- *)
- break
- ;;
- esac
-
- shift
-done
-
-if [ -z "${1}" ]; then
- show_usage
- exit 1
-fi
-
-
-# Define a regular expression pattern for a valid canonical name
-valid_canonical_name_pattern="^[a-zA-Z0-9][a-zA-Z0-9.-]+$"
-
-# Check if ${1} matches the pattern
-if [[ ! "${1}" =~ ${valid_canonical_name_pattern} ]]; then
- echo "Certificate name '${1}' is not valid."
- exit 1
-fi
-
-# -----------------------------------------------------------------------------
-# Create the CA
-
-# stop on all errors
-set -e
-
-if [ $UID -ne 0 ]
-then
- echo >&2 "Hey! sudo me: sudo ${me}"
- exit 1
-fi
-
-if ! getent group systemd-journal >/dev/null 2>&1; then
- echo >&2 "Missing system group: systemd-journal. Did you install systemd-journald?"
- exit 1
-fi
-
-if ! getent passwd systemd-journal-remote >/dev/null 2>&1; then
- echo >&2 "Missing system user: systemd-journal-remote. Did you install systemd-journal-remote?"
- exit 1
-fi
-
-if [ ! -d "${dst}" ]
-then
- mkdir -p "${dst}"
- chown systemd-journal-remote:systemd-journal "${dst}"
- chmod 750 "${dst}"
-fi
-
-cd "${dst}"
-
-test ! -f ca.conf && cat >ca.conf <<EOF
-[ ca ]
-default_ca = CA_default
-[ CA_default ]
-new_certs_dir = .
-certificate = ca.pem
-database = ./index
-private_key = ca.key
-serial = ./serial
-default_days = 3650
-default_md = default
-policy = policy_anything
-[ policy_anything ]
-countryName = optional
-stateOrProvinceName = optional
-localityName = optional
-organizationName = optional
-organizationalUnitName = optional
-commonName = supplied
-emailAddress = optional
-EOF
-
-test ! -f index && touch index
-test ! -f serial && echo 0001 >serial
-
-if [ ! -f ca.pem -o ! -f ca.key ]; then
- echo >&2 "Generating ca.pem ..."
-
- openssl req -newkey rsa:2048 -days 3650 -x509 -nodes -out ca.pem -keyout ca.key -subj "/CN=systemd-journal-remote-ca/"
- chown systemd-journal-remote:systemd-journal ca.pem
- chmod 0640 ca.pem
-fi
-
-# -----------------------------------------------------------------------------
-# Create a server certificate
-
-generate_server_certificate() {
- local cn="${1}"; shift
-
- if [ ! -f "${cn}.pem" -o ! -f "${cn}.key" ]; then
- if [ -z "${*}" ]; then
- echo >"${cn}.conf"
- else
- echo "subjectAltName = $(echo "${@}" | tr " " ",")" >"${cn}.conf"
- fi
-
- echo >&2 "Generating server: ${cn}.pem and ${cn}.key ..."
-
- openssl req -newkey rsa:2048 -nodes -out "${cn}.csr" -keyout "${cn}.key" -subj "/CN=${cn}/"
- openssl ca -batch -config ca.conf -notext -in "${cn}.csr" -out "${cn}.pem" -extfile "${cn}.conf"
- else
- echo >&2 "certificates for ${cn} are already available."
- fi
-
- chown systemd-journal-remote:systemd-journal "${cn}.pem" "${cn}.key"
- chmod 0640 "${cn}.pem" "${cn}.key"
-}
-
-
-# -----------------------------------------------------------------------------
-# Create a script to install the certificate on each server
-
-generate_install_script() {
- local cn="${1}"
- local dst="/etc/ssl/systemd-journal"
-
- cat >"runme-on-${cn}.sh" <<EOFC1
-#!/usr/bin/env bash
-
-# stop on all errors
-set -e
-
-if [ \$UID -ne 0 ]; then
- echo >&2 "Hey! sudo me: sudo \${0}"
- exit 1
-fi
-
-# make sure the systemd-journal group exists
-# all certificates will be owned by this group
-if ! getent group systemd-journal >/dev/null 2>&1; then
- echo >&2 "Missing system group: systemd-journal. Did you install systemd-journald?"
- exit 1
-fi
-
-if ! getent passwd systemd-journal-remote >/dev/null 2>&1; then
- echo >&2 "Missing system user: systemd-journal-remote. Did you install systemd-journal-remote?"
- exit 1
-fi
-
-if [ ! -d "${dst}" ]; then
- echo >&2 "creating directory: ${dst}"
- mkdir -p "${dst}"
-fi
-chown systemd-journal-remote:systemd-journal "${dst}"
-chmod 750 "${dst}"
-cd "${dst}"
-
-echo >&2 "saving trusted certificate file as: ${dst}/ca.pem"
-cat >ca.pem <<EOFCAPEM
-$(cat ca.pem)
-EOFCAPEM
-
-chown systemd-journal-remote:systemd-journal ca.pem
-chmod 0640 ca.pem
-
-echo >&2 "saving server ${cn} certificate file as: ${dst}/${cn}.pem"
-cat >"${cn}.pem" <<EOFSERPEM
-$(cat "${cn}.pem")
-EOFSERPEM
-
-chown systemd-journal-remote:systemd-journal "${cn}.pem"
-chmod 0640 "${cn}.pem"
-
-echo >&2 "saving server ${cn} key file as: ${dst}/${cn}.key"
-cat >"${cn}.key" <<EOFSERKEY
-$(cat "${cn}.key")
-EOFSERKEY
-
-chown systemd-journal-remote:systemd-journal "${cn}.key"
-chmod 0640 "${cn}.key"
-
-for cfg in /etc/systemd/journal-remote.conf /etc/systemd/journal-upload.conf
-do
- if [ -f \${cfg} ]; then
- # keep a backup of the file
- test ! -f \${cfg}.orig && cp \${cfg} \${cfg}.orig
-
- # fix its contents
- echo >&2 "updating the certificates in \${cfg}"
- sed -i "s|^#\\?\\s*ServerKeyFile=.*$|ServerKeyFile=${dst}/${cn}.key|" \${cfg}
- sed -i "s|^#\\?\\s*ServerCertificateFile=.*$|ServerCertificateFile=${dst}/${cn}.pem|" \${cfg}
- sed -i "s|^#\\?\\s*TrustedCertificateFile=.*$|TrustedCertificateFile=${dst}/ca.pem|" \${cfg}
- fi
-done
-
-echo >&2 "certificates installed - you may need to restart services to active them"
-echo >&2
-echo >&2 "If this is a central server:"
-echo >&2 "# systemctl restart systemd-journal-remote.socket"
-echo >&2
-echo >&2 "If this is a passive client:"
-echo >&2 "# systemctl restart systemd-journal-upload.service"
-echo >&2
-echo >&2 "If this is an active client:"
-echo >&2 "# systemctl restart systemd-journal-gateway.socket"
-EOFC1
-
- chmod 0700 "runme-on-${cn}.sh"
-}
-
-# -----------------------------------------------------------------------------
-# Create the client certificates
-
-generate_server_certificate "${@}"
-generate_install_script "${1}"
-
-
-# Set ANSI escape code for colors
-yellow_color="\033[1;33m"
-green_color="\033[0;32m"
-# Reset ANSI color after the message
-reset_color="\033[0m"
-
-
-echo >&2 -e "use this script to install it on ${1}: ${yellow_color}$(ls ${dst}/runme-on-${1}.sh)${reset_color}"
-echo >&2 "copy it to your server ${1}, like this:"
-echo >&2 -e "# ${green_color}scp ${dst}/runme-on-${1}.sh ${1}:/tmp/${reset_color}"
-echo >&2 "and then run it on that server to install the certificates"
-echo >&2
diff --git a/collectors/systemd-journal.plugin/systemd-journal-watcher.c b/collectors/systemd-journal.plugin/systemd-journal-watcher.c
deleted file mode 100644
index ed41f6247..000000000
--- a/collectors/systemd-journal.plugin/systemd-journal-watcher.c
+++ /dev/null
@@ -1,379 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "systemd-internals.h"
-#include <sys/inotify.h>
-
-#define EVENT_SIZE (sizeof(struct inotify_event))
-#define INITIAL_WATCHES 256
-
-#define WATCH_FOR (IN_CREATE | IN_MODIFY | IN_DELETE | IN_DELETE_SELF | IN_MOVED_FROM | IN_MOVED_TO | IN_UNMOUNT)
-
-typedef struct watch_entry {
- int slot;
-
- int wd; // Watch descriptor
- char *path; // Dynamically allocated path
-
- struct watch_entry *next; // for the free list
-} WatchEntry;
-
-typedef struct {
- WatchEntry *watchList;
- WatchEntry *freeList;
- int watchCount;
- int watchListSize;
-
- size_t errors;
-
- DICTIONARY *pending;
-} Watcher;
-
-static WatchEntry *get_slot(Watcher *watcher) {
- WatchEntry *t;
-
- if (watcher->freeList != NULL) {
- t = watcher->freeList;
- watcher->freeList = t->next;
- t->next = NULL;
- return t;
- }
-
- if (watcher->watchCount == watcher->watchListSize) {
- watcher->watchListSize *= 2;
- watcher->watchList = reallocz(watcher->watchList, watcher->watchListSize * sizeof(WatchEntry));
- }
-
- watcher->watchList[watcher->watchCount] = (WatchEntry){
- .slot = watcher->watchCount,
- .wd = -1,
- .path = NULL,
- .next = NULL,
- };
- t = &watcher->watchList[watcher->watchCount];
- watcher->watchCount++;
-
- return t;
-}
-
-static void free_slot(Watcher *watcher, WatchEntry *t) {
- t->wd = -1;
- freez(t->path);
- t->path = NULL;
-
- // link it to the free list
- t->next = watcher->freeList;
- watcher->freeList = t;
-}
-
-static int add_watch(Watcher *watcher, int inotifyFd, const char *path) {
- WatchEntry *t = get_slot(watcher);
-
- t->wd = inotify_add_watch(inotifyFd, path, WATCH_FOR);
- if (t->wd == -1) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "JOURNAL WATCHER: cannot watch directory: '%s'",
- path);
-
- free_slot(watcher, t);
-
- struct stat info;
- if(stat(path, &info) == 0 && S_ISDIR(info.st_mode)) {
- // the directory exists, but we failed to add the watch
- // increase errors
- watcher->errors++;
- }
- }
- else {
- t->path = strdupz(path);
-
- nd_log(NDLS_COLLECTORS, NDLP_DEBUG,
- "JOURNAL WATCHER: watching directory: '%s'",
- path);
-
- }
- return t->wd;
-}
-
-static void remove_watch(Watcher *watcher, int inotifyFd, int wd) {
- int i;
- for (i = 0; i < watcher->watchCount; ++i) {
- if (watcher->watchList[i].wd == wd) {
-
- nd_log(NDLS_COLLECTORS, NDLP_DEBUG,
- "JOURNAL WATCHER: removing watch from directory: '%s'",
- watcher->watchList[i].path);
-
- inotify_rm_watch(inotifyFd, watcher->watchList[i].wd);
- free_slot(watcher, &watcher->watchList[i]);
- return;
- }
- }
-
- nd_log(NDLS_COLLECTORS, NDLP_WARNING,
- "JOURNAL WATCHER: cannot find directory watch %d to remove.",
- wd);
-}
-
-static void free_watches(Watcher *watcher, int inotifyFd) {
- for (int i = 0; i < watcher->watchCount; ++i) {
- if (watcher->watchList[i].wd != -1) {
- inotify_rm_watch(inotifyFd, watcher->watchList[i].wd);
- free_slot(watcher, &watcher->watchList[i]);
- }
- }
- freez(watcher->watchList);
- watcher->watchList = NULL;
-
- dictionary_destroy(watcher->pending);
- watcher->pending = NULL;
-}
-
-static char* get_path_from_wd(Watcher *watcher, int wd) {
- for (int i = 0; i < watcher->watchCount; ++i) {
- if (watcher->watchList[i].wd == wd)
- return watcher->watchList[i].path;
- }
- return NULL;
-}
-
-static bool is_directory_watched(Watcher *watcher, const char *path) {
- for (int i = 0; i < watcher->watchCount; ++i) {
- if (watcher->watchList[i].wd != -1 && strcmp(watcher->watchList[i].path, path) == 0) {
- return true;
- }
- }
- return false;
-}
-
-static void watch_directory_and_subdirectories(Watcher *watcher, int inotifyFd, const char *basePath) {
- DICTIONARY *dirs = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
-
- journal_directory_scan_recursively(NULL, dirs, basePath, 0);
-
- void *x;
- dfe_start_read(dirs, x) {
- const char *dirname = x_dfe.name;
- // Check if this directory is already being watched
- if (!is_directory_watched(watcher, dirname)) {
- add_watch(watcher, inotifyFd, dirname);
- }
- }
- dfe_done(x);
-
- dictionary_destroy(dirs);
-}
-
-static bool is_subpath(const char *path, const char *subpath) {
- // check whether subpath equals path or lives underneath it
- if (strncmp(path, subpath, strlen(path)) == 0) {
- // Ensure that the next character is a '/' or '\0'
- char next_char = subpath[strlen(path)];
- return next_char == '/' || next_char == '\0';
- }
-
- return false;
-}
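-
-// Illustrative checks (hypothetical paths):
-//   is_subpath("/var/log/journal", "/var/log/journal/remote") -> true
-//   is_subpath("/var/log/journal", "/var/log/journal")        -> true
-//   is_subpath("/var/log/journal", "/var/log/journal2")       -> false
-// the boundary-character test above prevents prefix-only matches.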
-
-void remove_directory_watch(Watcher *watcher, int inotifyFd, const char *dirPath) {
- for (int i = 0; i < watcher->watchCount; ++i) {
- WatchEntry *t = &watcher->watchList[i];
- if (t->wd != -1 && is_subpath(t->path, dirPath)) {
- inotify_rm_watch(inotifyFd, t->wd);
- free_slot(watcher, t);
- }
- }
-
- struct journal_file *jf;
- dfe_start_write(journal_files_registry, jf) {
- if(is_subpath(jf->filename, dirPath))
- dictionary_del(journal_files_registry, jf->filename);
- }
- dfe_done(jf);
-
- dictionary_garbage_collect(journal_files_registry);
-}
-
-void process_event(Watcher *watcher, int inotifyFd, struct inotify_event *event) {
- if(!event->len) {
- // event->name is not valid when len is zero, so don't print it
- nd_log(NDLS_COLLECTORS, NDLP_NOTICE,
- "JOURNAL WATCHER: received event with mask %u and len 0 (no name) - ignoring it.",
- event->mask);
- return;
- }
-
- char *dirPath = get_path_from_wd(watcher, event->wd);
- if(!dirPath) {
- nd_log(NDLS_COLLECTORS, NDLP_NOTICE,
- "JOURNAL WATCHER: received event with mask %u and len %u for path: '%s' - "
- "but we can't find its watch descriptor - ignoring it."
- , event->mask, event->len, event->name);
- return;
- }
-
- if(event->mask & IN_DELETE_SELF) {
- remove_watch(watcher, inotifyFd, event->wd);
- return;
- }
-
- static __thread char fullPath[PATH_MAX];
- snprintfz(fullPath, sizeof(fullPath), "%s/%s", dirPath, event->name);
- // fullPath contains the full path to the file
-
- size_t len = strlen(event->name);
-
- if(event->mask & IN_ISDIR) {
- if (event->mask & (IN_DELETE | IN_MOVED_FROM)) {
- // A directory is deleted or moved out
- nd_log(NDLS_COLLECTORS, NDLP_DEBUG,
- "JOURNAL WATCHER: Directory deleted or moved out: '%s'",
- fullPath);
-
- // remove the watch for this directory and everything under it
- remove_directory_watch(watcher, inotifyFd, fullPath);
- }
- else if (event->mask & (IN_CREATE | IN_MOVED_TO)) {
- // A new directory is created or moved in
- nd_log(NDLS_COLLECTORS, NDLP_DEBUG,
- "JOURNAL WATCHER: New directory created or moved in: '%s'",
- fullPath);
-
- // Start watching the new directory - recursive watch
- watch_directory_and_subdirectories(watcher, inotifyFd, fullPath);
- }
- else
- nd_log(NDLS_COLLECTORS, NDLP_WARNING,
- "JOURNAL WATCHER: Received unhandled event with mask %u for directory '%s'",
- event->mask, fullPath);
- }
- else if(len > sizeof(".journal") - 1 && strcmp(&event->name[len - (sizeof(".journal") - 1)], ".journal") == 0) {
- // It is a file that ends in .journal
- // add it to our pending list
- dictionary_set(watcher->pending, fullPath, NULL, 0);
- }
- else
- nd_log(NDLS_COLLECTORS, NDLP_DEBUG,
- "JOURNAL WATCHER: ignoring event with mask %u for file '%s'",
- event->mask, fullPath);
-}
-
-static void process_pending(Watcher *watcher) {
- void *x;
- dfe_start_write(watcher->pending, x) {
- struct stat info;
- const char *fullPath = x_dfe.name;
-
- if(stat(fullPath, &info) != 0) {
- nd_log(NDLS_COLLECTORS, NDLP_DEBUG,
- "JOURNAL WATCHER: file '%s' no longer exists, removing it from the registry",
- fullPath);
-
- dictionary_del(journal_files_registry, fullPath);
- }
- else if(S_ISREG(info.st_mode)) {
- nd_log(NDLS_COLLECTORS, NDLP_DEBUG,
- "JOURNAL WATCHER: file '%s' has been added/updated, updating the registry",
- fullPath);
-
- struct journal_file t = {
- .file_last_modified_ut = info.st_mtim.tv_sec * USEC_PER_SEC +
- info.st_mtim.tv_nsec / NSEC_PER_USEC,
- .last_scan_monotonic_ut = now_monotonic_usec(),
- .size = info.st_size,
- .max_journal_vs_realtime_delta_ut = JOURNAL_VS_REALTIME_DELTA_DEFAULT_UT,
- };
- struct journal_file *jf = dictionary_set(journal_files_registry, fullPath, &t, sizeof(t));
- journal_file_update_header(jf->filename, jf);
- }
-
- dictionary_del(watcher->pending, fullPath);
- }
- dfe_done(x);
-
- dictionary_garbage_collect(watcher->pending);
-}
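-
-// For illustration: events for *.journal files are not processed as they arrive; process_event()
-// queues them in watcher->pending and process_pending() drains the queue periodically, so a burst
-// of writes to the same file costs a single stat() and header update instead of one per event.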
-
-void *journal_watcher_main(void *arg __maybe_unused) {
- while(1) {
- Watcher watcher = {
- .watchList = mallocz(INITIAL_WATCHES * sizeof(WatchEntry)),
- .freeList = NULL,
- .watchCount = 0,
- .watchListSize = INITIAL_WATCHES,
- .pending = dictionary_create(DICT_OPTION_DONT_OVERWRITE_VALUE|DICT_OPTION_SINGLE_THREADED),
- .errors = 0,
- };
-
- int inotifyFd = inotify_init();
- if (inotifyFd < 0) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR, "inotify_init() failed.");
- free_watches(&watcher, inotifyFd);
- return NULL;
- }
-
- for (unsigned i = 0; i < MAX_JOURNAL_DIRECTORIES; i++) {
- if (!journal_directories[i].path) break;
- watch_directory_and_subdirectories(&watcher, inotifyFd, journal_directories[i].path);
- }
-
- usec_t last_headers_update_ut = now_monotonic_usec();
- struct buffered_reader reader;
- while (1) {
- buffered_reader_ret_t rc = buffered_reader_read_timeout(
- &reader, inotifyFd, SYSTEMD_JOURNAL_EXECUTE_WATCHER_PENDING_EVERY_MS, false);
-
- if (rc != BUFFERED_READER_READ_OK && rc != BUFFERED_READER_READ_POLL_TIMEOUT) {
- nd_log(NDLS_COLLECTORS, NDLP_CRIT,
- "JOURNAL WATCHER: cannot read inotify events, buffered_reader_read_timeout() returned %d - "
- "restarting the watcher.",
- rc);
- break;
- }
-
- if(rc == BUFFERED_READER_READ_OK) {
- bool unmount_event = false;
-
- ssize_t i = 0;
- while (i < reader.read_len) {
- struct inotify_event *event = (struct inotify_event *) &reader.read_buffer[i];
-
- if(event->mask & IN_UNMOUNT) {
- unmount_event = true;
- break;
- }
-
- process_event(&watcher, inotifyFd, event);
- i += (ssize_t)EVENT_SIZE + event->len;
- }
-
- reader.read_buffer[0] = '\0';
- reader.read_len = 0;
- reader.pos = 0;
-
- if(unmount_event)
- break;
- }
-
- usec_t ut = now_monotonic_usec();
- if (dictionary_entries(watcher.pending) && (rc == BUFFERED_READER_READ_POLL_TIMEOUT ||
- last_headers_update_ut + (SYSTEMD_JOURNAL_EXECUTE_WATCHER_PENDING_EVERY_MS * USEC_PER_MS) <= ut)) {
- process_pending(&watcher);
- last_headers_update_ut = ut;
- }
-
- if(watcher.errors) {
- nd_log(NDLS_COLLECTORS, NDLP_NOTICE,
- "JOURNAL WATCHER: there were errors in setting up inotify watches - restarting the watcher.");
- break;
- }
- }
-
- close(inotifyFd);
- free_watches(&watcher, inotifyFd);
-
- // this will scan the directories and cleanup the registry
- journal_files_registry_update();
-
- sleep_usec(5 * USEC_PER_SEC);
- }
-
- return NULL;
-}
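-
-// A minimal sketch (not part of the original plugin) of how this entry point could be started,
-// assuming the libnetdata threading helpers used elsewhere in this codebase:
-//
-//    netdata_thread_t watcher_thread;
-//    netdata_thread_create(&watcher_thread, "JRNLWATCHER", NETDATA_THREAD_OPTION_DEFAULT,
-//                          journal_watcher_main, NULL);
-//
-// the function never returns under normal operation: on any watcher error it tears everything
-// down, rescans the directories and starts over after a 5 second pause.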
diff --git a/collectors/systemd-journal.plugin/systemd-journal.c b/collectors/systemd-journal.plugin/systemd-journal.c
deleted file mode 100644
index f812b2161..000000000
--- a/collectors/systemd-journal.plugin/systemd-journal.c
+++ /dev/null
@@ -1,2139 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-/*
- * netdata systemd-journal.plugin
- * Copyright (C) 2023 Netdata Inc.
- * GPL v3+
- */
-
-#include "systemd-internals.h"
-
-/*
- * TODO
- *
- * _UDEV_DEVLINK is frequently set more than once per log entry - support multi-value facets
- *
- */
-
-#define FACET_MAX_VALUE_LENGTH 8192
-
-#define SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION "View, search and analyze systemd journal entries."
-#define SYSTEMD_JOURNAL_FUNCTION_NAME "systemd-journal"
-#define SYSTEMD_JOURNAL_DEFAULT_TIMEOUT 60
-#define SYSTEMD_JOURNAL_MAX_PARAMS 1000
-#define SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION (1 * 3600)
-#define SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY 200
-#define SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING 1000000
-#define SYSTEMD_JOURNAL_SAMPLING_SLOTS 1000
-#define SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE 10000
-
-#define JOURNAL_PARAMETER_HELP "help"
-#define JOURNAL_PARAMETER_AFTER "after"
-#define JOURNAL_PARAMETER_BEFORE "before"
-#define JOURNAL_PARAMETER_ANCHOR "anchor"
-#define JOURNAL_PARAMETER_LAST "last"
-#define JOURNAL_PARAMETER_QUERY "query"
-#define JOURNAL_PARAMETER_FACETS "facets"
-#define JOURNAL_PARAMETER_HISTOGRAM "histogram"
-#define JOURNAL_PARAMETER_DIRECTION "direction"
-#define JOURNAL_PARAMETER_IF_MODIFIED_SINCE "if_modified_since"
-#define JOURNAL_PARAMETER_DATA_ONLY "data_only"
-#define JOURNAL_PARAMETER_SOURCE "source"
-#define JOURNAL_PARAMETER_INFO "info"
-#define JOURNAL_PARAMETER_ID "id"
-#define JOURNAL_PARAMETER_PROGRESS "progress"
-#define JOURNAL_PARAMETER_SLICE "slice"
-#define JOURNAL_PARAMETER_DELTA "delta"
-#define JOURNAL_PARAMETER_TAIL "tail"
-#define JOURNAL_PARAMETER_SAMPLING "sampling"
-
-#define JOURNAL_KEY_ND_JOURNAL_FILE "ND_JOURNAL_FILE"
-#define JOURNAL_KEY_ND_JOURNAL_PROCESS "ND_JOURNAL_PROCESS"
-
-#define JOURNAL_DEFAULT_SLICE_MODE true
-#define JOURNAL_DEFAULT_DIRECTION FACETS_ANCHOR_DIRECTION_BACKWARD
-
-#define SYSTEMD_ALWAYS_VISIBLE_KEYS NULL
-
-#define SYSTEMD_KEYS_EXCLUDED_FROM_FACETS \
- "!MESSAGE_ID" \
- "|*MESSAGE*" \
- "|*_RAW" \
- "|*_USEC" \
- "|*_NSEC" \
- "|*TIMESTAMP*" \
- "|*_ID" \
- "|*_ID_*" \
- "|__*" \
- ""
-
-#define SYSTEMD_KEYS_INCLUDED_IN_FACETS \
- \
- /* --- USER JOURNAL FIELDS --- */ \
- \
- /* "|MESSAGE" */ \
- "|MESSAGE_ID" \
- "|PRIORITY" \
- "|CODE_FILE" \
- /* "|CODE_LINE" */ \
- "|CODE_FUNC" \
- "|ERRNO" \
- /* "|INVOCATION_ID" */ \
- /* "|USER_INVOCATION_ID" */ \
- "|SYSLOG_FACILITY" \
- "|SYSLOG_IDENTIFIER" \
- /* "|SYSLOG_PID" */ \
- /* "|SYSLOG_TIMESTAMP" */ \
- /* "|SYSLOG_RAW" */ \
- /* "!DOCUMENTATION" */ \
- /* "|TID" */ \
- "|UNIT" \
- "|USER_UNIT" \
- "|UNIT_RESULT" /* undocumented */ \
- \
- \
- /* --- TRUSTED JOURNAL FIELDS --- */ \
- \
- /* "|_PID" */ \
- "|_UID" \
- "|_GID" \
- "|_COMM" \
- "|_EXE" \
- /* "|_CMDLINE" */ \
- "|_CAP_EFFECTIVE" \
- /* "|_AUDIT_SESSION" */ \
- "|_AUDIT_LOGINUID" \
- "|_SYSTEMD_CGROUP" \
- "|_SYSTEMD_SLICE" \
- "|_SYSTEMD_UNIT" \
- "|_SYSTEMD_USER_UNIT" \
- "|_SYSTEMD_USER_SLICE" \
- "|_SYSTEMD_SESSION" \
- "|_SYSTEMD_OWNER_UID" \
- "|_SELINUX_CONTEXT" \
- /* "|_SOURCE_REALTIME_TIMESTAMP" */ \
- "|_BOOT_ID" \
- "|_MACHINE_ID" \
- /* "|_SYSTEMD_INVOCATION_ID" */ \
- "|_HOSTNAME" \
- "|_TRANSPORT" \
- "|_STREAM_ID" \
- /* "|LINE_BREAK" */ \
- "|_NAMESPACE" \
- "|_RUNTIME_SCOPE" \
- \
- \
- /* --- KERNEL JOURNAL FIELDS --- */ \
- \
- /* "|_KERNEL_DEVICE" */ \
- "|_KERNEL_SUBSYSTEM" \
- /* "|_UDEV_SYSNAME" */ \
- "|_UDEV_DEVNODE" \
- /* "|_UDEV_DEVLINK" */ \
- \
- \
- /* --- LOGGING ON BEHALF --- */ \
- \
- "|OBJECT_UID" \
- "|OBJECT_GID" \
- "|OBJECT_COMM" \
- "|OBJECT_EXE" \
- /* "|OBJECT_CMDLINE" */ \
- /* "|OBJECT_AUDIT_SESSION" */ \
- "|OBJECT_AUDIT_LOGINUID" \
- "|OBJECT_SYSTEMD_CGROUP" \
- "|OBJECT_SYSTEMD_SESSION" \
- "|OBJECT_SYSTEMD_OWNER_UID" \
- "|OBJECT_SYSTEMD_UNIT" \
- "|OBJECT_SYSTEMD_USER_UNIT" \
- \
- \
- /* --- CORE DUMPS --- */ \
- \
- "|COREDUMP_COMM" \
- "|COREDUMP_UNIT" \
- "|COREDUMP_USER_UNIT" \
- "|COREDUMP_SIGNAL_NAME" \
- "|COREDUMP_CGROUP" \
- \
- \
- /* --- DOCKER --- */ \
- \
- "|CONTAINER_ID" \
- /* "|CONTAINER_ID_FULL" */ \
- "|CONTAINER_NAME" \
- "|CONTAINER_TAG" \
- "|IMAGE_NAME" /* undocumented */ \
- /* "|CONTAINER_PARTIAL_MESSAGE" */ \
- \
- \
- /* --- NETDATA --- */ \
- \
- "|ND_NIDL_NODE" \
- "|ND_NIDL_CONTEXT" \
- "|ND_LOG_SOURCE" \
- /*"|ND_MODULE" */ \
- "|ND_ALERT_NAME" \
- "|ND_ALERT_CLASS" \
- "|ND_ALERT_COMPONENT" \
- "|ND_ALERT_TYPE" \
- \
- ""
-
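-// For illustration: the two lists above are netdata simple patterns, evaluated first-match-wins,
-// where '|' separates alternatives, '*' is a wildcard and a leading '!' negates a match. Against
-// SYSTEMD_KEYS_EXCLUDED_FROM_FACETS, "MESSAGE_ID" stays available (the leading "!MESSAGE_ID"
-// matches before "*_ID"), while "SYSLOG_TIMESTAMP" is excluded by the "*TIMESTAMP*" wildcard.
-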
-// ----------------------------------------------------------------------------
-
-typedef struct function_query_status {
- bool *cancelled; // pointer to the caller's cancellation flag
- usec_t stop_monotonic_ut;
-
- usec_t started_monotonic_ut;
-
- // request
- SD_JOURNAL_FILE_SOURCE_TYPE source_type;
- SIMPLE_PATTERN *sources;
- usec_t after_ut;
- usec_t before_ut;
-
- struct {
- usec_t start_ut;
- usec_t stop_ut;
- } anchor;
-
- FACETS_ANCHOR_DIRECTION direction;
- size_t entries;
- usec_t if_modified_since;
- bool delta;
- bool tail;
- bool data_only;
- bool slice;
- size_t sampling;
- size_t filters;
- usec_t last_modified;
- const char *query;
- const char *histogram;
-
- struct {
- usec_t start_ut; // the starting time of the query - we start from this
- usec_t stop_ut; // the ending time of the query - we stop at this
- usec_t first_msg_ut;
-
- sd_id128_t first_msg_writer;
- uint64_t first_msg_seqnum;
- } query_file;
-
- struct {
- uint32_t enable_after_samples;
- uint32_t slots;
- uint32_t sampled;
- uint32_t unsampled;
- uint32_t estimated;
- } samples;
-
- struct {
- uint32_t enable_after_samples;
- uint32_t every;
- uint32_t skipped;
- uint32_t recalibrate;
- uint32_t sampled;
- uint32_t unsampled;
- uint32_t estimated;
- } samples_per_file;
-
- struct {
- usec_t start_ut;
- usec_t end_ut;
- usec_t step_ut;
- uint32_t enable_after_samples;
- uint32_t sampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS];
- uint32_t unsampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS];
- } samples_per_time_slot;
-
- // per file progress info
- // size_t cached_count;
-
- // progress statistics
- usec_t matches_setup_ut;
- size_t rows_useful;
- size_t rows_read;
- size_t bytes_read;
- size_t files_matched;
- size_t file_working;
-} FUNCTION_QUERY_STATUS;
-
-static void log_fqs(FUNCTION_QUERY_STATUS *fqs, const char *msg) {
- netdata_log_error("ERROR: %s, on query "
- "timeframe [%"PRIu64" - %"PRIu64"], "
- "anchor [%"PRIu64" - %"PRIu64"], "
- "if_modified_since %"PRIu64", "
- "data_only:%s, delta:%s, tail:%s, direction:%s"
- , msg
- , fqs->after_ut, fqs->before_ut
- , fqs->anchor.start_ut, fqs->anchor.stop_ut
- , fqs->if_modified_since
- , fqs->data_only ? "true" : "false"
- , fqs->delta ? "true" : "false"
- , fqs->tail ? "true" : "false"
- , fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD ? "forward" : "backward");
-}
-
-static inline bool netdata_systemd_journal_seek_to(sd_journal *j, usec_t timestamp) {
- if(sd_journal_seek_realtime_usec(j, timestamp) < 0) {
- netdata_log_error("SYSTEMD-JOURNAL: Failed to seek to %" PRIu64, timestamp);
- if(sd_journal_seek_tail(j) < 0) {
- netdata_log_error("SYSTEMD-JOURNAL: Failed to seek to journal's tail");
- return false;
- }
- }
-
- return true;
-}
-
-#define JD_SOURCE_REALTIME_TIMESTAMP "_SOURCE_REALTIME_TIMESTAMP"
-
-// ----------------------------------------------------------------------------
-// sampling support
-
-static void sampling_query_init(FUNCTION_QUERY_STATUS *fqs, FACETS *facets) {
- if(!fqs->sampling)
- return;
-
- if(!fqs->slice) {
- // the user is doing a full data query
- // disable sampling
- fqs->sampling = 0;
- return;
- }
-
- if(fqs->data_only) {
- // the user is doing a data query
- // disable sampling
- fqs->sampling = 0;
- return;
- }
-
- if(!fqs->files_matched) {
- // no files have been matched
- // disable sampling
- fqs->sampling = 0;
- return;
- }
-
- fqs->samples.slots = facets_histogram_slots(facets);
- if(fqs->samples.slots < 2) fqs->samples.slots = 2;
- if(fqs->samples.slots > SYSTEMD_JOURNAL_SAMPLING_SLOTS)
- fqs->samples.slots = SYSTEMD_JOURNAL_SAMPLING_SLOTS;
-
- if(!fqs->after_ut || !fqs->before_ut || fqs->after_ut >= fqs->before_ut) {
- // we don't have enough information for sampling
- fqs->sampling = 0;
- return;
- }
-
- usec_t delta = fqs->before_ut - fqs->after_ut;
- usec_t step = delta / facets_histogram_slots(facets) - 1;
- if(step < 1) step = 1;
-
- fqs->samples_per_time_slot.start_ut = fqs->after_ut;
- fqs->samples_per_time_slot.end_ut = fqs->before_ut;
- fqs->samples_per_time_slot.step_ut = step;
-
- // the minimum number of rows to enable sampling
- fqs->samples.enable_after_samples = fqs->sampling / 2;
-
- size_t files_matched = fqs->files_matched;
- if(!files_matched)
- files_matched = 1;
-
- // the minimum number of rows per file to enable sampling
- fqs->samples_per_file.enable_after_samples = (fqs->sampling / 4) / files_matched;
- if(fqs->samples_per_file.enable_after_samples < fqs->entries)
- fqs->samples_per_file.enable_after_samples = fqs->entries;
-
- // the minimum number of rows per time slot to enable sampling
- fqs->samples_per_time_slot.enable_after_samples = (fqs->sampling / 4) / fqs->samples.slots;
- if(fqs->samples_per_time_slot.enable_after_samples < fqs->entries)
- fqs->samples_per_time_slot.enable_after_samples = fqs->entries;
-}
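-
-// Worked example (illustrative): with the defaults sampling=1000000 and entries=200, a query
-// matching 10 files and using 100 histogram slots starts skipping rows only after it has fully
-// sampled 1000000 / 2 = 500000 rows overall, (1000000 / 4) / 10 = 25000 rows per file and
-// (1000000 / 4) / 100 = 2500 rows per histogram slot.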
-
-static void sampling_file_init(FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf __maybe_unused) {
- fqs->samples_per_file.sampled = 0;
- fqs->samples_per_file.unsampled = 0;
- fqs->samples_per_file.estimated = 0;
- fqs->samples_per_file.every = 0;
- fqs->samples_per_file.skipped = 0;
- fqs->samples_per_file.recalibrate = 0;
-}
-
-static size_t sampling_file_lines_scanned_so_far(FUNCTION_QUERY_STATUS *fqs) {
- size_t sampled = fqs->samples_per_file.sampled + fqs->samples_per_file.unsampled;
- if(!sampled) sampled = 1;
- return sampled;
-}
-
-static void sampling_running_file_query_overlapping_timeframe_ut(
- FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction,
- usec_t msg_ut, usec_t *after_ut, usec_t *before_ut) {
-
- // find the overlap of the query and file timeframes
- // taking into account the first message we encountered
-
- usec_t oldest_ut, newest_ut;
- if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) {
- // the first message we know (oldest)
- oldest_ut = fqs->query_file.first_msg_ut ? fqs->query_file.first_msg_ut : jf->msg_first_ut;
- if(!oldest_ut) oldest_ut = fqs->query_file.start_ut;
-
- if(jf->msg_last_ut)
- newest_ut = MIN(fqs->query_file.stop_ut, jf->msg_last_ut);
- else if(jf->file_last_modified_ut)
- newest_ut = MIN(fqs->query_file.stop_ut, jf->file_last_modified_ut);
- else
- newest_ut = fqs->query_file.stop_ut;
-
- if(msg_ut < oldest_ut)
- oldest_ut = msg_ut - 1;
- }
- else /* BACKWARD */ {
- // the latest message we know (newest)
- newest_ut = fqs->query_file.first_msg_ut ? fqs->query_file.first_msg_ut : jf->msg_last_ut;
- if(!newest_ut) newest_ut = fqs->query_file.start_ut;
-
- if(jf->msg_first_ut)
- oldest_ut = MAX(fqs->query_file.stop_ut, jf->msg_first_ut);
- else
- oldest_ut = fqs->query_file.stop_ut;
-
- if(newest_ut < msg_ut)
- newest_ut = msg_ut + 1;
- }
-
- *after_ut = oldest_ut;
- *before_ut = newest_ut;
-}
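-
-// For illustration: if the query covers 10:00-11:00 and the file is known to hold messages from
-// 10:20 to 12:00, the overlap is [10:20, 11:00]; the current msg_ut is then used to widen that
-// window by one microsecond when it falls on or outside an edge, so msg_ut is always inside it.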
-
-static double sampling_running_file_query_progress_by_time(FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf,
- FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) {
-
- usec_t after_ut, before_ut, elapsed_ut;
- sampling_running_file_query_overlapping_timeframe_ut(fqs, jf, direction, msg_ut, &after_ut, &before_ut);
-
- if(direction == FACETS_ANCHOR_DIRECTION_FORWARD)
- elapsed_ut = msg_ut - after_ut;
- else
- elapsed_ut = before_ut - msg_ut;
-
- usec_t total_ut = before_ut - after_ut;
- double progress = (double)elapsed_ut / (double)total_ut;
-
- return progress;
-}
-
-static usec_t sampling_running_file_query_remaining_time(FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf,
- FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut,
- usec_t *total_time_ut, usec_t *remaining_start_ut,
- usec_t *remaining_end_ut) {
- usec_t after_ut, before_ut;
- sampling_running_file_query_overlapping_timeframe_ut(fqs, jf, direction, msg_ut, &after_ut, &before_ut);
-
- // msg_ut may fall outside the overlap computed above;
- // if so, extend the overlap so that it strictly contains msg_ut
- if(msg_ut <= after_ut)
- after_ut = msg_ut - 1;
-
- if(msg_ut >= before_ut)
- before_ut = msg_ut + 1;
-
- // return the remaining duration
- usec_t remaining_from_ut, remaining_to_ut;
- if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) {
- remaining_from_ut = msg_ut;
- remaining_to_ut = before_ut;
- }
- else {
- remaining_from_ut = after_ut;
- remaining_to_ut = msg_ut;
- }
-
- usec_t remaining_ut = remaining_to_ut - remaining_from_ut;
-
- if(total_time_ut)
- *total_time_ut = (before_ut > after_ut) ? before_ut - after_ut : 1;
-
- if(remaining_start_ut)
- *remaining_start_ut = remaining_from_ut;
-
- if(remaining_end_ut)
- *remaining_end_ut = remaining_to_ut;
-
- return remaining_ut;
-}
-
-static size_t sampling_running_file_query_estimate_remaining_lines_by_time(FUNCTION_QUERY_STATUS *fqs,
- struct journal_file *jf,
- FACETS_ANCHOR_DIRECTION direction,
- usec_t msg_ut) {
- size_t scanned_lines = sampling_file_lines_scanned_so_far(fqs);
-
- // Calculate the proportion of time covered
- usec_t total_time_ut, remaining_start_ut, remaining_end_ut;
- usec_t remaining_time_ut = sampling_running_file_query_remaining_time(fqs, jf, direction, msg_ut, &total_time_ut,
- &remaining_start_ut, &remaining_end_ut);
- if (total_time_ut == 0) total_time_ut = 1;
-
- double proportion_by_time = (double) (total_time_ut - remaining_time_ut) / (double) total_time_ut;
-
- if (proportion_by_time == 0 || proportion_by_time > 1.0 || !isfinite(proportion_by_time))
- proportion_by_time = 1.0;
-
- // Estimate the total number of lines in the file
- size_t expected_matching_logs_by_time = (size_t)((double)scanned_lines / proportion_by_time);
-
- if(jf->messages_in_file && expected_matching_logs_by_time > jf->messages_in_file)
- expected_matching_logs_by_time = jf->messages_in_file;
-
- // Calculate the estimated number of remaining lines
- size_t remaining_logs_by_time = expected_matching_logs_by_time - scanned_lines;
- if (remaining_logs_by_time < 1) remaining_logs_by_time = 1;
-
-// nd_log(NDLS_COLLECTORS, NDLP_INFO,
-// "JOURNAL ESTIMATION: '%s' "
-// "scanned_lines=%zu [sampled=%zu, unsampled=%zu, estimated=%zu], "
-// "file [%"PRIu64" - %"PRIu64", duration %"PRId64", known lines in file %zu], "
-// "query [%"PRIu64" - %"PRIu64", duration %"PRId64"], "
-// "first message read from the file at %"PRIu64", current message at %"PRIu64", "
-// "proportion of time %.2f %%, "
-// "expected total lines in file %zu, "
-// "remaining lines %zu, "
-// "remaining time %"PRIu64" [%"PRIu64" - %"PRIu64", duration %"PRId64"]"
-// , jf->filename
-// , scanned_lines, fqs->samples_per_file.sampled, fqs->samples_per_file.unsampled, fqs->samples_per_file.estimated
-// , jf->msg_first_ut, jf->msg_last_ut, jf->msg_last_ut - jf->msg_first_ut, jf->messages_in_file
-// , fqs->query_file.start_ut, fqs->query_file.stop_ut, fqs->query_file.stop_ut - fqs->query_file.start_ut
-// , fqs->query_file.first_msg_ut, msg_ut
-// , proportion_by_time * 100.0
-// , expected_matching_logs_by_time
-// , remaining_logs_by_time
-// , remaining_time_ut, remaining_start_ut, remaining_end_ut, remaining_end_ut - remaining_start_ut
-// );
-
- return remaining_logs_by_time;
-}
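-
-// Worked example (illustrative): if 1000 lines have been scanned and their timestamps show the
-// query has covered 25% of the overlapping timeframe, the expected total is 1000 / 0.25 = 4000
-// matching lines, leaving roughly 3000 to estimate (capped by messages_in_file when the file
-// header provides that count).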
-
-static size_t sampling_running_file_query_estimate_remaining_lines(sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) {
- size_t expected_matching_logs_by_seqnum = 0;
- double proportion_by_seqnum = 0.0;
- size_t remaining_logs_by_seqnum = 0;
-
-#ifdef HAVE_SD_JOURNAL_GET_SEQNUM
- uint64_t current_msg_seqnum;
- sd_id128_t current_msg_writer;
- if(!fqs->query_file.first_msg_seqnum || sd_journal_get_seqnum(j, &current_msg_seqnum, &current_msg_writer) < 0) {
- fqs->query_file.first_msg_seqnum = 0;
- fqs->query_file.first_msg_writer = SD_ID128_NULL;
- }
- else if(jf->messages_in_file) {
- size_t scanned_lines = sampling_file_lines_scanned_so_far(fqs);
-
- double proportion_of_all_lines_so_far;
- if(direction == FACETS_ANCHOR_DIRECTION_FORWARD)
- proportion_of_all_lines_so_far = (double)scanned_lines / (double)(current_msg_seqnum - jf->first_seqnum);
- else
- proportion_of_all_lines_so_far = (double)scanned_lines / (double)(jf->last_seqnum - current_msg_seqnum);
-
- if(proportion_of_all_lines_so_far > 1.0)
- proportion_of_all_lines_so_far = 1.0;
-
- expected_matching_logs_by_seqnum = (size_t)(proportion_of_all_lines_so_far * (double)jf->messages_in_file);
-
- proportion_by_seqnum = (double)scanned_lines / (double)expected_matching_logs_by_seqnum;
-
- if (proportion_by_seqnum == 0 || proportion_by_seqnum > 1.0 || !isfinite(proportion_by_seqnum))
- proportion_by_seqnum = 1.0;
-
- remaining_logs_by_seqnum = expected_matching_logs_by_seqnum - scanned_lines;
- if(!remaining_logs_by_seqnum) remaining_logs_by_seqnum = 1;
- }
-#endif
-
- if(remaining_logs_by_seqnum)
- return remaining_logs_by_seqnum;
-
- return sampling_running_file_query_estimate_remaining_lines_by_time(fqs, jf, direction, msg_ut);
-}
-
-static void sampling_decide_file_sampling_every(sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) {
- size_t files_matched = fqs->files_matched;
- if(!files_matched) files_matched = 1;
-
- size_t remaining_lines = sampling_running_file_query_estimate_remaining_lines(j, fqs, jf, direction, msg_ut);
- size_t wanted_samples = (fqs->sampling / 2) / files_matched;
- if(!wanted_samples) wanted_samples = 1;
-
- fqs->samples_per_file.every = remaining_lines / wanted_samples;
-
- if(fqs->samples_per_file.every < 1)
- fqs->samples_per_file.every = 1;
-}
-
-typedef enum {
- SAMPLING_STOP_AND_ESTIMATE = -1,
- SAMPLING_FULL = 0,
- SAMPLING_SKIP_FIELDS = 1,
-} sampling_t;
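-
-// For illustration, the callers interpret the result as follows:
-//   SAMPLING_FULL              - read and facet all the fields of the row,
-//   SAMPLING_SKIP_FIELDS       - count the row in the histogram without reading its fields,
-//   SAMPLING_STOP_AND_ESTIMATE - stop reading this file and extrapolate the remaining rows.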
-
-static inline sampling_t is_row_in_sample(sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, usec_t msg_ut, FACETS_ANCHOR_DIRECTION direction, bool candidate_to_keep) {
- if(!fqs->sampling || candidate_to_keep)
- return SAMPLING_FULL;
-
- if(unlikely(msg_ut < fqs->samples_per_time_slot.start_ut))
- msg_ut = fqs->samples_per_time_slot.start_ut;
- if(unlikely(msg_ut > fqs->samples_per_time_slot.end_ut))
- msg_ut = fqs->samples_per_time_slot.end_ut;
-
- size_t slot = (msg_ut - fqs->samples_per_time_slot.start_ut) / fqs->samples_per_time_slot.step_ut;
- if(slot >= fqs->samples.slots)
- slot = fqs->samples.slots - 1;
-
- bool should_sample = false;
-
- if(fqs->samples.sampled < fqs->samples.enable_after_samples ||
- fqs->samples_per_file.sampled < fqs->samples_per_file.enable_after_samples ||
- fqs->samples_per_time_slot.sampled[slot] < fqs->samples_per_time_slot.enable_after_samples)
- should_sample = true;
-
- else if(fqs->samples_per_file.recalibrate >= SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE || !fqs->samples_per_file.every) {
- // this is the first to be unsampled for this file
- sampling_decide_file_sampling_every(j, fqs, jf, direction, msg_ut);
- fqs->samples_per_file.recalibrate = 0;
- should_sample = true;
- }
- else {
- // we sample 1 every fqs->samples_per_file.every
- if(fqs->samples_per_file.skipped >= fqs->samples_per_file.every) {
- fqs->samples_per_file.skipped = 0;
- should_sample = true;
- }
- else
- fqs->samples_per_file.skipped++;
- }
-
- if(should_sample) {
- fqs->samples.sampled++;
- fqs->samples_per_file.sampled++;
- fqs->samples_per_time_slot.sampled[slot]++;
-
- return SAMPLING_FULL;
- }
-
- fqs->samples_per_file.recalibrate++;
-
- fqs->samples.unsampled++;
- fqs->samples_per_file.unsampled++;
- fqs->samples_per_time_slot.unsampled[slot]++;
-
- if(fqs->samples_per_file.unsampled > fqs->samples_per_file.sampled) {
- double progress_by_time = sampling_running_file_query_progress_by_time(fqs, jf, direction, msg_ut);
-
- if(progress_by_time > SYSTEMD_JOURNAL_ENABLE_ESTIMATIONS_FILE_PERCENTAGE)
- return SAMPLING_STOP_AND_ESTIMATE;
- }
-
- return SAMPLING_SKIP_FIELDS;
-}
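-
-// For illustration: once the per-query, per-file and per-slot thresholds above are exhausted,
-// the sampler keeps roughly one row out of every samples_per_file.every rows, and recomputes
-// `every` after each SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE (10000) unsampled rows, so the pace
-// adapts as the remaining-lines estimate changes.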
-
-static void sampling_update_running_query_file_estimates(FACETS *facets, sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, usec_t msg_ut, FACETS_ANCHOR_DIRECTION direction) {
- usec_t total_time_ut, remaining_start_ut, remaining_end_ut;
- sampling_running_file_query_remaining_time(fqs, jf, direction, msg_ut, &total_time_ut, &remaining_start_ut,
- &remaining_end_ut);
- size_t remaining_lines = sampling_running_file_query_estimate_remaining_lines(j, fqs, jf, direction, msg_ut);
- facets_update_estimations(facets, remaining_start_ut, remaining_end_ut, remaining_lines);
- fqs->samples.estimated += remaining_lines;
- fqs->samples_per_file.estimated += remaining_lines;
-}
-
-// ----------------------------------------------------------------------------
-
-static inline size_t netdata_systemd_journal_process_row(sd_journal *j, FACETS *facets, struct journal_file *jf, usec_t *msg_ut) {
- const void *data;
- size_t length, bytes = 0;
-
- facets_add_key_value_length(facets, JOURNAL_KEY_ND_JOURNAL_FILE, sizeof(JOURNAL_KEY_ND_JOURNAL_FILE) - 1, jf->filename, jf->filename_len);
-
- SD_JOURNAL_FOREACH_DATA(j, data, length) {
- const char *key, *value;
- size_t key_length, value_length;
-
- if(!parse_journal_field(data, length, &key, &key_length, &value, &value_length))
- continue;
-
-#ifdef NETDATA_INTERNAL_CHECKS
- usec_t origin_journal_ut = *msg_ut;
-#endif
- if(unlikely(key_length == sizeof(JD_SOURCE_REALTIME_TIMESTAMP) - 1 &&
- memcmp(key, JD_SOURCE_REALTIME_TIMESTAMP, sizeof(JD_SOURCE_REALTIME_TIMESTAMP) - 1) == 0)) {
- usec_t ut = str2ull(value, NULL);
- if(ut && ut < *msg_ut) {
- usec_t delta = *msg_ut - ut;
- *msg_ut = ut;
-
- if(delta > JOURNAL_VS_REALTIME_DELTA_MAX_UT)
- delta = JOURNAL_VS_REALTIME_DELTA_MAX_UT;
-
- // update max_journal_vs_realtime_delta_ut if the delta increased
- usec_t expected = jf->max_journal_vs_realtime_delta_ut;
- do {
- if(delta <= expected)
- break;
- } while(!__atomic_compare_exchange_n(&jf->max_journal_vs_realtime_delta_ut, &expected, delta, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED));
-
- internal_error(delta > expected,
- "increased max_journal_vs_realtime_delta_ut from %"PRIu64" to %"PRIu64", "
- "journal %"PRIu64", actual %"PRIu64" (delta %"PRIu64")"
- , expected, delta, origin_journal_ut, *msg_ut, origin_journal_ut - (*msg_ut));
- }
- }
-
- bytes += length;
- facets_add_key_value_length(facets, key, key_length, value, value_length <= FACET_MAX_VALUE_LENGTH ? value_length : FACET_MAX_VALUE_LENGTH);
- }
-
- return bytes;
-}
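-
-// For illustration: each blob returned by SD_JOURNAL_FOREACH_DATA() is a "KEY=value" pair, e.g.
-// the 10 bytes "PRIORITY=6" are split by parse_journal_field() into key "PRIORITY" (length 8)
-// and value "6" (length 1) before being handed to the facets engine.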
-
-#define FUNCTION_PROGRESS_UPDATE_ROWS(rows_read, rows) __atomic_fetch_add(&(rows_read), rows, __ATOMIC_RELAXED)
-#define FUNCTION_PROGRESS_UPDATE_BYTES(bytes_read, bytes) __atomic_fetch_add(&(bytes_read), bytes, __ATOMIC_RELAXED)
-#define FUNCTION_PROGRESS_EVERY_ROWS (1ULL << 13)
-#define FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS (1ULL << 7)
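-
-// For illustration: both intervals are powers of two, so the `row_counter % N == 0` checks in
-// the query loops reduce to bit masking - progress counters are published every 8192 rows and
-// the data-only early-exit condition is re-evaluated every 128 rows.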
-
-static inline ND_SD_JOURNAL_STATUS check_stop(const bool *cancelled, const usec_t *stop_monotonic_ut) {
- if(cancelled && __atomic_load_n(cancelled, __ATOMIC_RELAXED)) {
- internal_error(true, "Function has been cancelled");
- return ND_SD_JOURNAL_CANCELLED;
- }
-
- if(now_monotonic_usec() > __atomic_load_n(stop_monotonic_ut, __ATOMIC_RELAXED)) {
- internal_error(true, "Function timed out");
- return ND_SD_JOURNAL_TIMED_OUT;
- }
-
- return ND_SD_JOURNAL_OK;
-}
-
-ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward(
- sd_journal *j, BUFFER *wb __maybe_unused, FACETS *facets,
- struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) {
-
- usec_t anchor_delta = __atomic_load_n(&jf->max_journal_vs_realtime_delta_ut, __ATOMIC_RELAXED);
-
- usec_t start_ut = ((fqs->data_only && fqs->anchor.start_ut) ? fqs->anchor.start_ut : fqs->before_ut) + anchor_delta;
- usec_t stop_ut = (fqs->data_only && fqs->anchor.stop_ut) ? fqs->anchor.stop_ut : fqs->after_ut;
- bool stop_when_full = (fqs->data_only && !fqs->anchor.stop_ut);
-
- fqs->query_file.start_ut = start_ut;
- fqs->query_file.stop_ut = stop_ut;
-
- if(!netdata_systemd_journal_seek_to(j, start_ut))
- return ND_SD_JOURNAL_FAILED_TO_SEEK;
-
- size_t errors_no_timestamp = 0;
- usec_t latest_msg_ut = 0; // the biggest timestamp we have seen so far
- usec_t first_msg_ut = 0; // the first message we got from the db
- size_t row_counter = 0, last_row_counter = 0, rows_useful = 0;
- size_t bytes = 0, last_bytes = 0;
-
- usec_t last_usec_from = 0;
- usec_t last_usec_to = 0;
-
- ND_SD_JOURNAL_STATUS status = ND_SD_JOURNAL_OK;
-
- facets_rows_begin(facets);
- while (status == ND_SD_JOURNAL_OK && sd_journal_previous(j) > 0) {
- usec_t msg_ut = 0;
- if(sd_journal_get_realtime_usec(j, &msg_ut) < 0 || !msg_ut) {
- errors_no_timestamp++;
- continue;
- }
-
- if (unlikely(msg_ut > start_ut))
- continue;
-
- if (unlikely(msg_ut < stop_ut))
- break;
-
- if(unlikely(msg_ut > latest_msg_ut))
- latest_msg_ut = msg_ut;
-
- if(unlikely(!first_msg_ut)) {
- first_msg_ut = msg_ut;
- fqs->query_file.first_msg_ut = msg_ut;
-
-#ifdef HAVE_SD_JOURNAL_GET_SEQNUM
- if(sd_journal_get_seqnum(j, &fqs->query_file.first_msg_seqnum, &fqs->query_file.first_msg_writer) < 0) {
- fqs->query_file.first_msg_seqnum = 0;
- fqs->query_file.first_msg_writer = SD_ID128_NULL;
- }
-#endif
- }
-
- sampling_t sample = is_row_in_sample(j, fqs, jf, msg_ut,
- FACETS_ANCHOR_DIRECTION_BACKWARD,
- facets_row_candidate_to_keep(facets, msg_ut));
-
- if(sample == SAMPLING_FULL) {
- bytes += netdata_systemd_journal_process_row(j, facets, jf, &msg_ut);
-
- // make sure each line gets a unique timestamp
- if(unlikely(msg_ut >= last_usec_from && msg_ut <= last_usec_to))
- msg_ut = --last_usec_from;
- else
- last_usec_from = last_usec_to = msg_ut;
-
- if(facets_row_finished(facets, msg_ut))
- rows_useful++;
-
- row_counter++;
- if(unlikely((row_counter % FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS) == 0 &&
- stop_when_full &&
- facets_rows(facets) >= fqs->entries)) {
- // stop the data only query
- usec_t oldest = facets_row_oldest_ut(facets);
- if(oldest && msg_ut < (oldest - anchor_delta))
- break;
- }
-
- if(unlikely(row_counter % FUNCTION_PROGRESS_EVERY_ROWS == 0)) {
- FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter);
- last_row_counter = row_counter;
-
- FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes);
- last_bytes = bytes;
-
- status = check_stop(fqs->cancelled, &fqs->stop_monotonic_ut);
- }
- }
- else if(sample == SAMPLING_SKIP_FIELDS)
- facets_row_finished_unsampled(facets, msg_ut);
- else {
- sampling_update_running_query_file_estimates(facets, j, fqs, jf, msg_ut, FACETS_ANCHOR_DIRECTION_BACKWARD);
- break;
- }
- }
-
- FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter);
- FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes);
-
- fqs->rows_useful += rows_useful;
-
- if(errors_no_timestamp)
- netdata_log_error("SYSTEMD-JOURNAL: %zu lines did not have timestamps", errors_no_timestamp);
-
- if(latest_msg_ut > fqs->last_modified)
- fqs->last_modified = latest_msg_ut;
-
- return status;
-}
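-
-// For illustration: the "unique timestamp" adjustment above means three rows sharing the same
-// microsecond T are emitted by the backward query as T, T-1, T-2 (the forward query below mirrors
-// this as T, T+1, T+2), so every row keeps a distinct timestamp to anchor on.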
-
-ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_forward(
- sd_journal *j, BUFFER *wb __maybe_unused, FACETS *facets,
- struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) {
-
- usec_t anchor_delta = __atomic_load_n(&jf->max_journal_vs_realtime_delta_ut, __ATOMIC_RELAXED);
-
- usec_t start_ut = (fqs->data_only && fqs->anchor.start_ut) ? fqs->anchor.start_ut : fqs->after_ut;
- usec_t stop_ut = ((fqs->data_only && fqs->anchor.stop_ut) ? fqs->anchor.stop_ut : fqs->before_ut) + anchor_delta;
- bool stop_when_full = (fqs->data_only && !fqs->anchor.stop_ut);
-
- fqs->query_file.start_ut = start_ut;
- fqs->query_file.stop_ut = stop_ut;
-
- if(!netdata_systemd_journal_seek_to(j, start_ut))
- return ND_SD_JOURNAL_FAILED_TO_SEEK;
-
- size_t errors_no_timestamp = 0;
- usec_t latest_msg_ut = 0; // the biggest timestamp we have seen so far
- usec_t first_msg_ut = 0; // the first message we got from the db
- size_t row_counter = 0, last_row_counter = 0, rows_useful = 0;
- size_t bytes = 0, last_bytes = 0;
-
- usec_t last_usec_from = 0;
- usec_t last_usec_to = 0;
-
- ND_SD_JOURNAL_STATUS status = ND_SD_JOURNAL_OK;
-
- facets_rows_begin(facets);
- while (status == ND_SD_JOURNAL_OK && sd_journal_next(j) > 0) {
- usec_t msg_ut = 0;
- if(sd_journal_get_realtime_usec(j, &msg_ut) < 0 || !msg_ut) {
- errors_no_timestamp++;
- continue;
- }
-
- if (unlikely(msg_ut < start_ut))
- continue;
-
- if (unlikely(msg_ut > stop_ut))
- break;
-
- if(likely(msg_ut > latest_msg_ut))
- latest_msg_ut = msg_ut;
-
- if(unlikely(!first_msg_ut)) {
- first_msg_ut = msg_ut;
- fqs->query_file.first_msg_ut = msg_ut;
- }
-
- sampling_t sample = is_row_in_sample(j, fqs, jf, msg_ut,
- FACETS_ANCHOR_DIRECTION_FORWARD,
- facets_row_candidate_to_keep(facets, msg_ut));
-
- if(sample == SAMPLING_FULL) {
- bytes += netdata_systemd_journal_process_row(j, facets, jf, &msg_ut);
-
- // make sure each line gets a unique timestamp
- if(unlikely(msg_ut >= last_usec_from && msg_ut <= last_usec_to))
- msg_ut = ++last_usec_to;
- else
- last_usec_from = last_usec_to = msg_ut;
-
- if(facets_row_finished(facets, msg_ut))
- rows_useful++;
-
- row_counter++;
- if(unlikely((row_counter % FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS) == 0 &&
- stop_when_full &&
- facets_rows(facets) >= fqs->entries)) {
- // stop the data only query
- usec_t newest = facets_row_newest_ut(facets);
- if(newest && msg_ut > (newest + anchor_delta))
- break;
- }
-
- if(unlikely(row_counter % FUNCTION_PROGRESS_EVERY_ROWS == 0)) {
- FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter);
- last_row_counter = row_counter;
-
- FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes);
- last_bytes = bytes;
-
- status = check_stop(fqs->cancelled, &fqs->stop_monotonic_ut);
- }
- }
- else if(sample == SAMPLING_SKIP_FIELDS)
- facets_row_finished_unsampled(facets, msg_ut);
- else {
- sampling_update_running_query_file_estimates(facets, j, fqs, jf, msg_ut, FACETS_ANCHOR_DIRECTION_FORWARD);
- break;
- }
- }
-
- FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter);
- FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes);
-
- fqs->rows_useful += rows_useful;
-
- if(errors_no_timestamp)
- netdata_log_error("SYSTEMD-JOURNAL: %zu lines did not have timestamps", errors_no_timestamp);
-
- if(latest_msg_ut > fqs->last_modified)
- fqs->last_modified = latest_msg_ut;
-
- return status;
-}
-
-bool netdata_systemd_journal_check_if_modified_since(sd_journal *j, usec_t seek_to, usec_t last_modified) {
- // returns true if the data has been modified since the given timestamp
-
- if(!last_modified || !seek_to)
- return false;
-
- if(!netdata_systemd_journal_seek_to(j, seek_to))
- return false;
-
- usec_t first_msg_ut = 0;
- while (sd_journal_previous(j) > 0) {
- usec_t msg_ut;
- if(sd_journal_get_realtime_usec(j, &msg_ut) < 0)
- continue;
-
- first_msg_ut = msg_ut;
- break;
- }
-
- return first_msg_ut != last_modified;
-}
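-
-// For illustration: a client echoes the `last_modified` value of a previous response back as
-// `if_modified_since`; this helper seeks to that timestamp and compares the newest entry found
-// there against the stored value - when they match, nothing changed and the caller can reply
-// with 304 Not Modified instead of re-running the query.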
-
-#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS
-static bool netdata_systemd_filtering_by_journal(sd_journal *j, FACETS *facets, FUNCTION_QUERY_STATUS *fqs) {
- const char *field = NULL;
- const void *data = NULL;
- size_t data_length;
- size_t added_keys = 0;
- size_t failures = 0;
- size_t filters_added = 0;
-
- SD_JOURNAL_FOREACH_FIELD(j, field) { // for each key
- bool interesting;
-
- if(fqs->data_only)
- interesting = facets_key_name_is_filter(facets, field);
- else
- interesting = facets_key_name_is_facet(facets, field);
-
- if(interesting) {
- if(sd_journal_query_unique(j, field) >= 0) {
- bool added_this_key = false;
- size_t added_values = 0;
-
- SD_JOURNAL_FOREACH_UNIQUE(j, data, data_length) { // for each value of the key
- const char *key, *value;
- size_t key_length, value_length;
-
- if(!parse_journal_field(data, data_length, &key, &key_length, &value, &value_length))
- continue;
-
- facets_add_possible_value_name_to_key(facets, key, key_length, value, value_length);
-
- if(!facets_key_name_value_length_is_selected(facets, key, key_length, value, value_length))
- continue;
-
- if(added_keys && !added_this_key) {
- if(sd_journal_add_conjunction(j) < 0) // key AND key AND key
- failures++;
-
- added_this_key = true;
- added_keys++;
- }
- else if(added_values)
- if(sd_journal_add_disjunction(j) < 0) // value OR value OR value
- failures++;
-
- if(sd_journal_add_match(j, data, data_length) < 0)
- failures++;
-
- if(!added_keys) {
- added_keys++;
- added_this_key = true;
- }
-
- added_values++;
- filters_added++;
- }
- }
- }
- }
-
- if(failures) {
- log_fqs(fqs, "failed to setup journal filter, will run the full query.");
- sd_journal_flush_matches(j);
- return true;
- }
-
- return filters_added ? true : false;
-}
-#endif // HAVE_SD_JOURNAL_RESTART_FIELDS
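-
-// For illustration: if the user has selected PRIORITY values 3 and 4 plus _SYSTEMD_UNIT value
-// "nginx.service", the loop above builds the equivalent of
-//   (PRIORITY=3 OR PRIORITY=4) AND (_SYSTEMD_UNIT=nginx.service)
-// out of one sd_journal_add_match() per value, sd_journal_add_disjunction() between values of
-// the same key, and sd_journal_add_conjunction() between different keys.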
-
-static ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_one_file(
- const char *filename, BUFFER *wb, FACETS *facets,
- struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) {
-
- sd_journal *j = NULL;
- errno = 0;
-
- fstat_cache_enable_on_thread();
-
- const char *paths[2] = {
- [0] = filename,
- [1] = NULL,
- };
-
- if(sd_journal_open_files(&j, paths, ND_SD_JOURNAL_OPEN_FLAGS) < 0 || !j) {
- netdata_log_error("JOURNAL: cannot open file '%s' for query", filename);
- fstat_cache_disable_on_thread();
- return ND_SD_JOURNAL_FAILED_TO_OPEN;
- }
-
- ND_SD_JOURNAL_STATUS status;
- bool matches_filters = true;
-
-#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS
- if(fqs->slice) {
- usec_t started = now_monotonic_usec();
-
- matches_filters = netdata_systemd_filtering_by_journal(j, facets, fqs) || !fqs->filters;
- usec_t ended = now_monotonic_usec();
-
- fqs->matches_setup_ut += (ended - started);
- }
-#endif // HAVE_SD_JOURNAL_RESTART_FIELDS
-
- if(matches_filters) {
- if(fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD)
- status = netdata_systemd_journal_query_forward(j, wb, facets, jf, fqs);
- else
- status = netdata_systemd_journal_query_backward(j, wb, facets, jf, fqs);
- }
- else
- status = ND_SD_JOURNAL_NO_FILE_MATCHED;
-
- sd_journal_close(j);
- fstat_cache_disable_on_thread();
-
- return status;
-}
-
-static bool jf_is_mine(struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) {
-
- if((fqs->source_type == SDJF_NONE && !fqs->sources) || (jf->source_type & fqs->source_type) ||
- (fqs->sources && simple_pattern_matches(fqs->sources, string2str(jf->source)))) {
-
- if(!jf->msg_last_ut || !jf->msg_last_ut)
- // the file is not scanned yet, or the timestamps have not been updated,
- // so we don't know if it can contribute or not - let's add it.
- return true;
-
- usec_t anchor_delta = JOURNAL_VS_REALTIME_DELTA_MAX_UT;
- usec_t first_ut = jf->msg_first_ut - anchor_delta;
- usec_t last_ut = jf->msg_last_ut + anchor_delta;
-
- if(last_ut >= fqs->after_ut && first_ut <= fqs->before_ut)
- return true;
- }
-
- return false;
-}
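-
-// For illustration: a file whose known messages span 10:00-10:30 is still a candidate for a
-// query window starting at 10:29 and - because both edges are widened by
-// JOURNAL_VS_REALTIME_DELTA_MAX_UT - even for a window starting slightly after 10:30, since
-// journal and source timestamps may differ by up to that delta.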
-
-static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QUERY_STATUS *fqs) {
- ND_SD_JOURNAL_STATUS status = ND_SD_JOURNAL_NO_FILE_MATCHED;
- struct journal_file *jf;
-
- fqs->files_matched = 0;
- fqs->file_working = 0;
- fqs->rows_useful = 0;
- fqs->rows_read = 0;
- fqs->bytes_read = 0;
-
- size_t files_used = 0;
- size_t files_max = dictionary_entries(journal_files_registry);
- const DICTIONARY_ITEM *file_items[files_max];
-
- // count the files
- bool files_are_newer = false;
- dfe_start_read(journal_files_registry, jf) {
- if(!jf_is_mine(jf, fqs))
- continue;
-
- file_items[files_used++] = dictionary_acquired_item_dup(journal_files_registry, jf_dfe.item);
-
- if(jf->msg_last_ut > fqs->if_modified_since)
- files_are_newer = true;
- }
- dfe_done(jf);
-
- fqs->files_matched = files_used;
-
- if(fqs->if_modified_since && !files_are_newer) {
- buffer_flush(wb);
- return HTTP_RESP_NOT_MODIFIED;
- }
-
- // sort the files, so that they are optimal for facets
- if(files_used >= 2) {
- if (fqs->direction == FACETS_ANCHOR_DIRECTION_BACKWARD)
- qsort(file_items, files_used, sizeof(const DICTIONARY_ITEM *),
- journal_file_dict_items_backward_compar);
- else
- qsort(file_items, files_used, sizeof(const DICTIONARY_ITEM *),
- journal_file_dict_items_forward_compar);
- }
-
- bool partial = false;
- usec_t query_started_ut = now_monotonic_usec();
- usec_t started_ut = query_started_ut;
- usec_t ended_ut = started_ut;
- usec_t duration_ut = 0, max_duration_ut = 0;
-
- sampling_query_init(fqs, facets);
-
- buffer_json_member_add_array(wb, "_journal_files");
- for(size_t f = 0; f < files_used ;f++) {
- const char *filename = dictionary_acquired_item_name(file_items[f]);
- jf = dictionary_acquired_item_value(file_items[f]);
-
- if(!jf_is_mine(jf, fqs))
- continue;
-
- started_ut = ended_ut;
-
- // skip this file if the query will likely exceed the timeout: we are past 3/4 of the
- // allowed time, and twice the slowest file so far would not fit in what remains
- if(ended_ut > (query_started_ut + (fqs->stop_monotonic_ut - query_started_ut) * 3 / 4) &&
- ended_ut + max_duration_ut * 2 >= fqs->stop_monotonic_ut) {
-
- partial = true;
- status = ND_SD_JOURNAL_TIMED_OUT;
- break;
- }
-
- fqs->file_working++;
- // fqs->cached_count = 0;
-
- size_t fs_calls = fstat_thread_calls;
- size_t fs_cached = fstat_thread_cached_responses;
- size_t rows_useful = fqs->rows_useful;
- size_t rows_read = fqs->rows_read;
- size_t bytes_read = fqs->bytes_read;
- size_t matches_setup_ut = fqs->matches_setup_ut;
-
- sampling_file_init(fqs, jf);
-
- ND_SD_JOURNAL_STATUS tmp_status = netdata_systemd_journal_query_one_file(filename, wb, facets, jf, fqs);
-
-// nd_log(NDLS_COLLECTORS, NDLP_INFO,
-// "JOURNAL ESTIMATION FINAL: '%s' "
-// "total lines %zu [sampled=%zu, unsampled=%zu, estimated=%zu], "
-// "file [%"PRIu64" - %"PRIu64", duration %"PRId64", known lines in file %zu], "
-// "query [%"PRIu64" - %"PRIu64", duration %"PRId64"], "
-// , jf->filename
-// , fqs->samples_per_file.sampled + fqs->samples_per_file.unsampled + fqs->samples_per_file.estimated
-// , fqs->samples_per_file.sampled, fqs->samples_per_file.unsampled, fqs->samples_per_file.estimated
-// , jf->msg_first_ut, jf->msg_last_ut, jf->msg_last_ut - jf->msg_first_ut, jf->messages_in_file
-// , fqs->query_file.start_ut, fqs->query_file.stop_ut, fqs->query_file.stop_ut - fqs->query_file.start_ut
-// );
-
- rows_useful = fqs->rows_useful - rows_useful;
- rows_read = fqs->rows_read - rows_read;
- bytes_read = fqs->bytes_read - bytes_read;
- matches_setup_ut = fqs->matches_setup_ut - matches_setup_ut;
- fs_calls = fstat_thread_calls - fs_calls;
- fs_cached = fstat_thread_cached_responses - fs_cached;
-
- ended_ut = now_monotonic_usec();
- duration_ut = ended_ut - started_ut;
-
- if(duration_ut > max_duration_ut)
- max_duration_ut = duration_ut;
-
- buffer_json_add_array_item_object(wb); // journal file
- {
- // information about the file
- buffer_json_member_add_string(wb, "_filename", filename);
- buffer_json_member_add_uint64(wb, "_source_type", jf->source_type);
- buffer_json_member_add_string(wb, "_source", string2str(jf->source));
- buffer_json_member_add_uint64(wb, "_last_modified_ut", jf->file_last_modified_ut);
- buffer_json_member_add_uint64(wb, "_msg_first_ut", jf->msg_first_ut);
- buffer_json_member_add_uint64(wb, "_msg_last_ut", jf->msg_last_ut);
- buffer_json_member_add_uint64(wb, "_journal_vs_realtime_delta_ut", jf->max_journal_vs_realtime_delta_ut);
-
- // information about the current use of the file
- buffer_json_member_add_uint64(wb, "duration_ut", ended_ut - started_ut);
- buffer_json_member_add_uint64(wb, "rows_read", rows_read);
- buffer_json_member_add_uint64(wb, "rows_useful", rows_useful);
- buffer_json_member_add_double(wb, "rows_per_second", (double) rows_read / (double) duration_ut * (double) USEC_PER_SEC);
- buffer_json_member_add_uint64(wb, "bytes_read", bytes_read);
- buffer_json_member_add_double(wb, "bytes_per_second", (double) bytes_read / (double) duration_ut * (double) USEC_PER_SEC);
- buffer_json_member_add_uint64(wb, "duration_matches_ut", matches_setup_ut);
- buffer_json_member_add_uint64(wb, "fstat_query_calls", fs_calls);
- buffer_json_member_add_uint64(wb, "fstat_query_cached_responses", fs_cached);
-
- if(fqs->sampling) {
- buffer_json_member_add_object(wb, "_sampling");
- {
- buffer_json_member_add_uint64(wb, "sampled", fqs->samples_per_file.sampled);
- buffer_json_member_add_uint64(wb, "unsampled", fqs->samples_per_file.unsampled);
- buffer_json_member_add_uint64(wb, "estimated", fqs->samples_per_file.estimated);
- }
- buffer_json_object_close(wb); // _sampling
- }
- }
- buffer_json_object_close(wb); // journal file
-
- bool stop = false;
- switch(tmp_status) {
- case ND_SD_JOURNAL_OK:
- case ND_SD_JOURNAL_NO_FILE_MATCHED:
- status = (status == ND_SD_JOURNAL_OK) ? ND_SD_JOURNAL_OK : tmp_status;
- break;
-
- case ND_SD_JOURNAL_FAILED_TO_OPEN:
- case ND_SD_JOURNAL_FAILED_TO_SEEK:
- partial = true;
- if(status == ND_SD_JOURNAL_NO_FILE_MATCHED)
- status = tmp_status;
- break;
-
- case ND_SD_JOURNAL_CANCELLED:
- case ND_SD_JOURNAL_TIMED_OUT:
- partial = true;
- stop = true;
- status = tmp_status;
- break;
-
- case ND_SD_JOURNAL_NOT_MODIFIED:
- internal_fatal(true, "this should never be returned here");
- break;
- }
-
- if(stop)
- break;
- }
- buffer_json_array_close(wb); // _journal_files
-
- // release the files
- for(size_t f = 0; f < files_used ;f++)
- dictionary_acquired_item_release(journal_files_registry, file_items[f]);
-
- switch (status) {
- case ND_SD_JOURNAL_OK:
- if(fqs->if_modified_since && !fqs->rows_useful) {
- buffer_flush(wb);
- return HTTP_RESP_NOT_MODIFIED;
- }
- break;
-
- case ND_SD_JOURNAL_TIMED_OUT:
- case ND_SD_JOURNAL_NO_FILE_MATCHED:
- break;
-
- case ND_SD_JOURNAL_CANCELLED:
- buffer_flush(wb);
- return HTTP_RESP_CLIENT_CLOSED_REQUEST;
-
- case ND_SD_JOURNAL_NOT_MODIFIED:
- buffer_flush(wb);
- return HTTP_RESP_NOT_MODIFIED;
-
- default:
- case ND_SD_JOURNAL_FAILED_TO_OPEN:
- case ND_SD_JOURNAL_FAILED_TO_SEEK:
- buffer_flush(wb);
- return HTTP_RESP_INTERNAL_SERVER_ERROR;
- }
-
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_boolean(wb, "partial", partial);
- buffer_json_member_add_string(wb, "type", "table");
-
- // build a message for the query
- if(!fqs->data_only) {
- CLEAN_BUFFER *msg = buffer_create(0, NULL);
- CLEAN_BUFFER *msg_description = buffer_create(0, NULL);
- ND_LOG_FIELD_PRIORITY msg_priority = NDLP_INFO;
-
- if(!journal_files_completed_once()) {
- buffer_strcat(msg, "Journals are still being scanned. ");
- buffer_strcat(msg_description
- , "LIBRARY SCAN: The journal files are still being scanned; you are probably viewing incomplete data. ");
- msg_priority = NDLP_WARNING;
- }
-
- if(partial) {
- buffer_strcat(msg, "Query timed-out, incomplete data. ");
- buffer_strcat(msg_description
- , "QUERY TIMEOUT: The query timed out and may not include all the data of the selected window. ");
- msg_priority = NDLP_WARNING;
- }
-
- if(fqs->samples.estimated || fqs->samples.unsampled) {
- double percent = (double) (fqs->samples.sampled * 100.0 /
- (fqs->samples.estimated + fqs->samples.unsampled + fqs->samples.sampled));
- buffer_sprintf(msg, "%.2f%% real data", percent);
- buffer_sprintf(msg_description, "ACTUAL DATA: The filters counters reflect %0.2f%% of the data. ", percent);
- msg_priority = MIN(msg_priority, NDLP_NOTICE);
- }
-
- if(fqs->samples.unsampled) {
- double percent = (double) (fqs->samples.unsampled * 100.0 /
- (fqs->samples.estimated + fqs->samples.unsampled + fqs->samples.sampled));
- buffer_sprintf(msg, ", %.2f%% unsampled", percent);
- buffer_sprintf(msg_description
- , "UNSAMPLED DATA: %0.2f%% of the events exist and have been counted, but their values have not been evaluated, so they are not included in the filters counters. "
- , percent);
- msg_priority = MIN(msg_priority, NDLP_NOTICE);
- }
-
- if(fqs->samples.estimated) {
- double percent = (double) (fqs->samples.estimated * 100.0 /
- (fqs->samples.estimated + fqs->samples.unsampled + fqs->samples.sampled));
- buffer_sprintf(msg, ", %.2f%% estimated", percent);
- buffer_sprintf(msg_description
- , "ESTIMATED DATA: The query selected a large amount of data, so to avoid delaying too much, the presented data are estimated by %0.2f%%. "
- , percent);
- msg_priority = MIN(msg_priority, NDLP_NOTICE);
- }
-
- buffer_json_member_add_object(wb, "message");
- if(buffer_strlen(msg)) {
- buffer_json_member_add_string(wb, "title", buffer_tostring(msg));
- buffer_json_member_add_string(wb, "description", buffer_tostring(msg_description));
- buffer_json_member_add_string(wb, "status", nd_log_id2priority(msg_priority));
- }
- // else send an empty object if there is nothing to tell
- buffer_json_object_close(wb); // message
- }
-
- if(!fqs->data_only) {
- buffer_json_member_add_time_t(wb, "update_every", 1);
- buffer_json_member_add_string(wb, "help", SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION);
- }
-
- if(!fqs->data_only || fqs->tail)
- buffer_json_member_add_uint64(wb, "last_modified", fqs->last_modified);
-
- facets_sort_and_reorder_keys(facets);
- facets_report(facets, wb, used_hashes_registry);
-
- buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + (fqs->data_only ? 3600 : 0));
-
- buffer_json_member_add_object(wb, "_fstat_caching");
- {
- buffer_json_member_add_uint64(wb, "calls", fstat_thread_calls);
- buffer_json_member_add_uint64(wb, "cached", fstat_thread_cached_responses);
- }
- buffer_json_object_close(wb); // _fstat_caching
-
- if(fqs->sampling) {
- buffer_json_member_add_object(wb, "_sampling");
- {
- buffer_json_member_add_uint64(wb, "sampled", fqs->samples.sampled);
- buffer_json_member_add_uint64(wb, "unsampled", fqs->samples.unsampled);
- buffer_json_member_add_uint64(wb, "estimated", fqs->samples.estimated);
- }
- buffer_json_object_close(wb); // _sampling
- }
-
- buffer_json_finalize(wb);
-
- return HTTP_RESP_OK;
-}
-
-static void netdata_systemd_journal_function_help(const char *transaction) {
- BUFFER *wb = buffer_create(0, NULL);
- buffer_sprintf(wb,
- "%s / %s\n"
- "\n"
- "%s\n"
- "\n"
- "The following parameters are supported:\n"
- "\n"
- " "JOURNAL_PARAMETER_HELP"\n"
- " Shows this help message.\n"
- "\n"
- " "JOURNAL_PARAMETER_INFO"\n"
- " Request initial configuration information about the plugin.\n"
- " The key entity returned is the required_params array, which includes\n"
- " all the available systemd journal sources.\n"
- " When `"JOURNAL_PARAMETER_INFO"` is requested, all other parameters are ignored.\n"
- "\n"
- " "JOURNAL_PARAMETER_ID":STRING\n"
- " Caller supplied unique ID of the request.\n"
- " This can be used later to request a progress report of the query.\n"
- " Optional, but if omitted no `"JOURNAL_PARAMETER_PROGRESS"` can be requested.\n"
- "\n"
- " "JOURNAL_PARAMETER_PROGRESS"\n"
- " Request a progress report (the `id` of a running query is required).\n"
- " When `"JOURNAL_PARAMETER_PROGRESS"` is requested, only parameter `"JOURNAL_PARAMETER_ID"` is used.\n"
- "\n"
- " "JOURNAL_PARAMETER_DATA_ONLY":true or "JOURNAL_PARAMETER_DATA_ONLY":false\n"
- " Quickly respond with data requested, without generating a\n"
- " `histogram`, `facets` counters and `items`.\n"
- "\n"
- " "JOURNAL_PARAMETER_DELTA":true or "JOURNAL_PARAMETER_DELTA":false\n"
- " When doing data only queries, include deltas for histogram, facets and items.\n"
- "\n"
- " "JOURNAL_PARAMETER_TAIL":true or "JOURNAL_PARAMETER_TAIL":false\n"
- " When doing data only queries, respond with the newest messages,\n"
- " and up to the anchor, but calculate deltas (if requested) for\n"
- " the duration [anchor - before].\n"
- "\n"
- " "JOURNAL_PARAMETER_SLICE":true or "JOURNAL_PARAMETER_SLICE":false\n"
- " When it is turned on, the plugin is executing filtering via libsystemd,\n"
- " utilizing all the available indexes of the journal files.\n"
- " When it is off, only the time constraint is handled by libsystemd and\n"
- " all filtering is done by the plugin.\n"
- " The default is: %s\n"
- "\n"
- " "JOURNAL_PARAMETER_SOURCE":SOURCE\n"
- " Query only the specified journal sources.\n"
- " Do an `"JOURNAL_PARAMETER_INFO"` query to find the sources.\n"
- "\n"
- " "JOURNAL_PARAMETER_BEFORE":TIMESTAMP_IN_SECONDS\n"
- " Absolute or relative (to now) timestamp in seconds, to start the query.\n"
- " The query is always executed from the most recent to the oldest log entry.\n"
- " If not given the default is: now.\n"
- "\n"
- " "JOURNAL_PARAMETER_AFTER":TIMESTAMP_IN_SECONDS\n"
- " Absolute or relative (to `before`) timestamp in seconds, to end the query.\n"
- " If not given, the default is %d.\n"
- "\n"
- " "JOURNAL_PARAMETER_LAST":ITEMS\n"
- " The number of items to return.\n"
- " The default is %d.\n"
- "\n"
- " "JOURNAL_PARAMETER_SAMPLING":ITEMS\n"
- " The number of log entries to sample to estimate facets counters and histogram.\n"
- " The default is %d.\n"
- "\n"
- " "JOURNAL_PARAMETER_ANCHOR":TIMESTAMP_IN_MICROSECONDS\n"
- " Return items relative to this timestamp.\n"
- " The exact items to be returned depend on the query `"JOURNAL_PARAMETER_DIRECTION"`.\n"
- "\n"
- " "JOURNAL_PARAMETER_DIRECTION":forward or "JOURNAL_PARAMETER_DIRECTION":backward\n"
- " When set to `backward` (default) the items returned are the newest before the\n"
- " `"JOURNAL_PARAMETER_ANCHOR"`, (or `"JOURNAL_PARAMETER_BEFORE"` if `"JOURNAL_PARAMETER_ANCHOR"` is not set)\n"
- " When set to `forward` the items returned are the oldest after the\n"
- " `"JOURNAL_PARAMETER_ANCHOR"`, (or `"JOURNAL_PARAMETER_AFTER"` if `"JOURNAL_PARAMETER_ANCHOR"` is not set)\n"
- " The default is: %s\n"
- "\n"
- " "JOURNAL_PARAMETER_QUERY":SIMPLE_PATTERN\n"
- " Do a full text search to find the log entries matching the pattern given.\n"
- " The plugin is searching for matches on all fields of the database.\n"
- "\n"
- " "JOURNAL_PARAMETER_IF_MODIFIED_SINCE":TIMESTAMP_IN_MICROSECONDS\n"
- " Each successful response, includes a `last_modified` field.\n"
- " By providing the timestamp to the `"JOURNAL_PARAMETER_IF_MODIFIED_SINCE"` parameter,\n"
- " the plugin will return 200 with a successful response, or 304 if the source has not\n"
- " been modified since that timestamp.\n"
- "\n"
- " "JOURNAL_PARAMETER_HISTOGRAM":facet_id\n"
- " Use the given `facet_id` for the histogram.\n"
- " This parameter is ignored in `"JOURNAL_PARAMETER_DATA_ONLY"` mode.\n"
- "\n"
- " "JOURNAL_PARAMETER_FACETS":facet_id1,facet_id2,facet_id3,...\n"
- " Add the given facets to the list of fields for which analysis is required.\n"
- " The plugin will offer both a histogram and facet value counters for its values.\n"
- " This parameter is ignored in `"JOURNAL_PARAMETER_DATA_ONLY"` mode.\n"
- "\n"
- " facet_id:value_id1,value_id2,value_id3,...\n"
- " Apply filters to the query, based on the facet IDs returned.\n"
- " Each `facet_id` can be given once, but multiple `facet_ids` can be given.\n"
- "\n"
- , program_name
- , SYSTEMD_JOURNAL_FUNCTION_NAME
- , SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION
- , JOURNAL_DEFAULT_SLICE_MODE ? "true" : "false" // slice
- , -SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION
- , SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY
- , SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING
- , JOURNAL_DEFAULT_DIRECTION == FACETS_ANCHOR_DIRECTION_BACKWARD ? "backward" : "forward"
- );
-
- netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb);
- netdata_mutex_unlock(&stdout_mutex);
-
- buffer_free(wb);
-}
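-
-// A minimal sketch (illustrative; the ids are placeholders) of a call to this function, using
-// the parameters documented in the help text above:
-//
-//    systemd-journal after:-3600 last:200 facet_id:value_id1,value_id2
-//
-// i.e. the last hour of logs, newest first, at most 200 rows, filtered on one facet, where
-// facet_id and value_id come from a previous response.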
-
-DICTIONARY *function_query_status_dict = NULL;
-
-static void function_systemd_journal_progress(BUFFER *wb, const char *transaction, const char *progress_id) {
- if(!progress_id || !(*progress_id)) {
- netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST, "missing progress id");
- netdata_mutex_unlock(&stdout_mutex);
- return;
- }
-
- const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(function_query_status_dict, progress_id);
-
- if(!item) {
- netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_NOT_FOUND, "progress id is not found here");
- netdata_mutex_unlock(&stdout_mutex);
- return;
- }
-
- FUNCTION_QUERY_STATUS *fqs = dictionary_acquired_item_value(item);
-
- usec_t now_monotonic_ut = now_monotonic_usec();
- if(now_monotonic_ut + 10 * USEC_PER_SEC > fqs->stop_monotonic_ut)
- fqs->stop_monotonic_ut = now_monotonic_ut + 10 * USEC_PER_SEC;
-
- usec_t duration_ut = now_monotonic_ut - fqs->started_monotonic_ut;
-
- size_t files_matched = fqs->files_matched;
- size_t file_working = fqs->file_working;
- if(file_working > files_matched)
- files_matched = file_working;
-
- if(!files_matched)
- files_matched = 1; // avoid NaN in the progress calculation before any file is matched
-
- size_t rows_read = __atomic_load_n(&fqs->rows_read, __ATOMIC_RELAXED);
- size_t bytes_read = __atomic_load_n(&fqs->bytes_read, __ATOMIC_RELAXED);
-
- buffer_flush(wb);
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_uint64(wb, "running_duration_usec", duration_ut);
- buffer_json_member_add_double(wb, "progress", (double)file_working * 100.0 / (double)files_matched);
- char msg[1024 + 1];
- snprintfz(msg, sizeof(msg) - 1,
- "Read %zu rows (%0.0f rows/s), "
- "data %0.1f MB (%0.1f MB/s), "
- "file %zu of %zu",
- rows_read, (double)rows_read / (double)duration_ut * (double)USEC_PER_SEC,
- (double)bytes_read / 1024.0 / 1024.0, ((double)bytes_read / (double)duration_ut * (double)USEC_PER_SEC) / 1024.0 / 1024.0,
- file_working, files_matched
- );
- buffer_json_member_add_string(wb, "message", msg);
- buffer_json_finalize(wb);
-
- netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", now_realtime_sec() + 1, wb);
- netdata_mutex_unlock(&stdout_mutex);
-
- dictionary_acquired_item_release(function_query_status_dict, item);
-}
-
-void function_systemd_journal(const char *transaction, char *function, int timeout, bool *cancelled) {
- fstat_thread_calls = 0;
- fstat_thread_cached_responses = 0;
-
- BUFFER *wb = buffer_create(0, NULL);
- buffer_flush(wb);
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
-
- usec_t now_monotonic_ut = now_monotonic_usec();
- FUNCTION_QUERY_STATUS tmp_fqs = {
- .cancelled = cancelled,
- .started_monotonic_ut = now_monotonic_ut,
- .stop_monotonic_ut = now_monotonic_ut + (timeout * USEC_PER_SEC),
- };
- FUNCTION_QUERY_STATUS *fqs = NULL;
- const DICTIONARY_ITEM *fqs_item = NULL;
-
- FACETS *facets = facets_create(50, FACETS_OPTION_ALL_KEYS_FTS,
- SYSTEMD_ALWAYS_VISIBLE_KEYS,
- SYSTEMD_KEYS_INCLUDED_IN_FACETS,
- SYSTEMD_KEYS_EXCLUDED_FROM_FACETS);
-
- facets_accepted_param(facets, JOURNAL_PARAMETER_INFO);
- facets_accepted_param(facets, JOURNAL_PARAMETER_SOURCE);
- facets_accepted_param(facets, JOURNAL_PARAMETER_AFTER);
- facets_accepted_param(facets, JOURNAL_PARAMETER_BEFORE);
- facets_accepted_param(facets, JOURNAL_PARAMETER_ANCHOR);
- facets_accepted_param(facets, JOURNAL_PARAMETER_DIRECTION);
- facets_accepted_param(facets, JOURNAL_PARAMETER_LAST);
- facets_accepted_param(facets, JOURNAL_PARAMETER_QUERY);
- facets_accepted_param(facets, JOURNAL_PARAMETER_FACETS);
- facets_accepted_param(facets, JOURNAL_PARAMETER_HISTOGRAM);
- facets_accepted_param(facets, JOURNAL_PARAMETER_IF_MODIFIED_SINCE);
- facets_accepted_param(facets, JOURNAL_PARAMETER_DATA_ONLY);
- facets_accepted_param(facets, JOURNAL_PARAMETER_ID);
- facets_accepted_param(facets, JOURNAL_PARAMETER_PROGRESS);
- facets_accepted_param(facets, JOURNAL_PARAMETER_DELTA);
- facets_accepted_param(facets, JOURNAL_PARAMETER_TAIL);
- facets_accepted_param(facets, JOURNAL_PARAMETER_SAMPLING);
-
-#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS
- facets_accepted_param(facets, JOURNAL_PARAMETER_SLICE);
-#endif // HAVE_SD_JOURNAL_RESTART_FIELDS
-
- // register the fields in the order you want them on the dashboard
-
- facets_register_row_severity(facets, syslog_priority_to_facet_severity, NULL);
-
- facets_register_key_name(facets, "_HOSTNAME",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_VISIBLE);
-
- facets_register_dynamic_key_name(facets, JOURNAL_KEY_ND_JOURNAL_PROCESS,
- FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_VISIBLE,
- netdata_systemd_journal_dynamic_row_id, NULL);
-
- facets_register_key_name(facets, "MESSAGE",
- FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT |
- FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS);
-
-// facets_register_dynamic_key_name(facets, "MESSAGE",
-// FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | FACET_KEY_OPTION_RICH_TEXT |
-// FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS,
-// netdata_systemd_journal_rich_message, NULL);
-
- facets_register_key_name_transformation(facets, "PRIORITY",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW |
- FACET_KEY_OPTION_EXPANDED_FILTER,
- netdata_systemd_journal_transform_priority, NULL);
-
- facets_register_key_name_transformation(facets, "SYSLOG_FACILITY",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW |
- FACET_KEY_OPTION_EXPANDED_FILTER,
- netdata_systemd_journal_transform_syslog_facility, NULL);
-
- facets_register_key_name_transformation(facets, "ERRNO",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_errno, NULL);
-
- facets_register_key_name(facets, JOURNAL_KEY_ND_JOURNAL_FILE,
- FACET_KEY_OPTION_NEVER_FACET);
-
- facets_register_key_name(facets, "SYSLOG_IDENTIFIER",
- FACET_KEY_OPTION_FACET);
-
- facets_register_key_name(facets, "UNIT",
- FACET_KEY_OPTION_FACET);
-
- facets_register_key_name(facets, "USER_UNIT",
- FACET_KEY_OPTION_FACET);
-
- facets_register_key_name_transformation(facets, "MESSAGE_ID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW |
- FACET_KEY_OPTION_EXPANDED_FILTER,
- netdata_systemd_journal_transform_message_id, NULL);
-
- facets_register_key_name_transformation(facets, "_BOOT_ID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_boot_id, NULL);
-
- facets_register_key_name_transformation(facets, "_SYSTEMD_OWNER_UID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_uid, NULL);
-
- facets_register_key_name_transformation(facets, "_UID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_uid, NULL);
-
- facets_register_key_name_transformation(facets, "OBJECT_SYSTEMD_OWNER_UID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_uid, NULL);
-
- facets_register_key_name_transformation(facets, "OBJECT_UID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_uid, NULL);
-
- facets_register_key_name_transformation(facets, "_GID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_gid, NULL);
-
- facets_register_key_name_transformation(facets, "OBJECT_GID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_gid, NULL);
-
- facets_register_key_name_transformation(facets, "_CAP_EFFECTIVE",
- FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_cap_effective, NULL);
-
- facets_register_key_name_transformation(facets, "_AUDIT_LOGINUID",
- FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_uid, NULL);
-
- facets_register_key_name_transformation(facets, "OBJECT_AUDIT_LOGINUID",
- FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_uid, NULL);
-
- facets_register_key_name_transformation(facets, "_SOURCE_REALTIME_TIMESTAMP",
- FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_timestamp_usec, NULL);
-
- // ------------------------------------------------------------------------
- // parse the parameters
-
- bool info = false, data_only = false, progress = false, slice = JOURNAL_DEFAULT_SLICE_MODE, delta = false, tail = false;
- time_t after_s = 0, before_s = 0;
- usec_t anchor = 0;
- usec_t if_modified_since = 0;
- size_t last = 0;
- FACETS_ANCHOR_DIRECTION direction = JOURNAL_DEFAULT_DIRECTION;
- const char *query = NULL;
- const char *chart = NULL;
- SIMPLE_PATTERN *sources = NULL;
- const char *progress_id = NULL;
- SD_JOURNAL_FILE_SOURCE_TYPE source_type = SDJF_ALL;
- size_t filters = 0;
- size_t sampling = SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING;
-
- buffer_json_member_add_object(wb, "_request");
-
- char *words[SYSTEMD_JOURNAL_MAX_PARAMS] = { NULL };
- size_t num_words = quoted_strings_splitter_pluginsd(function, words, SYSTEMD_JOURNAL_MAX_PARAMS);
- for(int i = 1; i < SYSTEMD_JOURNAL_MAX_PARAMS ;i++) {
- char *keyword = get_word(words, num_words, i);
- if(!keyword) break;
-
- if(strcmp(keyword, JOURNAL_PARAMETER_HELP) == 0) {
- netdata_systemd_journal_function_help(transaction);
- goto cleanup;
- }
- else if(strcmp(keyword, JOURNAL_PARAMETER_INFO) == 0) {
- info = true;
- }
- else if(strcmp(keyword, JOURNAL_PARAMETER_PROGRESS) == 0) {
- progress = true;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_DELTA ":", sizeof(JOURNAL_PARAMETER_DELTA ":") - 1) == 0) {
- char *v = &keyword[sizeof(JOURNAL_PARAMETER_DELTA ":") - 1];
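-
-            // sizeof() of a string literal counts the terminating NUL, so
-            // sizeof(JOURNAL_PARAMETER_DELTA ":") - 1 is the compile-time
-            // strlen() of the concatenated prefix (e.g. "delta:"); the same
-            // idiom drives every keyword:value match in this chain.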
-
- if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0)
- delta = false;
- else
- delta = true;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_TAIL ":", sizeof(JOURNAL_PARAMETER_TAIL ":") - 1) == 0) {
- char *v = &keyword[sizeof(JOURNAL_PARAMETER_TAIL ":") - 1];
-
- if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0)
- tail = false;
- else
- tail = true;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_SAMPLING ":", sizeof(JOURNAL_PARAMETER_SAMPLING ":") - 1) == 0) {
- sampling = str2ul(&keyword[sizeof(JOURNAL_PARAMETER_SAMPLING ":") - 1]);
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_DATA_ONLY ":", sizeof(JOURNAL_PARAMETER_DATA_ONLY ":") - 1) == 0) {
- char *v = &keyword[sizeof(JOURNAL_PARAMETER_DATA_ONLY ":") - 1];
-
- if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0)
- data_only = false;
- else
- data_only = true;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_SLICE ":", sizeof(JOURNAL_PARAMETER_SLICE ":") - 1) == 0) {
- char *v = &keyword[sizeof(JOURNAL_PARAMETER_SLICE ":") - 1];
-
- if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0)
- slice = false;
- else
- slice = true;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_ID ":", sizeof(JOURNAL_PARAMETER_ID ":") - 1) == 0) {
- char *id = &keyword[sizeof(JOURNAL_PARAMETER_ID ":") - 1];
-
- if(*id)
- progress_id = id;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_SOURCE ":", sizeof(JOURNAL_PARAMETER_SOURCE ":") - 1) == 0) {
- const char *value = &keyword[sizeof(JOURNAL_PARAMETER_SOURCE ":") - 1];
-
- buffer_json_member_add_array(wb, JOURNAL_PARAMETER_SOURCE);
-
- BUFFER *sources_list = buffer_create(0, NULL);
-
- source_type = SDJF_NONE;
- while(value) {
- char *sep = strchr(value, ',');
- if(sep)
- *sep++ = '\0';
-
- buffer_json_add_array_item_string(wb, value);
-
- if(strcmp(value, SDJF_SOURCE_ALL_NAME) == 0) {
- source_type |= SDJF_ALL;
- value = NULL;
- }
- else if(strcmp(value, SDJF_SOURCE_LOCAL_NAME) == 0) {
- source_type |= SDJF_LOCAL_ALL;
- value = NULL;
- }
- else if(strcmp(value, SDJF_SOURCE_REMOTES_NAME) == 0) {
- source_type |= SDJF_REMOTE_ALL;
- value = NULL;
- }
- else if(strcmp(value, SDJF_SOURCE_NAMESPACES_NAME) == 0) {
- source_type |= SDJF_LOCAL_NAMESPACE;
- value = NULL;
- }
- else if(strcmp(value, SDJF_SOURCE_LOCAL_SYSTEM_NAME) == 0) {
- source_type |= SDJF_LOCAL_SYSTEM;
- value = NULL;
- }
- else if(strcmp(value, SDJF_SOURCE_LOCAL_USERS_NAME) == 0) {
- source_type |= SDJF_LOCAL_USER;
- value = NULL;
- }
- else if(strcmp(value, SDJF_SOURCE_LOCAL_OTHER_NAME) == 0) {
- source_type |= SDJF_LOCAL_OTHER;
- value = NULL;
- }
- else {
- // else, match the source, whatever it is
- if(buffer_strlen(sources_list))
- buffer_strcat(sources_list, ",");
-
- buffer_strcat(sources_list, value);
- }
-
- value = sep;
- }
-
- if(buffer_strlen(sources_list)) {
- simple_pattern_free(sources);
- sources = simple_pattern_create(buffer_tostring(sources_list), ",", SIMPLE_PATTERN_EXACT, false);
- }
-
- buffer_free(sources_list);
-
- buffer_json_array_close(wb); // source
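-
-            // Example (illustrative, assuming SDJF_SOURCE_LOCAL_NAME is
-            // "local"): `source:local,remote-*` sets SDJF_LOCAL_ALL and
-            // additionally matches any source whose name fits the simple
-            // pattern "remote-*".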
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_AFTER ":", sizeof(JOURNAL_PARAMETER_AFTER ":") - 1) == 0) {
- after_s = str2l(&keyword[sizeof(JOURNAL_PARAMETER_AFTER ":") - 1]);
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_BEFORE ":", sizeof(JOURNAL_PARAMETER_BEFORE ":") - 1) == 0) {
- before_s = str2l(&keyword[sizeof(JOURNAL_PARAMETER_BEFORE ":") - 1]);
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":", sizeof(JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":") - 1) == 0) {
- if_modified_since = str2ull(&keyword[sizeof(JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":") - 1], NULL);
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_ANCHOR ":", sizeof(JOURNAL_PARAMETER_ANCHOR ":") - 1) == 0) {
- anchor = str2ull(&keyword[sizeof(JOURNAL_PARAMETER_ANCHOR ":") - 1], NULL);
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_DIRECTION ":", sizeof(JOURNAL_PARAMETER_DIRECTION ":") - 1) == 0) {
- direction = strcasecmp(&keyword[sizeof(JOURNAL_PARAMETER_DIRECTION ":") - 1], "forward") == 0 ? FACETS_ANCHOR_DIRECTION_FORWARD : FACETS_ANCHOR_DIRECTION_BACKWARD;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_LAST ":", sizeof(JOURNAL_PARAMETER_LAST ":") - 1) == 0) {
- last = str2ul(&keyword[sizeof(JOURNAL_PARAMETER_LAST ":") - 1]);
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_QUERY ":", sizeof(JOURNAL_PARAMETER_QUERY ":") - 1) == 0) {
-            query = &keyword[sizeof(JOURNAL_PARAMETER_QUERY ":") - 1];
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_HISTOGRAM ":", sizeof(JOURNAL_PARAMETER_HISTOGRAM ":") - 1) == 0) {
- chart = &keyword[sizeof(JOURNAL_PARAMETER_HISTOGRAM ":") - 1];
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_FACETS ":", sizeof(JOURNAL_PARAMETER_FACETS ":") - 1) == 0) {
- char *value = &keyword[sizeof(JOURNAL_PARAMETER_FACETS ":") - 1];
- if(*value) {
- buffer_json_member_add_array(wb, JOURNAL_PARAMETER_FACETS);
-
- while(value) {
- char *sep = strchr(value, ',');
- if(sep)
- *sep++ = '\0';
-
- facets_register_facet_id(facets, value, FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS|FACET_KEY_OPTION_REORDER);
- buffer_json_add_array_item_string(wb, value);
-
- value = sep;
- }
-
- buffer_json_array_close(wb); // JOURNAL_PARAMETER_FACETS
- }
- }
- else {
- char *value = strchr(keyword, ':');
- if(value) {
- *value++ = '\0';
-
- buffer_json_member_add_array(wb, keyword);
-
- while(value) {
- char *sep = strchr(value, ',');
- if(sep)
- *sep++ = '\0';
-
- facets_register_facet_id_filter(facets, keyword, value, FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS|FACET_KEY_OPTION_REORDER);
- buffer_json_add_array_item_string(wb, value);
- filters++;
-
- value = sep;
- }
-
- buffer_json_array_close(wb); // keyword
- }
- }
- }
-
- // ------------------------------------------------------------------------
- // put this request into the progress db
-
- if(progress_id && *progress_id) {
- fqs_item = dictionary_set_and_acquire_item(function_query_status_dict, progress_id, &tmp_fqs, sizeof(tmp_fqs));
- fqs = dictionary_acquired_item_value(fqs_item);
- }
- else {
- // no progress id given, proceed without registering our progress in the dictionary
- fqs = &tmp_fqs;
- fqs_item = NULL;
- }
-
- // ------------------------------------------------------------------------
- // validate parameters
-
- time_t now_s = now_realtime_sec();
- time_t expires = now_s + 1;
-
- if(!after_s && !before_s) {
- before_s = now_s;
- after_s = before_s - SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION;
- }
- else
- rrdr_relative_window_to_absolute(&after_s, &before_s, now_s);
-
- if(after_s > before_s) {
- time_t tmp = after_s;
- after_s = before_s;
- before_s = tmp;
- }
-
- if(after_s == before_s)
- after_s = before_s - SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION;
-
- if(!last)
- last = SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY;
-
- // ------------------------------------------------------------------------
- // set query time-frame, anchors and direction
-
- fqs->after_ut = after_s * USEC_PER_SEC;
- fqs->before_ut = (before_s * USEC_PER_SEC) + USEC_PER_SEC - 1;
- fqs->if_modified_since = if_modified_since;
- fqs->data_only = data_only;
- fqs->delta = (fqs->data_only) ? delta : false;
- fqs->tail = (fqs->data_only && fqs->if_modified_since) ? tail : false;
- fqs->sources = sources;
- fqs->source_type = source_type;
- fqs->entries = last;
- fqs->last_modified = 0;
- fqs->filters = filters;
- fqs->query = (query && *query) ? query : NULL;
- fqs->histogram = (chart && *chart) ? chart : NULL;
- fqs->direction = direction;
- fqs->anchor.start_ut = anchor;
- fqs->anchor.stop_ut = 0;
- fqs->sampling = sampling;
-
- if(fqs->anchor.start_ut && fqs->tail) {
- // a tail request
- // we need the top X entries from BEFORE
- // but, we need to calculate the facets and the
- // histogram up to the anchor
- fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD;
- fqs->anchor.start_ut = 0;
- fqs->anchor.stop_ut = anchor;
- }
-
- if(anchor && anchor < fqs->after_ut) {
- log_fqs(fqs, "received anchor is too small for query timeframe, ignoring anchor");
- anchor = 0;
- fqs->anchor.start_ut = 0;
- fqs->anchor.stop_ut = 0;
- fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD;
- }
- else if(anchor > fqs->before_ut) {
- log_fqs(fqs, "received anchor is too big for query timeframe, ignoring anchor");
- anchor = 0;
- fqs->anchor.start_ut = 0;
- fqs->anchor.stop_ut = 0;
- fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD;
- }
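-
-    // Illustrative timeline of the checks above (backward query):
-    //
-    //      after_ut ............. anchor ............. before_ut
-    //
-    // an anchor outside [after_ut, before_ut] is dropped and the query
-    // falls back to a plain backward scan of the whole window.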
-
- facets_set_anchor(facets, fqs->anchor.start_ut, fqs->anchor.stop_ut, fqs->direction);
-
- facets_set_additional_options(facets,
- ((fqs->data_only) ? FACETS_OPTION_DATA_ONLY : 0) |
- ((fqs->delta) ? FACETS_OPTION_SHOW_DELTAS : 0));
-
- // ------------------------------------------------------------------------
- // set the rest of the query parameters
-
- facets_set_items(facets, fqs->entries);
- facets_set_query(facets, fqs->query);
-
-#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS
- fqs->slice = slice;
- if(slice)
- facets_enable_slice_mode(facets);
-#else
- fqs->slice = false;
-#endif
-
- if(fqs->histogram)
- facets_set_timeframe_and_histogram_by_id(facets, fqs->histogram, fqs->after_ut, fqs->before_ut);
- else
- facets_set_timeframe_and_histogram_by_name(facets, "PRIORITY", fqs->after_ut, fqs->before_ut);
-
- // ------------------------------------------------------------------------
- // complete the request object
-
- buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_INFO, false);
- buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_SLICE, fqs->slice);
- buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_DATA_ONLY, fqs->data_only);
- buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_PROGRESS, false);
- buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_DELTA, fqs->delta);
- buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_TAIL, fqs->tail);
- buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_SAMPLING, fqs->sampling);
- buffer_json_member_add_string(wb, JOURNAL_PARAMETER_ID, progress_id);
- buffer_json_member_add_uint64(wb, "source_type", fqs->source_type);
- buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_AFTER, fqs->after_ut / USEC_PER_SEC);
- buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_BEFORE, fqs->before_ut / USEC_PER_SEC);
- buffer_json_member_add_uint64(wb, "if_modified_since", fqs->if_modified_since);
- buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_ANCHOR, anchor);
- buffer_json_member_add_string(wb, JOURNAL_PARAMETER_DIRECTION, fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD ? "forward" : "backward");
- buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_LAST, fqs->entries);
- buffer_json_member_add_string(wb, JOURNAL_PARAMETER_QUERY, fqs->query);
- buffer_json_member_add_string(wb, JOURNAL_PARAMETER_HISTOGRAM, fqs->histogram);
- buffer_json_object_close(wb); // request
-
- buffer_json_journal_versions(wb);
-
- // ------------------------------------------------------------------------
- // run the request
-
- int response;
-
- if(info) {
- facets_accepted_parameters_to_json_array(facets, wb, false);
- buffer_json_member_add_array(wb, "required_params");
- {
- buffer_json_add_array_item_object(wb);
- {
- buffer_json_member_add_string(wb, "id", "source");
- buffer_json_member_add_string(wb, "name", "source");
- buffer_json_member_add_string(wb, "help", "Select the SystemD Journal source to query");
- buffer_json_member_add_string(wb, "type", "multiselect");
- buffer_json_member_add_array(wb, "options");
- {
- available_journal_file_sources_to_json_array(wb);
- }
- buffer_json_array_close(wb); // options array
- }
- buffer_json_object_close(wb); // required params object
- }
- buffer_json_array_close(wb); // required_params array
-
- facets_table_config(wb);
-
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_string(wb, "help", SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION);
- buffer_json_finalize(wb);
- response = HTTP_RESP_OK;
- goto output;
- }
-
- if(progress) {
- function_systemd_journal_progress(wb, transaction, progress_id);
- goto cleanup;
- }
-
- response = netdata_systemd_journal_query(wb, facets, fqs);
-
- // ------------------------------------------------------------------------
- // handle error response
-
- if(response != HTTP_RESP_OK) {
- netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_json_error_to_stdout(transaction, response, "failed");
- netdata_mutex_unlock(&stdout_mutex);
- goto cleanup;
- }
-
-output:
- netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_result_to_stdout(transaction, response, "application/json", expires, wb);
- netdata_mutex_unlock(&stdout_mutex);
-
-cleanup:
- simple_pattern_free(sources);
- facets_destroy(facets);
- buffer_free(wb);
-
- if(fqs_item) {
- dictionary_del(function_query_status_dict, dictionary_acquired_item_name(fqs_item));
- dictionary_acquired_item_release(function_query_status_dict, fqs_item);
- dictionary_garbage_collect(function_query_status_dict);
- }
-}
-
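-// With DICT_OPTION_FIXED_SIZE the dictionary keeps its own inline copy of
-// each FUNCTION_QUERY_STATUS value (sized below), which is why
-// function_systemd_journal() can hand dictionary_set_and_acquire_item() a
-// stack-allocated template and let it go out of scope afterwards.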
-void journal_init_query_status(void) {
- function_query_status_dict = dictionary_create_advanced(
- DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE,
- NULL, sizeof(FUNCTION_QUERY_STATUS));
-}
diff --git a/collectors/systemd-journal.plugin/systemd-main.c b/collectors/systemd-journal.plugin/systemd-main.c
deleted file mode 100644
index a3510b0ed..000000000
--- a/collectors/systemd-journal.plugin/systemd-main.c
+++ /dev/null
@@ -1,112 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "systemd-internals.h"
-#include "libnetdata/required_dummies.h"
-
-#define SYSTEMD_JOURNAL_WORKER_THREADS 5
-
-netdata_mutex_t stdout_mutex = NETDATA_MUTEX_INITIALIZER;
-static bool plugin_should_exit = false;
-
-int main(int argc __maybe_unused, char **argv __maybe_unused) {
- clocks_init();
- netdata_thread_set_tag("SDMAIN");
- nd_log_initialize_for_external_plugins("systemd-journal.plugin");
-
- netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
- if(verify_netdata_host_prefix(true) == -1) exit(1);
-
- // ------------------------------------------------------------------------
- // initialization
-
- netdata_systemd_journal_message_ids_init();
- journal_init_query_status();
- journal_init_files_and_directories();
-
- // ------------------------------------------------------------------------
- // debug
-
- if(argc == 2 && strcmp(argv[1], "debug") == 0) {
- journal_files_registry_update();
-
- bool cancelled = false;
- char buf[] = "systemd-journal after:-8640000 before:0 direction:backward last:200 data_only:false slice:true source:all";
- // char buf[] = "systemd-journal after:1695332964 before:1695937764 direction:backward last:100 slice:true source:all DHKucpqUoe1:PtVoyIuX.MU";
- // char buf[] = "systemd-journal after:1694511062 before:1694514662 anchor:1694514122024403";
- function_systemd_journal("123", buf, 600, &cancelled);
-// function_systemd_units("123", "systemd-units", 600, &cancelled);
- exit(1);
- }
-#ifdef ENABLE_SYSTEMD_DBUS
- if(argc == 2 && strcmp(argv[1], "debug-units") == 0) {
- bool cancelled = false;
- function_systemd_units("123", "systemd-units", 600, &cancelled);
- exit(1);
- }
-#endif
-
- // ------------------------------------------------------------------------
- // watcher thread
-
- netdata_thread_t watcher_thread;
- netdata_thread_create(&watcher_thread, "SDWATCH",
- NETDATA_THREAD_OPTION_DONT_LOG, journal_watcher_main, NULL);
-
- // ------------------------------------------------------------------------
- // the event loop for functions
-
- struct functions_evloop_globals *wg =
- functions_evloop_init(SYSTEMD_JOURNAL_WORKER_THREADS, "SDJ", &stdout_mutex, &plugin_should_exit);
-
- functions_evloop_add_function(wg, SYSTEMD_JOURNAL_FUNCTION_NAME, function_systemd_journal,
- SYSTEMD_JOURNAL_DEFAULT_TIMEOUT);
-
-#ifdef ENABLE_SYSTEMD_DBUS
- functions_evloop_add_function(wg, SYSTEMD_UNITS_FUNCTION_NAME, function_systemd_units,
- SYSTEMD_UNITS_DEFAULT_TIMEOUT);
-#endif
-
- // ------------------------------------------------------------------------
- // register functions to netdata
-
- netdata_mutex_lock(&stdout_mutex);
-
- fprintf(stdout, PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"%s\" %d \"%s\"\n",
- SYSTEMD_JOURNAL_FUNCTION_NAME, SYSTEMD_JOURNAL_DEFAULT_TIMEOUT, SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION);
-
-#ifdef ENABLE_SYSTEMD_DBUS
- fprintf(stdout, PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"%s\" %d \"%s\"\n",
- SYSTEMD_UNITS_FUNCTION_NAME, SYSTEMD_UNITS_DEFAULT_TIMEOUT, SYSTEMD_UNITS_FUNCTION_DESCRIPTION);
-#endif
-
- fflush(stdout);
- netdata_mutex_unlock(&stdout_mutex);
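-
-    // The registration line written above has the form (illustrative values;
-    // name, timeout and description come from the macros):
-    //
-    //   FUNCTION GLOBAL "systemd-journal" 60 "..."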
-
- // ------------------------------------------------------------------------
-
- usec_t step_ut = 100 * USEC_PER_MS;
- usec_t send_newline_ut = 0;
- usec_t since_last_scan_ut = SYSTEMD_JOURNAL_ALL_FILES_SCAN_EVERY_USEC * 2; // something big to trigger scanning at start
- bool tty = isatty(fileno(stderr)) == 1;
-
- heartbeat_t hb;
- heartbeat_init(&hb);
- while(!plugin_should_exit) {
-
- if(since_last_scan_ut > SYSTEMD_JOURNAL_ALL_FILES_SCAN_EVERY_USEC) {
- journal_files_registry_update();
- since_last_scan_ut = 0;
- }
-
- usec_t dt_ut = heartbeat_next(&hb, step_ut);
- since_last_scan_ut += dt_ut;
- send_newline_ut += dt_ut;
-
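-        // heartbeat towards the agent: when stdout is not a tty (i.e. it is
-        // the pipe to the netdata agent), an empty line is flushed every
-        // second as a keep-alive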
- if(!tty && send_newline_ut > USEC_PER_SEC) {
- send_newline_and_flush();
- send_newline_ut = 0;
- }
- }
-
- exit(0);
-}
diff --git a/collectors/systemd-journal.plugin/systemd-units.c b/collectors/systemd-journal.plugin/systemd-units.c
deleted file mode 100644
index dac158817..000000000
--- a/collectors/systemd-journal.plugin/systemd-units.c
+++ /dev/null
@@ -1,1965 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "systemd-internals.h"
-
-#ifdef ENABLE_SYSTEMD_DBUS
-#include <systemd/sd-bus.h>
-
-#define SYSTEMD_UNITS_MAX_PARAMS 10
-#define SYSTEMD_UNITS_DBUS_TYPES "(ssssssouso)"
-
-// ----------------------------------------------------------------------------
-// copied from systemd: string-table.h
-
-typedef char sd_char;
-#define XCONCATENATE(x, y) x ## y
-#define CONCATENATE(x, y) XCONCATENATE(x, y)
-
-#ifndef __COVERITY__
-# define VOID_0 ((void)0)
-#else
-# define VOID_0 ((void*)0)
-#endif
-
-#define ELEMENTSOF(x) \
- (__builtin_choose_expr( \
- !__builtin_types_compatible_p(typeof(x), typeof(&*(x))), \
- sizeof(x)/sizeof((x)[0]), \
- VOID_0))
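-
-/* ELEMENTSOF() only compiles for true arrays: for a pointer, typeof(x) and
- * typeof(&*(x)) are compatible, so __builtin_choose_expr() selects VOID_0
- * and any use of the result is a compile-time error. For example:
- *
- *   int a[4];  ELEMENTSOF(a)   // 4
- *   int *p;    ELEMENTSOF(p)   // does not compile
- */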
-
-#define UNIQ_T(x, uniq) CONCATENATE(__unique_prefix_, CONCATENATE(x, uniq))
-#define UNIQ __COUNTER__
-#define __CMP(aq, a, bq, b) \
- ({ \
- const typeof(a) UNIQ_T(A, aq) = (a); \
- const typeof(b) UNIQ_T(B, bq) = (b); \
- UNIQ_T(A, aq) < UNIQ_T(B, bq) ? -1 : \
- UNIQ_T(A, aq) > UNIQ_T(B, bq) ? 1 : 0; \
- })
-#define CMP(a, b) __CMP(UNIQ, (a), UNIQ, (b))
-
-static inline int strcmp_ptr(const sd_char *a, const sd_char *b) {
- if (a && b)
- return strcmp(a, b);
-
- return CMP(a, b);
-}
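-
-/* strcmp_ptr() orders NULL before any string and treats two NULLs as equal,
- * so streq_ptr(NULL, NULL) is true while streq_ptr(NULL, "") is false. */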
-
-static inline bool streq_ptr(const sd_char *a, const sd_char *b) {
- return strcmp_ptr(a, b) == 0;
-}
-
-ssize_t string_table_lookup(const char * const *table, size_t len, const char *key) {
- if (!key || !*key)
- return -EINVAL;
-
- for (size_t i = 0; i < len; ++i)
- if (streq_ptr(table[i], key))
- return (ssize_t) i;
-
- return -EINVAL;
-}
-
-/* For basic lookup tables with strictly enumerated entries */
-#define _DEFINE_STRING_TABLE_LOOKUP_TO_STRING(name,type,scope) \
- scope const char *name##_to_string(type i) { \
- if (i < 0 || i >= (type) ELEMENTSOF(name##_table)) \
- return NULL; \
- return name##_table[i]; \
- }
-
-#define _DEFINE_STRING_TABLE_LOOKUP_FROM_STRING(name,type,scope) \
- scope type name##_from_string(const char *s) { \
- return (type) string_table_lookup(name##_table, ELEMENTSOF(name##_table), s); \
- }
-
-#define _DEFINE_STRING_TABLE_LOOKUP(name,type,scope) \
- _DEFINE_STRING_TABLE_LOOKUP_TO_STRING(name,type,scope) \
- _DEFINE_STRING_TABLE_LOOKUP_FROM_STRING(name,type,scope)
-
-#define DEFINE_STRING_TABLE_LOOKUP(name,type) _DEFINE_STRING_TABLE_LOOKUP(name,type,)
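-
-/* For example, DEFINE_STRING_TABLE_LOOKUP(unit_type, UnitType) below
- * generates:
- *
- *   const char *unit_type_to_string(UnitType i);   // NULL when out of range
- *   UnitType unit_type_from_string(const char *s); // _UNIT_TYPE_INVALID when unknown
- */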
-
-// ----------------------------------------------------------------------------
-// copied from systemd: unit-def.h
-
-typedef enum UnitType {
- UNIT_SERVICE,
- UNIT_MOUNT,
- UNIT_SWAP,
- UNIT_SOCKET,
- UNIT_TARGET,
- UNIT_DEVICE,
- UNIT_AUTOMOUNT,
- UNIT_TIMER,
- UNIT_PATH,
- UNIT_SLICE,
- UNIT_SCOPE,
- _UNIT_TYPE_MAX,
- _UNIT_TYPE_INVALID = -EINVAL,
-} UnitType;
-
-typedef enum UnitLoadState {
- UNIT_STUB,
- UNIT_LOADED,
- UNIT_NOT_FOUND, /* error condition #1: unit file not found */
- UNIT_BAD_SETTING, /* error condition #2: we couldn't parse some essential unit file setting */
- UNIT_ERROR, /* error condition #3: other "system" error, catchall for the rest */
- UNIT_MERGED,
- UNIT_MASKED,
- _UNIT_LOAD_STATE_MAX,
- _UNIT_LOAD_STATE_INVALID = -EINVAL,
-} UnitLoadState;
-
-typedef enum UnitActiveState {
- UNIT_ACTIVE,
- UNIT_RELOADING,
- UNIT_INACTIVE,
- UNIT_FAILED,
- UNIT_ACTIVATING,
- UNIT_DEACTIVATING,
- UNIT_MAINTENANCE,
- _UNIT_ACTIVE_STATE_MAX,
- _UNIT_ACTIVE_STATE_INVALID = -EINVAL,
-} UnitActiveState;
-
-typedef enum AutomountState {
- AUTOMOUNT_DEAD,
- AUTOMOUNT_WAITING,
- AUTOMOUNT_RUNNING,
- AUTOMOUNT_FAILED,
- _AUTOMOUNT_STATE_MAX,
- _AUTOMOUNT_STATE_INVALID = -EINVAL,
-} AutomountState;
-
-typedef enum DeviceState {
- DEVICE_DEAD,
- DEVICE_TENTATIVE, /* mounted or swapped, but not (yet) announced by udev */
- DEVICE_PLUGGED, /* announced by udev */
- _DEVICE_STATE_MAX,
- _DEVICE_STATE_INVALID = -EINVAL,
-} DeviceState;
-
-typedef enum MountState {
- MOUNT_DEAD,
- MOUNT_MOUNTING, /* /usr/bin/mount is running, but the mount is not done yet. */
- MOUNT_MOUNTING_DONE, /* /usr/bin/mount is running, and the mount is done. */
- MOUNT_MOUNTED,
- MOUNT_REMOUNTING,
- MOUNT_UNMOUNTING,
- MOUNT_REMOUNTING_SIGTERM,
- MOUNT_REMOUNTING_SIGKILL,
- MOUNT_UNMOUNTING_SIGTERM,
- MOUNT_UNMOUNTING_SIGKILL,
- MOUNT_FAILED,
- MOUNT_CLEANING,
- _MOUNT_STATE_MAX,
- _MOUNT_STATE_INVALID = -EINVAL,
-} MountState;
-
-typedef enum PathState {
- PATH_DEAD,
- PATH_WAITING,
- PATH_RUNNING,
- PATH_FAILED,
- _PATH_STATE_MAX,
- _PATH_STATE_INVALID = -EINVAL,
-} PathState;
-
-typedef enum ScopeState {
- SCOPE_DEAD,
- SCOPE_START_CHOWN,
- SCOPE_RUNNING,
- SCOPE_ABANDONED,
- SCOPE_STOP_SIGTERM,
- SCOPE_STOP_SIGKILL,
- SCOPE_FAILED,
- _SCOPE_STATE_MAX,
- _SCOPE_STATE_INVALID = -EINVAL,
-} ScopeState;
-
-typedef enum ServiceState {
- SERVICE_DEAD,
- SERVICE_CONDITION,
- SERVICE_START_PRE,
- SERVICE_START,
- SERVICE_START_POST,
- SERVICE_RUNNING,
- SERVICE_EXITED, /* Nothing is running anymore, but RemainAfterExit is true hence this is OK */
- SERVICE_RELOAD, /* Reloading via ExecReload= */
- SERVICE_RELOAD_SIGNAL, /* Reloading via SIGHUP requested */
- SERVICE_RELOAD_NOTIFY, /* Waiting for READY=1 after RELOADING=1 notify */
- SERVICE_STOP, /* No STOP_PRE state, instead just register multiple STOP executables */
- SERVICE_STOP_WATCHDOG,
- SERVICE_STOP_SIGTERM,
- SERVICE_STOP_SIGKILL,
- SERVICE_STOP_POST,
- SERVICE_FINAL_WATCHDOG, /* In case the STOP_POST executable needs to be aborted. */
- SERVICE_FINAL_SIGTERM, /* In case the STOP_POST executable hangs, we shoot that down, too */
- SERVICE_FINAL_SIGKILL,
- SERVICE_FAILED,
- SERVICE_DEAD_BEFORE_AUTO_RESTART,
- SERVICE_FAILED_BEFORE_AUTO_RESTART,
- SERVICE_DEAD_RESOURCES_PINNED, /* Like SERVICE_DEAD, but with pinned resources */
- SERVICE_AUTO_RESTART,
- SERVICE_AUTO_RESTART_QUEUED,
- SERVICE_CLEANING,
- _SERVICE_STATE_MAX,
- _SERVICE_STATE_INVALID = -EINVAL,
-} ServiceState;
-
-typedef enum SliceState {
- SLICE_DEAD,
- SLICE_ACTIVE,
- _SLICE_STATE_MAX,
- _SLICE_STATE_INVALID = -EINVAL,
-} SliceState;
-
-typedef enum SocketState {
- SOCKET_DEAD,
- SOCKET_START_PRE,
- SOCKET_START_CHOWN,
- SOCKET_START_POST,
- SOCKET_LISTENING,
- SOCKET_RUNNING,
- SOCKET_STOP_PRE,
- SOCKET_STOP_PRE_SIGTERM,
- SOCKET_STOP_PRE_SIGKILL,
- SOCKET_STOP_POST,
- SOCKET_FINAL_SIGTERM,
- SOCKET_FINAL_SIGKILL,
- SOCKET_FAILED,
- SOCKET_CLEANING,
- _SOCKET_STATE_MAX,
- _SOCKET_STATE_INVALID = -EINVAL,
-} SocketState;
-
-typedef enum SwapState {
- SWAP_DEAD,
- SWAP_ACTIVATING, /* /sbin/swapon is running, but the swap not yet enabled. */
- SWAP_ACTIVATING_DONE, /* /sbin/swapon is running, and the swap is done. */
- SWAP_ACTIVE,
- SWAP_DEACTIVATING,
- SWAP_DEACTIVATING_SIGTERM,
- SWAP_DEACTIVATING_SIGKILL,
- SWAP_FAILED,
- SWAP_CLEANING,
- _SWAP_STATE_MAX,
- _SWAP_STATE_INVALID = -EINVAL,
-} SwapState;
-
-typedef enum TargetState {
- TARGET_DEAD,
- TARGET_ACTIVE,
- _TARGET_STATE_MAX,
- _TARGET_STATE_INVALID = -EINVAL,
-} TargetState;
-
-typedef enum TimerState {
- TIMER_DEAD,
- TIMER_WAITING,
- TIMER_RUNNING,
- TIMER_ELAPSED,
- TIMER_FAILED,
- _TIMER_STATE_MAX,
- _TIMER_STATE_INVALID = -EINVAL,
-} TimerState;
-
-typedef enum FreezerState {
- FREEZER_RUNNING,
- FREEZER_FREEZING,
- FREEZER_FROZEN,
- FREEZER_THAWING,
- _FREEZER_STATE_MAX,
- _FREEZER_STATE_INVALID = -EINVAL,
-} FreezerState;
-
-// ----------------------------------------------------------------------------
-// copied from systemd: unit-def.c
-
-static const char* const unit_type_table[_UNIT_TYPE_MAX] = {
- [UNIT_SERVICE] = "service",
- [UNIT_SOCKET] = "socket",
- [UNIT_TARGET] = "target",
- [UNIT_DEVICE] = "device",
- [UNIT_MOUNT] = "mount",
- [UNIT_AUTOMOUNT] = "automount",
- [UNIT_SWAP] = "swap",
- [UNIT_TIMER] = "timer",
- [UNIT_PATH] = "path",
- [UNIT_SLICE] = "slice",
- [UNIT_SCOPE] = "scope",
-};
-
-DEFINE_STRING_TABLE_LOOKUP(unit_type, UnitType);
-
-static const char* const unit_load_state_table[_UNIT_LOAD_STATE_MAX] = {
- [UNIT_STUB] = "stub",
- [UNIT_LOADED] = "loaded",
- [UNIT_NOT_FOUND] = "not-found",
- [UNIT_BAD_SETTING] = "bad-setting",
- [UNIT_ERROR] = "error",
- [UNIT_MERGED] = "merged",
- [UNIT_MASKED] = "masked"
-};
-
-DEFINE_STRING_TABLE_LOOKUP(unit_load_state, UnitLoadState);
-
-static const char* const unit_active_state_table[_UNIT_ACTIVE_STATE_MAX] = {
- [UNIT_ACTIVE] = "active",
- [UNIT_RELOADING] = "reloading",
- [UNIT_INACTIVE] = "inactive",
- [UNIT_FAILED] = "failed",
- [UNIT_ACTIVATING] = "activating",
- [UNIT_DEACTIVATING] = "deactivating",
- [UNIT_MAINTENANCE] = "maintenance",
-};
-
-DEFINE_STRING_TABLE_LOOKUP(unit_active_state, UnitActiveState);
-
-static const char* const automount_state_table[_AUTOMOUNT_STATE_MAX] = {
- [AUTOMOUNT_DEAD] = "dead",
- [AUTOMOUNT_WAITING] = "waiting",
- [AUTOMOUNT_RUNNING] = "running",
- [AUTOMOUNT_FAILED] = "failed"
-};
-
-DEFINE_STRING_TABLE_LOOKUP(automount_state, AutomountState);
-
-static const char* const device_state_table[_DEVICE_STATE_MAX] = {
- [DEVICE_DEAD] = "dead",
- [DEVICE_TENTATIVE] = "tentative",
- [DEVICE_PLUGGED] = "plugged",
-};
-
-DEFINE_STRING_TABLE_LOOKUP(device_state, DeviceState);
-
-static const char* const mount_state_table[_MOUNT_STATE_MAX] = {
- [MOUNT_DEAD] = "dead",
- [MOUNT_MOUNTING] = "mounting",
- [MOUNT_MOUNTING_DONE] = "mounting-done",
- [MOUNT_MOUNTED] = "mounted",
- [MOUNT_REMOUNTING] = "remounting",
- [MOUNT_UNMOUNTING] = "unmounting",
- [MOUNT_REMOUNTING_SIGTERM] = "remounting-sigterm",
- [MOUNT_REMOUNTING_SIGKILL] = "remounting-sigkill",
- [MOUNT_UNMOUNTING_SIGTERM] = "unmounting-sigterm",
- [MOUNT_UNMOUNTING_SIGKILL] = "unmounting-sigkill",
- [MOUNT_FAILED] = "failed",
- [MOUNT_CLEANING] = "cleaning",
-};
-
-DEFINE_STRING_TABLE_LOOKUP(mount_state, MountState);
-
-static const char* const path_state_table[_PATH_STATE_MAX] = {
- [PATH_DEAD] = "dead",
- [PATH_WAITING] = "waiting",
- [PATH_RUNNING] = "running",
- [PATH_FAILED] = "failed"
-};
-
-DEFINE_STRING_TABLE_LOOKUP(path_state, PathState);
-
-static const char* const scope_state_table[_SCOPE_STATE_MAX] = {
- [SCOPE_DEAD] = "dead",
- [SCOPE_START_CHOWN] = "start-chown",
- [SCOPE_RUNNING] = "running",
- [SCOPE_ABANDONED] = "abandoned",
- [SCOPE_STOP_SIGTERM] = "stop-sigterm",
- [SCOPE_STOP_SIGKILL] = "stop-sigkill",
- [SCOPE_FAILED] = "failed",
-};
-
-DEFINE_STRING_TABLE_LOOKUP(scope_state, ScopeState);
-
-static const char* const service_state_table[_SERVICE_STATE_MAX] = {
- [SERVICE_DEAD] = "dead",
- [SERVICE_CONDITION] = "condition",
- [SERVICE_START_PRE] = "start-pre",
- [SERVICE_START] = "start",
- [SERVICE_START_POST] = "start-post",
- [SERVICE_RUNNING] = "running",
- [SERVICE_EXITED] = "exited",
- [SERVICE_RELOAD] = "reload",
- [SERVICE_RELOAD_SIGNAL] = "reload-signal",
- [SERVICE_RELOAD_NOTIFY] = "reload-notify",
- [SERVICE_STOP] = "stop",
- [SERVICE_STOP_WATCHDOG] = "stop-watchdog",
- [SERVICE_STOP_SIGTERM] = "stop-sigterm",
- [SERVICE_STOP_SIGKILL] = "stop-sigkill",
- [SERVICE_STOP_POST] = "stop-post",
- [SERVICE_FINAL_WATCHDOG] = "final-watchdog",
- [SERVICE_FINAL_SIGTERM] = "final-sigterm",
- [SERVICE_FINAL_SIGKILL] = "final-sigkill",
- [SERVICE_FAILED] = "failed",
- [SERVICE_DEAD_BEFORE_AUTO_RESTART] = "dead-before-auto-restart",
- [SERVICE_FAILED_BEFORE_AUTO_RESTART] = "failed-before-auto-restart",
- [SERVICE_DEAD_RESOURCES_PINNED] = "dead-resources-pinned",
- [SERVICE_AUTO_RESTART] = "auto-restart",
- [SERVICE_AUTO_RESTART_QUEUED] = "auto-restart-queued",
- [SERVICE_CLEANING] = "cleaning",
-};
-
-DEFINE_STRING_TABLE_LOOKUP(service_state, ServiceState);
-
-static const char* const slice_state_table[_SLICE_STATE_MAX] = {
- [SLICE_DEAD] = "dead",
- [SLICE_ACTIVE] = "active"
-};
-
-DEFINE_STRING_TABLE_LOOKUP(slice_state, SliceState);
-
-static const char* const socket_state_table[_SOCKET_STATE_MAX] = {
- [SOCKET_DEAD] = "dead",
- [SOCKET_START_PRE] = "start-pre",
- [SOCKET_START_CHOWN] = "start-chown",
- [SOCKET_START_POST] = "start-post",
- [SOCKET_LISTENING] = "listening",
- [SOCKET_RUNNING] = "running",
- [SOCKET_STOP_PRE] = "stop-pre",
- [SOCKET_STOP_PRE_SIGTERM] = "stop-pre-sigterm",
- [SOCKET_STOP_PRE_SIGKILL] = "stop-pre-sigkill",
- [SOCKET_STOP_POST] = "stop-post",
- [SOCKET_FINAL_SIGTERM] = "final-sigterm",
- [SOCKET_FINAL_SIGKILL] = "final-sigkill",
- [SOCKET_FAILED] = "failed",
- [SOCKET_CLEANING] = "cleaning",
-};
-
-DEFINE_STRING_TABLE_LOOKUP(socket_state, SocketState);
-
-static const char* const swap_state_table[_SWAP_STATE_MAX] = {
- [SWAP_DEAD] = "dead",
- [SWAP_ACTIVATING] = "activating",
- [SWAP_ACTIVATING_DONE] = "activating-done",
- [SWAP_ACTIVE] = "active",
- [SWAP_DEACTIVATING] = "deactivating",
- [SWAP_DEACTIVATING_SIGTERM] = "deactivating-sigterm",
- [SWAP_DEACTIVATING_SIGKILL] = "deactivating-sigkill",
- [SWAP_FAILED] = "failed",
- [SWAP_CLEANING] = "cleaning",
-};
-
-DEFINE_STRING_TABLE_LOOKUP(swap_state, SwapState);
-
-static const char* const target_state_table[_TARGET_STATE_MAX] = {
- [TARGET_DEAD] = "dead",
- [TARGET_ACTIVE] = "active"
-};
-
-DEFINE_STRING_TABLE_LOOKUP(target_state, TargetState);
-
-static const char* const timer_state_table[_TIMER_STATE_MAX] = {
- [TIMER_DEAD] = "dead",
- [TIMER_WAITING] = "waiting",
- [TIMER_RUNNING] = "running",
- [TIMER_ELAPSED] = "elapsed",
- [TIMER_FAILED] = "failed"
-};
-
-DEFINE_STRING_TABLE_LOOKUP(timer_state, TimerState);
-
-static const char* const freezer_state_table[_FREEZER_STATE_MAX] = {
- [FREEZER_RUNNING] = "running",
- [FREEZER_FREEZING] = "freezing",
- [FREEZER_FROZEN] = "frozen",
- [FREEZER_THAWING] = "thawing",
-};
-
-DEFINE_STRING_TABLE_LOOKUP(freezer_state, FreezerState);
-
-// ----------------------------------------------------------------------------
-// our code
-
-typedef struct UnitAttribute {
- union {
- int boolean;
- char *str;
- uint64_t uint64;
- int64_t int64;
- uint32_t uint32;
- int32_t int32;
- double dbl;
- };
-} UnitAttribute;
-
-struct UnitInfo;
-typedef void (*attribute_handler_t)(struct UnitInfo *u, UnitAttribute *ua);
-
-static void update_freezer_state(struct UnitInfo *u, UnitAttribute *ua);
-
-struct {
- const char *member;
- char value_type;
-
- const char *show_as;
- const char *info;
- RRDF_FIELD_OPTIONS options;
- RRDF_FIELD_FILTER filter;
-
- attribute_handler_t handler;
-} unit_attributes[] = {
- {
- .member = "Type",
- .value_type = SD_BUS_TYPE_STRING,
- .show_as = "ServiceType",
- .info = "Service Type",
- .options = RRDF_FIELD_OPTS_VISIBLE,
- .filter = RRDF_FIELD_FILTER_MULTISELECT,
- }, {
- .member = "Result",
- .value_type = SD_BUS_TYPE_STRING,
- .show_as = "Result",
- .info = "Result",
- .options = RRDF_FIELD_OPTS_VISIBLE,
- .filter = RRDF_FIELD_FILTER_MULTISELECT,
- }, {
- .member = "UnitFileState",
- .value_type = SD_BUS_TYPE_STRING,
- .show_as = "Enabled",
- .info = "Unit File State",
- .options = RRDF_FIELD_OPTS_NONE,
- .filter = RRDF_FIELD_FILTER_MULTISELECT,
- }, {
- .member = "UnitFilePreset",
- .value_type = SD_BUS_TYPE_STRING,
- .show_as = "Preset",
- .info = "Unit File Preset",
- .options = RRDF_FIELD_OPTS_NONE,
- .filter = RRDF_FIELD_FILTER_MULTISELECT,
- }, {
- .member = "FreezerState",
- .value_type = SD_BUS_TYPE_STRING,
- .show_as = "FreezerState",
- .info = "Freezer State",
- .options = RRDF_FIELD_OPTS_NONE,
- .filter = RRDF_FIELD_FILTER_MULTISELECT,
- .handler = update_freezer_state,
- },
-// { .member = "Id", .signature = "s", },
-// { .member = "LoadState", .signature = "s", },
-// { .member = "ActiveState", .signature = "s", },
-// { .member = "SubState", .signature = "s", },
-// { .member = "Description", .signature = "s", },
-// { .member = "Following", .signature = "s", },
-// { .member = "Documentation", .signature = "as", },
-// { .member = "FragmentPath", .signature = "s", },
-// { .member = "SourcePath", .signature = "s", },
-// { .member = "ControlGroup", .signature = "s", },
-// { .member = "DropInPaths", .signature = "as", },
-// { .member = "LoadError", .signature = "(ss)", },
-// { .member = "TriggeredBy", .signature = "as", },
-// { .member = "Triggers", .signature = "as", },
-// { .member = "InactiveExitTimestamp", .signature = "t", },
-// { .member = "InactiveExitTimestampMonotonic", .signature = "t", },
-// { .member = "ActiveEnterTimestamp", .signature = "t", },
-// { .member = "ActiveExitTimestamp", .signature = "t", },
-// { .member = "RuntimeMaxUSec", .signature = "t", },
-// { .member = "InactiveEnterTimestamp", .signature = "t", },
-// { .member = "NeedDaemonReload", .signature = "b", },
-// { .member = "Transient", .signature = "b", },
-// { .member = "ExecMainPID", .signature = "u", },
-// { .member = "MainPID", .signature = "u", },
-// { .member = "ControlPID", .signature = "u", },
-// { .member = "StatusText", .signature = "s", },
-// { .member = "PIDFile", .signature = "s", },
-// { .member = "StatusErrno", .signature = "i", },
-// { .member = "FileDescriptorStoreMax", .signature = "u", },
-// { .member = "NFileDescriptorStore", .signature = "u", },
-// { .member = "ExecMainStartTimestamp", .signature = "t", },
-// { .member = "ExecMainExitTimestamp", .signature = "t", },
-// { .member = "ExecMainCode", .signature = "i", },
-// { .member = "ExecMainStatus", .signature = "i", },
-// { .member = "LogNamespace", .signature = "s", },
-// { .member = "ConditionTimestamp", .signature = "t", },
-// { .member = "ConditionResult", .signature = "b", },
-// { .member = "Conditions", .signature = "a(sbbsi)", },
-// { .member = "AssertTimestamp", .signature = "t", },
-// { .member = "AssertResult", .signature = "b", },
-// { .member = "Asserts", .signature = "a(sbbsi)", },
-// { .member = "NextElapseUSecRealtime", .signature = "t", },
-// { .member = "NextElapseUSecMonotonic", .signature = "t", },
-// { .member = "NAccepted", .signature = "u", },
-// { .member = "NConnections", .signature = "u", },
-// { .member = "NRefused", .signature = "u", },
-// { .member = "Accept", .signature = "b", },
-// { .member = "Listen", .signature = "a(ss)", },
-// { .member = "SysFSPath", .signature = "s", },
-// { .member = "Where", .signature = "s", },
-// { .member = "What", .signature = "s", },
-// { .member = "MemoryCurrent", .signature = "t", },
-// { .member = "MemoryAvailable", .signature = "t", },
-// { .member = "DefaultMemoryMin", .signature = "t", },
-// { .member = "DefaultMemoryLow", .signature = "t", },
-// { .member = "DefaultStartupMemoryLow", .signature = "t", },
-// { .member = "MemoryMin", .signature = "t", },
-// { .member = "MemoryLow", .signature = "t", },
-// { .member = "StartupMemoryLow", .signature = "t", },
-// { .member = "MemoryHigh", .signature = "t", },
-// { .member = "StartupMemoryHigh", .signature = "t", },
-// { .member = "MemoryMax", .signature = "t", },
-// { .member = "StartupMemoryMax", .signature = "t", },
-// { .member = "MemorySwapMax", .signature = "t", },
-// { .member = "StartupMemorySwapMax", .signature = "t", },
-// { .member = "MemoryZSwapMax", .signature = "t", },
-// { .member = "StartupMemoryZSwapMax", .signature = "t", },
-// { .member = "MemoryLimit", .signature = "t", },
-// { .member = "CPUUsageNSec", .signature = "t", },
-// { .member = "TasksCurrent", .signature = "t", },
-// { .member = "TasksMax", .signature = "t", },
-// { .member = "IPIngressBytes", .signature = "t", },
-// { .member = "IPEgressBytes", .signature = "t", },
-// { .member = "IOReadBytes", .signature = "t", },
-// { .member = "IOWriteBytes", .signature = "t", },
-// { .member = "ExecCondition", .signature = "a(sasbttttuii)", },
-// { .member = "ExecConditionEx", .signature = "a(sasasttttuii)", },
-// { .member = "ExecStartPre", .signature = "a(sasbttttuii)", },
-// { .member = "ExecStartPreEx", .signature = "a(sasasttttuii)", },
-// { .member = "ExecStart", .signature = "a(sasbttttuii)", },
-// { .member = "ExecStartEx", .signature = "a(sasasttttuii)", },
-// { .member = "ExecStartPost", .signature = "a(sasbttttuii)", },
-// { .member = "ExecStartPostEx", .signature = "a(sasasttttuii)", },
-// { .member = "ExecReload", .signature = "a(sasbttttuii)", },
-// { .member = "ExecReloadEx", .signature = "a(sasasttttuii)", },
-// { .member = "ExecStopPre", .signature = "a(sasbttttuii)", },
-// { .member = "ExecStop", .signature = "a(sasbttttuii)", },
-// { .member = "ExecStopEx", .signature = "a(sasasttttuii)", },
-// { .member = "ExecStopPost", .signature = "a(sasbttttuii)", },
-// { .member = "ExecStopPostEx", .signature = "a(sasasttttuii)", },
-};
-
-#define _UNIT_ATTRIBUTE_MAX (sizeof(unit_attributes) / sizeof(unit_attributes[0]))
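-
-/* To track another D-Bus property, add one entry to the table above, e.g.
- * (illustrative, using a property from the commented list):
- *
- *   { .member = "ActiveEnterTimestamp", .value_type = SD_BUS_TYPE_UINT64,
- *     .show_as = "ActiveSince", .info = "Active Enter Timestamp",
- *     .options = RRDF_FIELD_OPTS_NONE, .filter = RRDF_FIELD_FILTER_RANGE, },
- *
- * systemd_unit_get_property() below then fills attributes[slot]
- * automatically; a .handler is only needed when the value must also update
- * the UnitInfo itself (see update_freezer_state()). */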
-
-typedef struct UnitInfo {
- char *id;
- char *type;
- char *description;
- char *load_state;
- char *active_state;
- char *sub_state;
- char *following;
- char *unit_path;
- uint32_t job_id;
- char *job_type;
- char *job_path;
-
- UnitType UnitType;
- UnitLoadState UnitLoadState;
- UnitActiveState UnitActiveState;
- FreezerState FreezerState;
-
- union {
- AutomountState AutomountState;
- DeviceState DeviceState;
- MountState MountState;
- PathState PathState;
- ScopeState ScopeState;
- ServiceState ServiceState;
- SliceState SliceState;
- SocketState SocketState;
- SwapState SwapState;
- TargetState TargetState;
- TimerState TimerState;
- };
-
- struct UnitAttribute attributes[_UNIT_ATTRIBUTE_MAX];
-
- FACET_ROW_SEVERITY severity;
- uint32_t prio;
-
- struct UnitInfo *prev, *next;
-} UnitInfo;
-
-static void update_freezer_state(UnitInfo *u, UnitAttribute *ua) {
- u->FreezerState = freezer_state_from_string(ua->str);
-}
-
-// ----------------------------------------------------------------------------
-// common helpers
-
-static void log_dbus_error(int r, const char *msg) {
- netdata_log_error("SYSTEMD_UNITS: %s failed with error %d (%s)", msg, r, strerror(-r));
-}
-
-// ----------------------------------------------------------------------------
-// attributes management
-
-static inline ssize_t unit_property_slot_from_string(const char *s) {
- if(!s || !*s)
- return -EINVAL;
-
- for(size_t i = 0; i < _UNIT_ATTRIBUTE_MAX ;i++)
- if(streq_ptr(unit_attributes[i].member, s))
- return (ssize_t)i;
-
- return -EINVAL;
-}
-
-static inline const char *unit_property_name_to_string_from_slot(ssize_t i) {
- if(i >= 0 && i < (ssize_t)_UNIT_ATTRIBUTE_MAX)
- return unit_attributes[i].member;
-
- return NULL;
-}
-
-static inline void systemd_unit_free_property(char type, struct UnitAttribute *at) {
- switch(type) {
- case SD_BUS_TYPE_STRING:
- case SD_BUS_TYPE_OBJECT_PATH:
- freez(at->str);
- at->str = NULL;
- break;
-
- default:
- break;
- }
-}
-
-static int systemd_unit_get_property(sd_bus_message *m, UnitInfo *u, const char *name) {
- int r;
- char type;
-
- r = sd_bus_message_peek_type(m, &type, NULL);
- if(r < 0) {
- log_dbus_error(r, "sd_bus_message_peek_type()");
- return r;
- }
-
- ssize_t slot = unit_property_slot_from_string(name);
- if(slot < 0) {
- // internal_error(true, "unused attribute '%s' for unit '%s'", name, u->id);
- sd_bus_message_skip(m, NULL);
- return 0;
- }
-
- systemd_unit_free_property(unit_attributes[slot].value_type, &u->attributes[slot]);
-
- if(unit_attributes[slot].value_type != type) {
- netdata_log_error("Type of field '%s' expected to be '%c' but found '%c'. Ignoring field.",
- unit_attributes[slot].member, unit_attributes[slot].value_type, type);
- sd_bus_message_skip(m, NULL);
- return 0;
- }
-
- switch (type) {
- case SD_BUS_TYPE_OBJECT_PATH:
- case SD_BUS_TYPE_STRING: {
- char *s;
-
- r = sd_bus_message_read_basic(m, type, &s);
- if(r < 0) {
- log_dbus_error(r, "sd_bus_message_read_basic()");
- return r;
- }
-
- if(s && *s)
- u->attributes[slot].str = strdupz(s);
- }
- break;
-
- case SD_BUS_TYPE_BOOLEAN: {
- r = sd_bus_message_read_basic(m, type, &u->attributes[slot].boolean);
- if(r < 0) {
- log_dbus_error(r, "sd_bus_message_read_basic()");
- return r;
- }
- }
- break;
-
- case SD_BUS_TYPE_UINT64: {
- r = sd_bus_message_read_basic(m, type, &u->attributes[slot].uint64);
- if(r < 0) {
- log_dbus_error(r, "sd_bus_message_read_basic()");
- return r;
- }
- }
- break;
-
- case SD_BUS_TYPE_INT64: {
- r = sd_bus_message_read_basic(m, type, &u->attributes[slot].int64);
- if(r < 0) {
- log_dbus_error(r, "sd_bus_message_read_basic()");
- return r;
- }
- }
- break;
-
- case SD_BUS_TYPE_UINT32: {
- r = sd_bus_message_read_basic(m, type, &u->attributes[slot].uint32);
- if(r < 0) {
- log_dbus_error(r, "sd_bus_message_read_basic()");
- return r;
- }
- }
- break;
-
- case SD_BUS_TYPE_INT32: {
- r = sd_bus_message_read_basic(m, type, &u->attributes[slot].int32);
- if(r < 0) {
- log_dbus_error(r, "sd_bus_message_read_basic()");
- return r;
- }
- }
- break;
-
- case SD_BUS_TYPE_DOUBLE: {
- r = sd_bus_message_read_basic(m, type, &u->attributes[slot].dbl);
- if(r < 0) {
- log_dbus_error(r, "sd_bus_message_read_basic()");
- return r;
- }
- }
- break;
-
- case SD_BUS_TYPE_ARRAY: {
- internal_error(true, "member '%s' is an array", name);
- sd_bus_message_skip(m, NULL);
- return 0;
- }
- break;
-
- default: {
- internal_error(true, "unknown field type '%c' for key '%s'", type, name);
- sd_bus_message_skip(m, NULL);
- return 0;
- }
- break;
- }
-
- if(unit_attributes[slot].handler)
- unit_attributes[slot].handler(u, &u->attributes[slot]);
-
- return 0;
-}
-
-static int systemd_unit_get_all_properties(sd_bus *bus, UnitInfo *u) {
- _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL;
- _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
- int r;
-
- r = sd_bus_call_method(bus,
- "org.freedesktop.systemd1",
- u->unit_path,
- "org.freedesktop.DBus.Properties",
- "GetAll",
- &error,
- &m,
- "s", "");
- if (r < 0) {
- log_dbus_error(r, "sd_bus_call_method(p1)");
- return r;
- }
-
- r = sd_bus_message_enter_container(m, SD_BUS_TYPE_ARRAY, "{sv}");
- if (r < 0) {
- log_dbus_error(r, "sd_bus_message_enter_container(p2)");
- return r;
- }
-
- int c = 0;
- while ((r = sd_bus_message_enter_container(m, SD_BUS_TYPE_DICT_ENTRY, "sv")) > 0) {
- const char *member, *contents;
- c++;
-
- r = sd_bus_message_read_basic(m, SD_BUS_TYPE_STRING, &member);
- if (r < 0) {
- log_dbus_error(r, "sd_bus_message_read_basic(p3)");
- return r;
- }
-
- r = sd_bus_message_peek_type(m, NULL, &contents);
- if (r < 0) {
- log_dbus_error(r, "sd_bus_message_peek_type(p4)");
- return r;
- }
-
- r = sd_bus_message_enter_container(m, SD_BUS_TYPE_VARIANT, contents);
- if (r < 0) {
- log_dbus_error(r, "sd_bus_message_enter_container(p5)");
- return r;
- }
-
- systemd_unit_get_property(m, u, member);
-
- r = sd_bus_message_exit_container(m);
- if(r < 0) {
- log_dbus_error(r, "sd_bus_message_exit_container(p6)");
- return r;
- }
-
- r = sd_bus_message_exit_container(m);
- if(r < 0) {
- log_dbus_error(r, "sd_bus_message_exit_container(p7)");
- return r;
- }
- }
- if(r < 0) {
- log_dbus_error(r, "sd_bus_message_enter_container(p8)");
- return r;
- }
-
- r = sd_bus_message_exit_container(m);
- if(r < 0) {
- log_dbus_error(r, "sd_bus_message_exit_container(p9)");
- return r;
- }
-
- return 0;
-}
-
-static void systemd_units_get_all_properties(sd_bus *bus, UnitInfo *base) {
- for(UnitInfo *u = base ; u ;u = u->next)
- systemd_unit_get_all_properties(bus, u);
-}
-
-// ----------------------------------------------------------------------------
-// main unit info
-
-int bus_parse_unit_info(sd_bus_message *message, UnitInfo *u) {
- assert(message);
- assert(u);
-
- u->type = NULL;
-
- int r = sd_bus_message_read(
- message,
- SYSTEMD_UNITS_DBUS_TYPES,
- &u->id,
- &u->description,
- &u->load_state,
- &u->active_state,
- &u->sub_state,
- &u->following,
- &u->unit_path,
- &u->job_id,
- &u->job_type,
- &u->job_path);
-
- if(r <= 0)
- return r;
-
- char *dot;
- if(u->id && (dot = strrchr(u->id, '.')) != NULL)
- u->type = &dot[1];
- else
- u->type = "unknown";
-
- u->UnitType = unit_type_from_string(u->type);
- u->UnitLoadState = unit_load_state_from_string(u->load_state);
- u->UnitActiveState = unit_active_state_from_string(u->active_state);
-
- switch(u->UnitType) {
- case UNIT_SERVICE:
- u->ServiceState = service_state_from_string(u->sub_state);
- break;
-
- case UNIT_MOUNT:
- u->MountState = mount_state_from_string(u->sub_state);
- break;
-
- case UNIT_SWAP:
- u->SwapState = swap_state_from_string(u->sub_state);
- break;
-
- case UNIT_SOCKET:
- u->SocketState = socket_state_from_string(u->sub_state);
- break;
-
- case UNIT_TARGET:
- u->TargetState = target_state_from_string(u->sub_state);
- break;
-
- case UNIT_DEVICE:
- u->DeviceState = device_state_from_string(u->sub_state);
- break;
-
- case UNIT_AUTOMOUNT:
- u->AutomountState = automount_state_from_string(u->sub_state);
- break;
-
- case UNIT_TIMER:
- u->TimerState = timer_state_from_string(u->sub_state);
- break;
-
- case UNIT_PATH:
- u->PathState = path_state_from_string(u->sub_state);
- break;
-
- case UNIT_SLICE:
- u->SliceState = slice_state_from_string(u->sub_state);
- break;
-
- case UNIT_SCOPE:
- u->ScopeState = scope_state_from_string(u->sub_state);
- break;
-
- default:
- break;
- }
-
- return r;
-}
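-
-/* The "(ssssssouso)" signature read above maps, in order, to:
- *   s id, s description, s load_state, s active_state, s sub_state,
- *   s following, o unit_path, u job_id, s job_type, o job_path
- * i.e. six strings, an object path, a uint32, and a string/object-path pair
- * describing any pending job. */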
-
-static int hex_to_int(char c) {
- if (c >= '0' && c <= '9') return c - '0';
- if (c >= 'a' && c <= 'f') return c - 'a' + 10;
- if (c >= 'A' && c <= 'F') return c - 'A' + 10;
- return 0;
-}
-
-// un-escape hex sequences (\xNN) in id
-static void txt_decode(char *txt) {
- if(!txt || !*txt)
- return;
-
- char *src = txt, *dst = txt;
-
- size_t id_len = strlen(src);
- size_t s = 0, d = 0;
- for(; s < id_len ; s++) {
- if(src[s] == '\\' && src[s + 1] == 'x' && isxdigit(src[s + 2]) && isxdigit(src[s + 3])) {
- int value = (hex_to_int(src[s + 2]) << 4) + hex_to_int(src[s + 3]);
- dst[d++] = (char)value;
- s += 3;
- }
- else
- dst[d++] = src[s];
- }
- dst[d] = '\0';
-}
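-
-// Example: systemd escapes non-alphanumeric characters in unit names as
-// \xNN hex sequences, so txt_decode() rewrites "dev-disk-by\x2dlabel" to
-// "dev-disk-by-label" in place (0x2d is '-').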
-
-static UnitInfo *systemd_units_get_all(void) {
- _cleanup_(sd_bus_unrefp) sd_bus *bus = NULL;
- _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
- _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL;
-
- UnitInfo *base = NULL;
- int r;
-
- r = sd_bus_default_system(&bus);
- if (r < 0) {
- log_dbus_error(r, "sd_bus_default_system()");
- return base;
- }
-
- // This calls the ListUnits method of the org.freedesktop.systemd1.Manager interface
- // Replace "ListUnits" with "ListUnitsFiltered" to get specific units based on filters
- r = sd_bus_call_method(bus,
- "org.freedesktop.systemd1", /* service to contact */
- "/org/freedesktop/systemd1", /* object path */
- "org.freedesktop.systemd1.Manager", /* interface name */
- "ListUnits", /* method name */
- &error, /* object to return error in */
- &reply, /* return message on success */
- NULL); /* input signature */
- if (r < 0) {
- log_dbus_error(r, "sd_bus_call_method()");
- return base;
- }
-
- r = sd_bus_message_enter_container(reply, SD_BUS_TYPE_ARRAY, SYSTEMD_UNITS_DBUS_TYPES);
- if (r < 0) {
- log_dbus_error(r, "sd_bus_message_enter_container()");
- return base;
- }
-
- UnitInfo u;
- memset(&u, 0, sizeof(u));
- while ((r = bus_parse_unit_info(reply, &u)) > 0) {
- UnitInfo *i = callocz(1, sizeof(u));
- *i = u;
-
- i->id = strdupz(u.id && *u.id ? u.id : "-");
- txt_decode(i->id);
-
- i->type = strdupz(u.type && *u.type ? u.type : "-");
- i->description = strdupz(u.description && *u.description ? u.description : "-");
- txt_decode(i->description);
-
- i->load_state = strdupz(u.load_state && *u.load_state ? u.load_state : "-");
- i->active_state = strdupz(u.active_state && *u.active_state ? u.active_state : "-");
- i->sub_state = strdupz(u.sub_state && *u.sub_state ? u.sub_state : "-");
- i->following = strdupz(u.following && *u.following ? u.following : "-");
- i->unit_path = strdupz(u.unit_path && *u.unit_path ? u.unit_path : "-");
- i->job_type = strdupz(u.job_type && *u.job_type ? u.job_type : "-");
- i->job_path = strdupz(u.job_path && *u.job_path ? u.job_path : "-");
- i->job_id = u.job_id;
-
- DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(base, i, prev, next);
- memset(&u, 0, sizeof(u));
- }
- if (r < 0) {
- log_dbus_error(r, "sd_bus_message_read()");
- return base;
- }
-
- r = sd_bus_message_exit_container(reply);
- if (r < 0) {
- log_dbus_error(r, "sd_bus_message_exit_container()");
- return base;
- }
-
- systemd_units_get_all_properties(bus, base);
-
- return base;
-}
-
-void systemd_units_free_all(UnitInfo *base) {
- while(base) {
- UnitInfo *u = base;
- DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(base, u, prev, next);
- freez((void *)u->id);
- freez((void *)u->type);
- freez((void *)u->description);
- freez((void *)u->load_state);
- freez((void *)u->active_state);
- freez((void *)u->sub_state);
- freez((void *)u->following);
- freez((void *)u->unit_path);
- freez((void *)u->job_type);
- freez((void *)u->job_path);
-
-        for(size_t i = 0; i < _UNIT_ATTRIBUTE_MAX ;i++)
- systemd_unit_free_property(unit_attributes[i].value_type, &u->attributes[i]);
-
- freez(u);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-static void netdata_systemd_units_function_help(const char *transaction) {
- BUFFER *wb = buffer_create(0, NULL);
- buffer_sprintf(wb,
- "%s / %s\n"
- "\n"
- "%s\n"
- "\n"
- "The following parameters are supported:\n"
- "\n"
- " help\n"
- " Shows this help message.\n"
- "\n"
- " info\n"
- " Request initial configuration information about the plugin.\n"
- " The key entity returned is the required_params array, which includes\n"
- " all the available systemd journal sources.\n"
- " When `info` is requested, all other parameters are ignored.\n"
- "\n"
- , program_name
- , SYSTEMD_UNITS_FUNCTION_NAME
- , SYSTEMD_UNITS_FUNCTION_DESCRIPTION
- );
-
- netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb);
- netdata_mutex_unlock(&stdout_mutex);
-
- buffer_free(wb);
-}
-
-static void netdata_systemd_units_function_info(const char *transaction) {
- BUFFER *wb = buffer_create(0, NULL);
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
-
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_string(wb, "help", SYSTEMD_UNITS_FUNCTION_DESCRIPTION);
-
- buffer_json_finalize(wb);
- netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb);
- netdata_mutex_unlock(&stdout_mutex);
-
- buffer_free(wb);
-}
-
-// ----------------------------------------------------------------------------
-
-static void systemd_unit_priority(UnitInfo *u, size_t units) {
- uint32_t prio;
-
- switch(u->severity) {
- case FACET_ROW_SEVERITY_CRITICAL:
- prio = 0;
- break;
-
- default:
- case FACET_ROW_SEVERITY_WARNING:
- prio = 1;
- break;
-
- case FACET_ROW_SEVERITY_NOTICE:
- prio = 2;
- break;
-
- case FACET_ROW_SEVERITY_NORMAL:
- prio = 3;
- break;
-
- case FACET_ROW_SEVERITY_DEBUG:
- prio = 4;
- break;
- }
-
- prio = prio * (uint32_t)(_UNIT_TYPE_MAX + 1) + (uint32_t)u->UnitType;
- u->prio = (prio * units) + u->prio;
-}
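
> The function builds a composite sort key: severity is the most significant component, then the unit type, and the alphabetical rank assigned earlier breaks ties. A worked example with assumed sizes (the real `_UNIT_TYPE_MAX` and severity ranks live in the plugin's enums; the numbers below are illustrative only):

```c
#include <stdio.h>
#include <stdint.h>

int main(void) {
    uint32_t units = 100;          // total number of units
    uint32_t alpha_rank = 7;       // u->prio after the alphabetical qsort
    uint32_t severity_rank = 1;    // 0 = critical .. 4 = debug (warning here)
    uint32_t unit_type = 2;        // position in the unit-type enum
    uint32_t unit_type_max = 11;   // assumed _UNIT_TYPE_MAX

    // severity-major, type-minor...
    uint32_t prio = severity_rank * (unit_type_max + 1) + unit_type;
    // ...with the alphabetical rank as the final tie-breaker
    prio = prio * units + alpha_rank;

    printf("final prio = %u\n", prio); // 1 * 12 + 2 = 14; 14 * 100 + 7 = 1407
    return 0;
}
```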
-
-#define if_less(current, max, target) ({ \
- typeof(current) _wanted = (current); \
- if((current) < (target)) \
- _wanted = (target) > (max) ? (max) : (target); \
- _wanted; \
-})
-
-#define if_normal(current, max, target) ({ \
- typeof(current) _wanted = (current); \
- if((current) == FACET_ROW_SEVERITY_NORMAL) \
- _wanted = (target) > (max) ? (max) : (target); \
- _wanted; \
-})
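
> Both macros are GNU statement expressions, so each evaluates to a value: `if_less()` escalates a severity toward `target` but never beyond `max`, while `if_normal()` only demotes a value that is still exactly `FACET_ROW_SEVERITY_NORMAL`. A standalone GCC/Clang demo with plain integers, assuming (as the usage below implies) that a larger value means more severe:

```c
#include <stdio.h>

// same shape as the macro above; typeof and ({ ... }) are GNU extensions
#define if_less(current, max, target) ({ \
    typeof(current) _wanted = (current); \
    if((current) < (target)) \
        _wanted = (target) > (max) ? (max) : (target); \
    _wanted; \
})

int main(void) {
    int severity = 1, cap = 3;
    severity = if_less(severity, cap, 4); // wants to raise 1 -> 4, capped at 3
    printf("%d\n", severity);             // 3
    severity = if_less(severity, cap, 2); // already at or above 2: unchanged
    printf("%d\n", severity);             // 3
    return 0;
}
```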
-
-FACET_ROW_SEVERITY system_unit_severity(UnitInfo *u) {
- FACET_ROW_SEVERITY severity, max_severity;
-
- switch(u->UnitLoadState) {
- case UNIT_ERROR:
- case UNIT_BAD_SETTING:
- severity = FACET_ROW_SEVERITY_CRITICAL;
- max_severity = FACET_ROW_SEVERITY_CRITICAL;
- break;
-
- default:
- severity = FACET_ROW_SEVERITY_WARNING;
- max_severity = FACET_ROW_SEVERITY_CRITICAL;
- break;
-
- case UNIT_NOT_FOUND:
- severity = FACET_ROW_SEVERITY_NOTICE;
- max_severity = FACET_ROW_SEVERITY_NOTICE;
- break;
-
- case UNIT_LOADED:
- severity = FACET_ROW_SEVERITY_NORMAL;
- max_severity = FACET_ROW_SEVERITY_CRITICAL;
- break;
-
- case UNIT_MERGED:
- case UNIT_MASKED:
- case UNIT_STUB:
- severity = FACET_ROW_SEVERITY_DEBUG;
- max_severity = FACET_ROW_SEVERITY_DEBUG;
- break;
- }
-
- switch(u->UnitActiveState) {
- case UNIT_FAILED:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL);
- break;
-
- default:
- case UNIT_RELOADING:
- case UNIT_ACTIVATING:
- case UNIT_DEACTIVATING:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case UNIT_MAINTENANCE:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE);
- break;
-
- case UNIT_ACTIVE:
- break;
-
- case UNIT_INACTIVE:
- severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG);
- break;
- }
-
- switch(u->FreezerState) {
- default:
- case FREEZER_FROZEN:
- case FREEZER_FREEZING:
- case FREEZER_THAWING:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case FREEZER_RUNNING:
- break;
- }
-
- switch(u->UnitType) {
- case UNIT_SERVICE:
- switch(u->ServiceState) {
- case SERVICE_FAILED:
- case SERVICE_FAILED_BEFORE_AUTO_RESTART:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL);
- break;
-
- default:
- case SERVICE_STOP:
- case SERVICE_STOP_WATCHDOG:
- case SERVICE_STOP_SIGTERM:
- case SERVICE_STOP_SIGKILL:
- case SERVICE_STOP_POST:
- case SERVICE_FINAL_WATCHDOG:
- case SERVICE_FINAL_SIGTERM:
- case SERVICE_FINAL_SIGKILL:
- case SERVICE_AUTO_RESTART:
- case SERVICE_AUTO_RESTART_QUEUED:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case SERVICE_CONDITION:
- case SERVICE_START_PRE:
- case SERVICE_START:
- case SERVICE_START_POST:
- case SERVICE_RELOAD:
- case SERVICE_RELOAD_SIGNAL:
- case SERVICE_RELOAD_NOTIFY:
- case SERVICE_DEAD_RESOURCES_PINNED:
- case SERVICE_CLEANING:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE);
- break;
-
- case SERVICE_EXITED:
- case SERVICE_RUNNING:
- break;
-
- case SERVICE_DEAD:
- case SERVICE_DEAD_BEFORE_AUTO_RESTART:
- severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG);
- break;
- }
- break;
-
- case UNIT_MOUNT:
- switch(u->MountState) {
- case MOUNT_FAILED:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL);
- break;
-
- default:
- case MOUNT_REMOUNTING_SIGTERM:
- case MOUNT_REMOUNTING_SIGKILL:
- case MOUNT_UNMOUNTING_SIGTERM:
- case MOUNT_UNMOUNTING_SIGKILL:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case MOUNT_MOUNTING:
- case MOUNT_MOUNTING_DONE:
- case MOUNT_REMOUNTING:
- case MOUNT_UNMOUNTING:
- case MOUNT_CLEANING:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE);
- break;
-
- case MOUNT_MOUNTED:
- break;
-
- case MOUNT_DEAD:
- severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG);
- break;
- }
- break;
-
- case UNIT_SWAP:
- switch(u->SwapState) {
- case SWAP_FAILED:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL);
- break;
-
- default:
- case SWAP_DEACTIVATING_SIGTERM:
- case SWAP_DEACTIVATING_SIGKILL:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case SWAP_ACTIVATING:
- case SWAP_ACTIVATING_DONE:
- case SWAP_DEACTIVATING:
- case SWAP_CLEANING:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE);
- break;
-
- case SWAP_ACTIVE:
- break;
-
- case SWAP_DEAD:
- severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG);
- break;
- }
- break;
-
- case UNIT_SOCKET:
- switch(u->SocketState) {
- case SOCKET_FAILED:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL);
- break;
-
- default:
- case SOCKET_STOP_PRE_SIGTERM:
- case SOCKET_STOP_PRE_SIGKILL:
- case SOCKET_FINAL_SIGTERM:
- case SOCKET_FINAL_SIGKILL:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case SOCKET_START_PRE:
- case SOCKET_START_CHOWN:
- case SOCKET_START_POST:
- case SOCKET_STOP_PRE:
- case SOCKET_STOP_POST:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE);
- break;
-
- case SOCKET_RUNNING:
- case SOCKET_LISTENING:
- break;
-
- case SOCKET_DEAD:
- severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG);
- break;
- }
- break;
-
- case UNIT_TARGET:
- switch(u->TargetState) {
- default:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case TARGET_ACTIVE:
- break;
-
- case TARGET_DEAD:
- severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG);
- break;
- }
- break;
-
- case UNIT_DEVICE:
- switch(u->DeviceState) {
- default:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case DEVICE_TENTATIVE:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE);
- break;
-
- case DEVICE_PLUGGED:
- break;
-
- case DEVICE_DEAD:
- severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG);
- break;
- }
- break;
-
- case UNIT_AUTOMOUNT:
- switch(u->AutomountState) {
- case AUTOMOUNT_FAILED:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL);
- break;
-
- default:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case AUTOMOUNT_WAITING:
- case AUTOMOUNT_RUNNING:
- break;
-
- case AUTOMOUNT_DEAD:
- severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG);
- break;
- }
- break;
-
- case UNIT_TIMER:
- switch(u->TimerState) {
- case TIMER_FAILED:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL);
- break;
-
- default:
- case TIMER_ELAPSED:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case TIMER_WAITING:
- case TIMER_RUNNING:
- break;
-
- case TIMER_DEAD:
- severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG);
- break;
- }
- break;
-
- case UNIT_PATH:
- switch(u->PathState) {
- case PATH_FAILED:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL);
- break;
-
- default:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case PATH_WAITING:
- case PATH_RUNNING:
- break;
-
- case PATH_DEAD:
- severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG);
- break;
- }
- break;
-
- case UNIT_SLICE:
- switch(u->SliceState) {
- default:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case SLICE_ACTIVE:
- break;
-
- case SLICE_DEAD:
- severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG);
- break;
- }
- break;
-
- case UNIT_SCOPE:
- switch(u->ScopeState) {
- case SCOPE_FAILED:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_CRITICAL);
- break;
-
- default:
- case SCOPE_STOP_SIGTERM:
- case SCOPE_STOP_SIGKILL:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
-
- case SCOPE_ABANDONED:
- case SCOPE_START_CHOWN:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_NOTICE);
- break;
-
- case SCOPE_RUNNING:
- break;
-
- case SCOPE_DEAD:
- severity = if_normal(severity, max_severity, FACET_ROW_SEVERITY_DEBUG);
- break;
- }
- break;
-
- default:
- severity = if_less(severity, max_severity, FACET_ROW_SEVERITY_WARNING);
- break;
- }
-
- u->severity = severity;
- return severity;
-}
-
-int unit_info_compar(const void *a, const void *b) {
- UnitInfo *u1 = *((UnitInfo **)a);
- UnitInfo *u2 = *((UnitInfo **)b);
-
- return strcasecmp(u1->id, u2->id);
-}
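
> The double dereference in the comparator is easy to misread: `qsort()` passes pointers to the array *elements*, and here each element is itself a `UnitInfo *`. The same pattern with plain strings:

```c
// Generic sketch of sorting an array of pointers with qsort().
#include <stdio.h>
#include <stdlib.h>
#include <strings.h> // strcasecmp (POSIX)

static int compar(const void *a, const void *b) {
    // a and b point at array elements, i.e. at char pointers
    const char *s1 = *(char * const *)a;
    const char *s2 = *(char * const *)b;
    return strcasecmp(s1, s2);
}

int main(void) {
    char *ids[] = { "nginx.service", "Cron.service", "dbus.socket" };
    size_t n = sizeof(ids) / sizeof(ids[0]);
    qsort(ids, n, sizeof(ids[0]), compar);
    for (size_t i = 0; i < n; i++)
        puts(ids[i]); // Cron.service, dbus.socket, nginx.service
    return 0;
}
```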
-
-void systemd_units_assign_priority(UnitInfo *base) {
- size_t units = 0, c = 0, prio = 0;
- for(UnitInfo *u = base; u ; u = u->next)
- units++;
-
-    if(!units)
-        return; // avoid a zero-length VLA when there are no units
-
-    UnitInfo *array[units];
- for(UnitInfo *u = base; u ; u = u->next)
- array[c++] = u;
-
- qsort(array, units, sizeof(UnitInfo *), unit_info_compar);
-
- for(c = 0; c < units ; c++) {
- array[c]->prio = prio++;
- system_unit_severity(array[c]);
- systemd_unit_priority(array[c], units);
- }
-}
-
-void function_systemd_units(const char *transaction, char *function, int timeout, bool *cancelled) {
- char *words[SYSTEMD_UNITS_MAX_PARAMS] = { NULL };
- size_t num_words = quoted_strings_splitter_pluginsd(function, words, SYSTEMD_UNITS_MAX_PARAMS);
- for(int i = 1; i < SYSTEMD_UNITS_MAX_PARAMS ;i++) {
- char *keyword = get_word(words, num_words, i);
- if(!keyword) break;
-
- if(strcmp(keyword, "info") == 0) {
- netdata_systemd_units_function_info(transaction);
- return;
- }
- else if(strcmp(keyword, "help") == 0) {
- netdata_systemd_units_function_help(transaction);
- return;
- }
- }
-
- UnitInfo *base = systemd_units_get_all();
- systemd_units_assign_priority(base);
-
- BUFFER *wb = buffer_create(0, NULL);
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
-
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_time_t(wb, "update_every", 10);
- buffer_json_member_add_string(wb, "help", SYSTEMD_UNITS_FUNCTION_DESCRIPTION);
- buffer_json_member_add_array(wb, "data");
-
- size_t count[_UNIT_ATTRIBUTE_MAX] = { 0 };
-    struct UnitAttribute max[_UNIT_ATTRIBUTE_MAX] = {{ 0 }};
-
- for(UnitInfo *u = base; u ;u = u->next) {
- buffer_json_add_array_item_array(wb);
- {
- buffer_json_add_array_item_string(wb, u->id);
-
- buffer_json_add_array_item_object(wb);
- {
- buffer_json_member_add_string(wb, "severity", facets_severity_to_string(u->severity));
- }
- buffer_json_object_close(wb);
-
- buffer_json_add_array_item_string(wb, u->type);
- buffer_json_add_array_item_string(wb, u->description);
- buffer_json_add_array_item_string(wb, u->load_state);
- buffer_json_add_array_item_string(wb, u->active_state);
- buffer_json_add_array_item_string(wb, u->sub_state);
- buffer_json_add_array_item_string(wb, u->following);
- buffer_json_add_array_item_string(wb, u->unit_path);
- buffer_json_add_array_item_uint64(wb, u->job_id);
- buffer_json_add_array_item_string(wb, u->job_type);
- buffer_json_add_array_item_string(wb, u->job_path);
-
- for(ssize_t i = 0; i < (ssize_t)_UNIT_ATTRIBUTE_MAX ;i++) {
- switch(unit_attributes[i].value_type) {
- case SD_BUS_TYPE_OBJECT_PATH:
- case SD_BUS_TYPE_STRING:
- buffer_json_add_array_item_string(wb, u->attributes[i].str && *u->attributes[i].str ? u->attributes[i].str : "-");
- break;
-
- case SD_BUS_TYPE_UINT64:
- buffer_json_add_array_item_uint64(wb, u->attributes[i].uint64);
- if(!count[i]++) max[i].uint64 = 0;
- max[i].uint64 = MAX(max[i].uint64, u->attributes[i].uint64);
- break;
-
- case SD_BUS_TYPE_UINT32:
- buffer_json_add_array_item_uint64(wb, u->attributes[i].uint32);
- if(!count[i]++) max[i].uint32 = 0;
- max[i].uint32 = MAX(max[i].uint32, u->attributes[i].uint32);
- break;
-
- case SD_BUS_TYPE_INT64:
- buffer_json_add_array_item_uint64(wb, u->attributes[i].int64);
-                        if(!count[i]++) max[i].int64 = 0;
- max[i].int64 = MAX(max[i].int64, u->attributes[i].int64);
- break;
-
- case SD_BUS_TYPE_INT32:
- buffer_json_add_array_item_uint64(wb, u->attributes[i].int32);
- if(!count[i]++) max[i].int32 = 0;
- max[i].int32 = MAX(max[i].int32, u->attributes[i].int32);
- break;
-
- case SD_BUS_TYPE_DOUBLE:
- buffer_json_add_array_item_double(wb, u->attributes[i].dbl);
- if(!count[i]++) max[i].dbl = 0.0;
- max[i].dbl = MAX(max[i].dbl, u->attributes[i].dbl);
- break;
-
- case SD_BUS_TYPE_BOOLEAN:
- buffer_json_add_array_item_boolean(wb, u->attributes[i].boolean);
- break;
-
- default:
- break;
- }
- }
-
- buffer_json_add_array_item_uint64(wb, u->prio);
- buffer_json_add_array_item_uint64(wb, 1); // count
- }
- buffer_json_array_close(wb);
- }
-
- buffer_json_array_close(wb); // data
-
- buffer_json_member_add_object(wb, "columns");
- {
- size_t field_id = 0;
-
- buffer_rrdf_table_add_field(wb, field_id++, "id", "Unit ID",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY | RRDF_FIELD_OPTS_WRAP | RRDF_FIELD_OPTS_FULL_WIDTH,
- NULL);
-
- buffer_rrdf_table_add_field(
- wb, field_id++,
- "rowOptions", "rowOptions",
- RRDF_FIELD_TYPE_NONE,
- RRDR_FIELD_VISUAL_ROW_OPTIONS,
- RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_FIXED,
- NULL,
- RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_DUMMY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "type", "Unit Type",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_EXPANDED_FILTER,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "description", "Unit Description",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_WRAP | RRDF_FIELD_OPTS_FULL_WIDTH,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "loadState", "Unit Load State",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_EXPANDED_FILTER,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "activeState", "Unit Active State",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_EXPANDED_FILTER,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "subState", "Unit Sub State",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_EXPANDED_FILTER,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "following", "Unit Following",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_WRAP,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "path", "Unit Path",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_WRAP | RRDF_FIELD_OPTS_FULL_WIDTH,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "jobId", "Unit Job ID",
- RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_NONE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "jobType", "Unit Job Type",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_NONE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "jobPath", "Unit Job Path",
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_WRAP | RRDF_FIELD_OPTS_FULL_WIDTH,
- NULL);
-
- for(ssize_t i = 0; i < (ssize_t)_UNIT_ATTRIBUTE_MAX ;i++) {
- char key[256], name[256];
-
- if(unit_attributes[i].show_as)
- snprintfz(key, sizeof(key), "%s", unit_attributes[i].show_as);
- else
- snprintfz(key, sizeof(key), "attribute%s", unit_property_name_to_string_from_slot(i));
-
- if(unit_attributes[i].info)
- snprintfz(name, sizeof(name), "%s", unit_attributes[i].info);
- else
- snprintfz(name, sizeof(name), "Attribute %s", unit_property_name_to_string_from_slot(i));
-
- RRDF_FIELD_OPTIONS options = unit_attributes[i].options;
- RRDF_FIELD_FILTER filter = unit_attributes[i].filter;
-
- switch(unit_attributes[i].value_type) {
- case SD_BUS_TYPE_OBJECT_PATH:
- case SD_BUS_TYPE_STRING:
- buffer_rrdf_table_add_field(wb, field_id++, key, name,
- RRDF_FIELD_TYPE_STRING, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, filter,
- RRDF_FIELD_OPTS_WRAP | options,
- NULL);
- break;
-
- case SD_BUS_TYPE_INT32:
- case SD_BUS_TYPE_UINT32:
- case SD_BUS_TYPE_INT64:
- case SD_BUS_TYPE_UINT64: {
- double m;
- if(unit_attributes[i].value_type == SD_BUS_TYPE_UINT64)
- m = (double)max[i].uint64;
- else if(unit_attributes[i].value_type == SD_BUS_TYPE_INT64)
- m = (double)max[i].int64;
- else if(unit_attributes[i].value_type == SD_BUS_TYPE_UINT32)
- m = (double)max[i].uint32;
- else if(unit_attributes[i].value_type == SD_BUS_TYPE_INT32)
- m = (double)max[i].int32;
-
- buffer_rrdf_table_add_field(wb, field_id++, key, name,
- RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, m, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, filter,
- RRDF_FIELD_OPTS_WRAP | options,
- NULL);
- }
- break;
-
- case SD_BUS_TYPE_DOUBLE:
- buffer_rrdf_table_add_field(wb, field_id++, key, name,
- RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 2, NULL, max[i].dbl, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_SUM, filter,
- RRDF_FIELD_OPTS_WRAP | options,
- NULL);
- break;
-
- case SD_BUS_TYPE_BOOLEAN:
- buffer_rrdf_table_add_field(wb, field_id++, key, name,
- RRDF_FIELD_TYPE_BOOLEAN, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, filter,
- RRDF_FIELD_OPTS_WRAP | options,
- NULL);
- break;
-
- default:
- break;
- }
-
- }
-
- buffer_rrdf_table_add_field(wb, field_id++, "priority", "Priority",
- RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_NONE,
- NULL);
-
- buffer_rrdf_table_add_field(wb, field_id++, "count", "Count",
- RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE,
- 0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL,
- RRDF_FIELD_SUMMARY_COUNT, RRDF_FIELD_FILTER_NONE,
- RRDF_FIELD_OPTS_NONE,
- NULL);
- }
-
- buffer_json_object_close(wb); // columns
- buffer_json_member_add_string(wb, "default_sort_column", "priority");
-
- buffer_json_member_add_object(wb, "charts");
- {
- buffer_json_member_add_object(wb, "count");
- {
- buffer_json_member_add_string(wb, "name", "count");
- buffer_json_member_add_string(wb, "type", "stacked-bar");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "count");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // charts
-
- buffer_json_member_add_array(wb, "default_charts");
- {
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "count");
- buffer_json_add_array_item_string(wb, "activeState");
- buffer_json_array_close(wb);
- buffer_json_add_array_item_array(wb);
- buffer_json_add_array_item_string(wb, "count");
- buffer_json_add_array_item_string(wb, "subState");
- buffer_json_array_close(wb);
- }
- buffer_json_array_close(wb);
-
- buffer_json_member_add_object(wb, "group_by");
- {
- buffer_json_member_add_object(wb, "type");
- {
- buffer_json_member_add_string(wb, "name", "Top Down Tree");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "type");
- buffer_json_add_array_item_string(wb, "loadState");
- buffer_json_add_array_item_string(wb, "activeState");
- buffer_json_add_array_item_string(wb, "subState");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- buffer_json_member_add_object(wb, "subState");
- {
- buffer_json_member_add_string(wb, "name", "Bottom Up Tree");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "subState");
- buffer_json_add_array_item_string(wb, "activeState");
- buffer_json_add_array_item_string(wb, "loadState");
- buffer_json_add_array_item_string(wb, "type");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // group_by
-
- buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + 1);
- buffer_json_finalize(wb);
-
- netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", now_realtime_sec() + 1, wb);
- netdata_mutex_unlock(&stdout_mutex);
-
- buffer_free(wb);
- systemd_units_free_all(base);
-}
-
-#endif // ENABLE_SYSTEMD_DBUS
diff --git a/collectors/tc.plugin/Makefile.am b/collectors/tc.plugin/Makefile.am
deleted file mode 100644
index 9bc6cf2de..000000000
--- a/collectors/tc.plugin/Makefile.am
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-CLEANFILES = \
- tc-qos-helper.sh \
- $(NULL)
-
-include $(top_srcdir)/build/subst.inc
-SUFFIXES = .in
-
-dist_plugins_SCRIPTS = \
- tc-qos-helper.sh \
- $(NULL)
-
-dist_noinst_DATA = \
- tc-qos-helper.sh.in \
- README.md \
- $(NULL)
diff --git a/collectors/tc.plugin/README.md b/collectors/tc.plugin/README.md
deleted file mode 120000
index 2a20ff262..000000000
--- a/collectors/tc.plugin/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/tc_qos_classes.md \ No newline at end of file
diff --git a/collectors/tc.plugin/integrations/tc_qos_classes.md b/collectors/tc.plugin/integrations/tc_qos_classes.md
deleted file mode 100644
index 7a6650660..000000000
--- a/collectors/tc.plugin/integrations/tc_qos_classes.md
+++ /dev/null
@@ -1,171 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/tc.plugin/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/tc.plugin/metadata.yaml"
-sidebar_label: "tc QoS classes"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Linux Systems/Network"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# tc QoS classes
-
-
-<img src="https://netdata.cloud/img/netdata.png" width="150"/>
-
-
-Plugin: tc.plugin
-Module: tc.plugin
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Examine tc metrics to gain insights into Linux traffic control operations. Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow.
-
-The plugin uses the `tc` command to collect information about traffic control.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs access to the `tc` command to collect the necessary metrics. To achieve this, Netdata modifies the permissions of the file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`.
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per network device direction
-
-Metrics related to QoS network device directions. Each direction (in/out) produces its own set of the following metrics.
-
-Labels:
-
-| Label | Description |
-|:-----------|:----------------|
-| device | The network interface. |
-| device_name | The network interface name |
-| group | The device family |
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| tc.qos | a dimension per class | kilobits/s |
-| tc.qos_packets | a dimension per class | packets/s |
-| tc.qos_dropped | a dimension per class | packets/s |
-| tc.qos_tokens | a dimension per class | tokens |
-| tc.qos_ctokens | a dimension per class | ctokens |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Create `tc-qos-helper.conf`
-
-In order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content:
-
-```conf
-tc_show="class"
-```
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:tc]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config option</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| script to run to get tc values | Path to script `tc-qos-helper.sh` | /usr/libexec/netdata/plugins.d/tc-qos-helper.sh | no |
-| enable show all classes and qdiscs for all interfaces | yes/no flag to control whether all classes and qdiscs are shown. | yes | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration using classes defined in `/etc/iproute2/tc_cls`.
-
-An example of class IDs mapped to names in that file can be:
-
-```conf
-2:1 Standard
-2:8 LowPriorityData
-2:10 HighThroughputData
-2:16 OAM
-2:18 LowLatencyData
-2:24 BroadcastVideo
-2:26 MultimediaStreaming
-2:32 RealTimeInteractive
-2:34 MultimediaConferencing
-2:40 Signalling
-2:46 Telephony
-2:48 NetworkControl
-```
-
-You can read more about setting up tc rules in `rc.local` in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).
-
-
-```ini
-[plugin:tc]
- script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh
- enable show all classes and qdiscs for all interfaces = yes
-
-```
-
diff --git a/collectors/tc.plugin/metadata.yaml b/collectors/tc.plugin/metadata.yaml
deleted file mode 100644
index f4039a8c5..000000000
--- a/collectors/tc.plugin/metadata.yaml
+++ /dev/null
@@ -1,148 +0,0 @@
-plugin_name: tc.plugin
-modules:
- - meta:
- plugin_name: tc.plugin
- module_name: tc.plugin
- monitored_instance:
- name: tc QoS classes
- link: "https://wiki.linuxfoundation.org/networking/iproute2"
- categories:
- - data-collection.linux-systems.network-metrics
- icon_filename: "netdata.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Examine tc metrics to gain insights into Linux traffic control operations. Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow."
-      method_description: "The plugin uses the `tc` command to collect information about traffic control."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs to access command `tc` to get the necessary metrics. To achieve this netdata modifies permission of file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`."
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Create `tc-qos-helper.conf`
- description: |
- In order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content:
-
- ```conf
- tc_show="class"
- ```
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:tc]"
- description: "The main configuration file."
- options:
- description: ""
- folding:
- title: "Config option"
- enabled: true
- list:
- - name: script to run to get tc values
- description: Path to script `tc-qos-helper.sh`
- default_value: "usr/libexec/netdata/plugins.d/tc-qos-helper.s"
- required: false
- - name: enable show all classes and qdiscs for all interfaces
-          description: yes/no flag to control whether all classes and qdiscs are shown.
- default_value: "yes"
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Basic
- description: |
- A basic example configuration using classes defined in `/etc/iproute2/tc_cls`.
-
- An example of class IDs mapped to names in that file can be:
-
- ```conf
- 2:1 Standard
- 2:8 LowPriorityData
- 2:10 HighThroughputData
- 2:16 OAM
- 2:18 LowLatencyData
- 2:24 BroadcastVideo
- 2:26 MultimediaStreaming
- 2:32 RealTimeInteractive
- 2:34 MultimediaConferencing
- 2:40 Signalling
- 2:46 Telephony
- 2:48 NetworkControl
- ```
-
-        You can read more about setting up tc rules in `rc.local` in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).
-
- config: |
- [plugin:tc]
- script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh
- enable show all classes and qdiscs for all interfaces = yes
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: network device direction
- description: "Metrics related to QoS network device directions. Each direction (in/out) produces its own set of the following metrics."
- labels:
- - name: device
- description: The network interface.
- - name: device_name
- description: The network interface name
- - name: group
- description: The device family
- metrics:
- - name: tc.qos
- description: Class Usage
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per class
- - name: tc.qos_packets
- description: Class Packets
- unit: "packets/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per class
- - name: tc.qos_dropped
- description: Class Dropped Packets
- unit: "packets/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per class
- - name: tc.qos_tokens
- description: Class Tokens
- unit: "tokens"
- chart_type: line
- dimensions:
- - name: a dimension per class
- - name: tc.qos_ctokens
- description: Class cTokens
- unit: "ctokens"
- chart_type: line
- dimensions:
- - name: a dimension per class
diff --git a/collectors/tc.plugin/plugin_tc.c b/collectors/tc.plugin/plugin_tc.c
deleted file mode 100644
index eae70453f..000000000
--- a/collectors/tc.plugin/plugin_tc.c
+++ /dev/null
@@ -1,1183 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "daemon/common.h"
-
-#define RRD_TYPE_TC "tc"
-#define PLUGIN_TC_NAME "tc.plugin"
-
-// ----------------------------------------------------------------------------
-// /sbin/tc processor
-// this requires the script plugins.d/tc-qos-helper.sh
-
-#define TC_LINE_MAX 1024
-
-struct tc_class {
- STRING *id;
- STRING *name;
- STRING *leafid;
- STRING *parentid;
-
- bool hasparent;
- bool isleaf;
- bool isqdisc;
- bool render;
- bool name_updated;
- bool updated;
-
-    int unupdated;            // the number of times this has been found un-updated
-
- unsigned long long bytes;
- unsigned long long packets;
- unsigned long long dropped;
- unsigned long long tokens;
- unsigned long long ctokens;
-
- //unsigned long long overlimits;
- //unsigned long long requeues;
- //unsigned long long lended;
- //unsigned long long borrowed;
- //unsigned long long giants;
-
- RRDDIM *rd_bytes;
- RRDDIM *rd_packets;
- RRDDIM *rd_dropped;
- RRDDIM *rd_tokens;
- RRDDIM *rd_ctokens;
-};
-
-struct tc_device {
- STRING *id;
- STRING *name;
- STRING *family;
-
- bool name_updated;
- bool family_updated;
-
- char enabled;
- char enabled_bytes;
- char enabled_packets;
- char enabled_dropped;
- char enabled_tokens;
- char enabled_ctokens;
- char enabled_all_classes_qdiscs;
-
- RRDSET *st_bytes;
- RRDSET *st_packets;
- RRDSET *st_dropped;
- RRDSET *st_tokens;
- RRDSET *st_ctokens;
-
- DICTIONARY *classes;
-};
-
-
-// ----------------------------------------------------------------------------
-// tc_class index
-
-static void tc_class_free_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
- // struct tc_device *d = data;
- struct tc_class *c = value;
-
- string_freez(c->id);
- string_freez(c->name);
- string_freez(c->leafid);
- string_freez(c->parentid);
-}
-
-static bool tc_class_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) {
- struct tc_device *d = data; (void)d;
- struct tc_class *c = old_value; (void)c;
- struct tc_class *new_c = new_value; (void)new_c;
-
- collector_error("TC: class '%s' is already in device '%s'. Ignoring duplicate.", dictionary_acquired_item_name(item), string2str(d->id));
-
- tc_class_free_callback(item, new_value, data);
-
- return true;
-}
-
-static void tc_class_index_init(struct tc_device *d) {
- if(!d->classes) {
- d->classes = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_SINGLE_THREADED, &dictionary_stats_category_collectors, 0);
-
- dictionary_register_delete_callback(d->classes, tc_class_free_callback, d);
- dictionary_register_conflict_callback(d->classes, tc_class_conflict_callback, d);
- }
-}
-
-static void tc_class_index_destroy(struct tc_device *d) {
- dictionary_destroy(d->classes);
- d->classes = NULL;
-}
-
-static struct tc_class *tc_class_index_add(struct tc_device *d, struct tc_class *c) {
- return dictionary_set(d->classes, string2str(c->id), c, sizeof(*c));
-}
-
-static void tc_class_index_del(struct tc_device *d, struct tc_class *c) {
- dictionary_del(d->classes, string2str(c->id));
-}
-
-static inline struct tc_class *tc_class_index_find(struct tc_device *d, const char *id) {
- return dictionary_get(d->classes, id);
-}
-
-// ----------------------------------------------------------------------------
-// tc_device index
-
-static DICTIONARY *tc_device_root_index = NULL;
-
-static void tc_device_add_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
- struct tc_device *d = value;
- tc_class_index_init(d);
-}
-
-static void tc_device_free_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
- struct tc_device *d = value;
-
- tc_class_index_destroy(d);
-
- string_freez(d->id);
- string_freez(d->name);
- string_freez(d->family);
-}
-
-static void tc_device_index_init() {
- if(!tc_device_root_index) {
- tc_device_root_index = dictionary_create_advanced(
- DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_SINGLE_THREADED | DICT_OPTION_ADD_IN_FRONT,
- &dictionary_stats_category_collectors, 0);
-
- dictionary_register_insert_callback(tc_device_root_index, tc_device_add_callback, NULL);
- dictionary_register_delete_callback(tc_device_root_index, tc_device_free_callback, NULL);
- }
-}
-
-static void tc_device_index_destroy() {
- dictionary_destroy(tc_device_root_index);
- tc_device_root_index = NULL;
-}
-
-static struct tc_device *tc_device_index_add(struct tc_device *d) {
- return dictionary_set(tc_device_root_index, string2str(d->id), d, sizeof(*d));
-}
-
-//static struct tc_device *tc_device_index_del(struct tc_device *d) {
-// dictionary_del(tc_device_root_index, string2str(d->id));
-// return d;
-//}
-
-static inline struct tc_device *tc_device_index_find(const char *id) {
- return dictionary_get(tc_device_root_index, id);
-}
-
-// ----------------------------------------------------------------------------
-
-static inline void tc_class_free(struct tc_device *n, struct tc_class *c) {
- netdata_log_debug(D_TC_LOOP, "Removing from device '%s' class '%s', parentid '%s', leafid '%s', unused=%d",
- string2str(n->id), string2str(c->id), string2str(c->parentid), string2str(c->leafid),
- c->unupdated);
-
- tc_class_index_del(n, c);
-}
-
-static inline void tc_device_classes_cleanup(struct tc_device *d) {
- static int cleanup_every = 999;
-
- if(unlikely(cleanup_every > 0)) {
- cleanup_every = (int) config_get_number("plugin:tc", "cleanup unused classes every", 120);
- if(cleanup_every < 0) cleanup_every = -cleanup_every;
- }
-
- d->name_updated = false;
- d->family_updated = false;
-
- struct tc_class *c;
- dfe_start_write(d->classes, c) {
- if(unlikely(cleanup_every && c->unupdated >= cleanup_every))
- tc_class_free(d, c);
-
- else {
- c->updated = false;
- c->name_updated = false;
- }
- }
- dfe_done(c);
-}
-
-static inline void tc_device_commit(struct tc_device *d) {
- static int enable_new_interfaces = -1, enable_bytes = -1, enable_packets = -1, enable_dropped = -1, enable_tokens = -1, enable_ctokens = -1, enabled_all_classes_qdiscs = -1;
-
- if(unlikely(enable_new_interfaces == -1)) {
- enable_new_interfaces = config_get_boolean_ondemand("plugin:tc", "enable new interfaces detected at runtime", CONFIG_BOOLEAN_YES);
- enable_bytes = config_get_boolean_ondemand("plugin:tc", "enable traffic charts for all interfaces", CONFIG_BOOLEAN_AUTO);
- enable_packets = config_get_boolean_ondemand("plugin:tc", "enable packets charts for all interfaces", CONFIG_BOOLEAN_AUTO);
- enable_dropped = config_get_boolean_ondemand("plugin:tc", "enable dropped charts for all interfaces", CONFIG_BOOLEAN_AUTO);
- enable_tokens = config_get_boolean_ondemand("plugin:tc", "enable tokens charts for all interfaces", CONFIG_BOOLEAN_NO);
- enable_ctokens = config_get_boolean_ondemand("plugin:tc", "enable ctokens charts for all interfaces", CONFIG_BOOLEAN_NO);
- enabled_all_classes_qdiscs = config_get_boolean_ondemand("plugin:tc", "enable show all classes and qdiscs for all interfaces", CONFIG_BOOLEAN_NO);
- }
-
- if(unlikely(d->enabled == (char)-1)) {
- char var_name[CONFIG_MAX_NAME + 1];
- snprintfz(var_name, CONFIG_MAX_NAME, "qos for %s", string2str(d->id));
-
- d->enabled = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_new_interfaces);
-
- snprintfz(var_name, CONFIG_MAX_NAME, "traffic chart for %s", string2str(d->id));
- d->enabled_bytes = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_bytes);
-
- snprintfz(var_name, CONFIG_MAX_NAME, "packets chart for %s", string2str(d->id));
- d->enabled_packets = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_packets);
-
- snprintfz(var_name, CONFIG_MAX_NAME, "dropped packets chart for %s", string2str(d->id));
- d->enabled_dropped = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_dropped);
-
- snprintfz(var_name, CONFIG_MAX_NAME, "tokens chart for %s", string2str(d->id));
- d->enabled_tokens = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_tokens);
-
- snprintfz(var_name, CONFIG_MAX_NAME, "ctokens chart for %s", string2str(d->id));
- d->enabled_ctokens = (char)config_get_boolean_ondemand("plugin:tc", var_name, enable_ctokens);
-
- snprintfz(var_name, CONFIG_MAX_NAME, "show all classes for %s", string2str(d->id));
- d->enabled_all_classes_qdiscs = (char)config_get_boolean_ondemand("plugin:tc", var_name, enabled_all_classes_qdiscs);
- }
-
- // we only need to add leaf classes
- struct tc_class *c, *x /*, *root = NULL */;
- unsigned long long bytes_sum = 0, packets_sum = 0, dropped_sum = 0, tokens_sum = 0, ctokens_sum = 0;
- int active_nodes = 0, updated_classes = 0, updated_qdiscs = 0;
-
- // prepare all classes
- // we set reasonable defaults for the rest of the code below
-
- dfe_start_read(d->classes, c) {
- c->render = false; // do not render this class
- c->isleaf = true; // this is a leaf class
- c->hasparent = false; // without a parent
-
- if(unlikely(!c->updated))
- c->unupdated++; // increase its unupdated counter
- else {
- c->unupdated = 0; // reset its unupdated counter
-
- // count how many of each kind
- if(c->isqdisc)
- updated_qdiscs++;
- else
- updated_classes++;
- }
- }
- dfe_done(c);
-
- if(unlikely(!d->enabled || (!updated_classes && !updated_qdiscs))) {
- netdata_log_debug(D_TC_LOOP, "TC: Ignoring TC device '%s'. It is not enabled/updated.", string2str(d->name?d->name:d->id));
- tc_device_classes_cleanup(d);
- return;
- }
-
- if(unlikely(updated_classes && updated_qdiscs)) {
- collector_error("TC: device '%s' has active both classes (%d) and qdiscs (%d). Will render only qdiscs.", string2str(d->id), updated_classes, updated_qdiscs);
-
- // set all classes to !updated
- dfe_start_read(d->classes, c) {
- if (unlikely(!c->isqdisc && c->updated))
- c->updated = false;
- }
- dfe_done(c);
- updated_classes = 0;
- }
-
- // mark the classes as leafs and parents
- //
- // TC is hierarchical:
- // - classes can have other classes in them
- // - the same is true for qdiscs (i.e. qdiscs have classes, that have other qdiscs)
- //
- // we need to present a chart with leaf nodes only, so that the sum
- // of all dimensions of the chart, will be the total utilization
- // of the interface.
- //
- // here we try to find the ones we need to report
- // by default all nodes are marked with: isleaf = 1 (see above)
- //
- // so, here we remove the isleaf flag from nodes in the middle
- // and we add the hasparent flag to leaf nodes we found their parent
- if(likely(!d->enabled_all_classes_qdiscs)) {
- dfe_start_read(d->classes, c) {
- if(unlikely(!c->updated))
- continue;
-
- //netdata_log_debug(D_TC_LOOP, "TC: In device '%s', %s '%s' has leafid: '%s' and parentid '%s'.",
- // d->id,
- // c->isqdisc?"qdisc":"class",
- // c->id,
- // c->leafid?c->leafid:"NULL",
- // c->parentid?c->parentid:"NULL");
-
- // find if c is leaf or not
- dfe_start_read(d->classes, x) {
- if(unlikely(!x->updated || c == x || !x->parentid))
- continue;
-
- // classes have both parentid and leafid
- // qdiscs have only parentid
- // the following works for both (it is an OR)
-
- if((x->parentid && c->id == x->parentid) ||
- (c->leafid && x->parentid && c->leafid == x->parentid)) {
- // netdata_log_debug(D_TC_LOOP, "TC: In device '%s', %s '%s' (leafid: '%s') has as leaf %s '%s' (parentid: '%s').", d->name?d->name:d->id, c->isqdisc?"qdisc":"class", c->name?c->name:c->id, c->leafid?c->leafid:c->id, x->isqdisc?"qdisc":"class", x->name?x->name:x->id, x->parentid?x->parentid:x->id);
- c->isleaf = false;
- x->hasparent = true;
- }
- }
- dfe_done(x);
- }
- dfe_done(c);
- }
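
> To see the marking pass in isolation, here is a toy standalone version over plain arrays (illustrative class IDs; the real code above walks netdata's DICTIONARY and compares interned STRING pointers instead of calling strcmp()):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct node { const char *id, *parentid; bool isleaf, hasparent; };

int main(void) {
    struct node n[] = {
        { "1:1",  NULL,  true, false },   // root class
        { "1:10", "1:1", true, false },   // leaf under 1:1
        { "1:20", "1:1", true, false },   // leaf under 1:1
    };
    size_t count = sizeof(n) / sizeof(n[0]);

    for (size_t c = 0; c < count; c++)
        for (size_t x = 0; x < count; x++)
            if (c != x && n[x].parentid && strcmp(n[c].id, n[x].parentid) == 0) {
                n[c].isleaf = false;   // c has children: not a leaf
                n[x].hasparent = true; // x hangs under c
            }

    // only nodes with isleaf && hasparent are rendered, so the chart's
    // dimensions sum to the interface's total utilization
    for (size_t c = 0; c < count; c++)
        printf("%-5s isleaf=%d hasparent=%d\n", n[c].id, n[c].isleaf, n[c].hasparent);
    return 0;
}
```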
-
- dfe_start_read(d->classes, c) {
- if(unlikely(!c->updated))
- continue;
-
- // netdata_log_debug(D_TC_LOOP, "TC: device '%s', %s '%s' isleaf=%d, hasparent=%d", d->id, (c->isqdisc)?"qdisc":"class", c->id, c->isleaf, c->hasparent);
-
- if(unlikely((c->isleaf && c->hasparent) || d->enabled_all_classes_qdiscs)) {
- c->render = true;
- active_nodes++;
- bytes_sum += c->bytes;
- packets_sum += c->packets;
- dropped_sum += c->dropped;
- tokens_sum += c->tokens;
- ctokens_sum += c->ctokens;
- }
-
- //if(unlikely(!c->hasparent)) {
- // if(root) collector_error("TC: multiple root class/qdisc for device '%s' (old: '%s', new: '%s')", d->id, root->id, c->id);
- // root = c;
- // netdata_log_debug(D_TC_LOOP, "TC: found root class/qdisc '%s'", root->id);
- //}
- }
- dfe_done(c);
-
-#ifdef NETDATA_INTERNAL_CHECKS
- // dump all the list to see what we know
-
- if(unlikely(debug_flags & D_TC_LOOP)) {
- dfe_start_read(d->classes, c) {
- if(c->render) netdata_log_debug(D_TC_LOOP, "TC: final nodes dump for '%s': class %s, OK", string2str(d->name), string2str(c->id));
- else netdata_log_debug(D_TC_LOOP, "TC: final nodes dump for '%s': class '%s', IGNORE (updated: %d, isleaf: %d, hasparent: %d, parent: '%s')",
- string2str(d->name?d->name:d->id), string2str(c->id), c->updated, c->isleaf, c->hasparent, string2str(c->parentid));
- }
- dfe_done(c);
- }
-#endif
-
- if(unlikely(!active_nodes)) {
- netdata_log_debug(D_TC_LOOP, "TC: Ignoring TC device '%s'. No useful classes/qdiscs.", string2str(d->name?d->name:d->id));
- tc_device_classes_cleanup(d);
- return;
- }
-
- netdata_log_debug(D_TC_LOOP, "TC: evaluating TC device '%s'. enabled = %d/%d (bytes: %d/%d, packets: %d/%d, dropped: %d/%d, tokens: %d/%d, ctokens: %d/%d, all_classes_qdiscs: %d/%d), classes: (bytes = %llu, packets = %llu, dropped = %llu, tokens = %llu, ctokens = %llu).",
- string2str(d->name?d->name:d->id),
- d->enabled, enable_new_interfaces,
- d->enabled_bytes, enable_bytes,
- d->enabled_packets, enable_packets,
- d->enabled_dropped, enable_dropped,
- d->enabled_tokens, enable_tokens,
- d->enabled_ctokens, enable_ctokens,
- d->enabled_all_classes_qdiscs, enabled_all_classes_qdiscs,
- bytes_sum,
- packets_sum,
- dropped_sum,
- tokens_sum,
- ctokens_sum
- );
-
- // --------------------------------------------------------------------
- // bytes
-
- if(d->enabled_bytes == CONFIG_BOOLEAN_YES || (d->enabled_bytes == CONFIG_BOOLEAN_AUTO &&
- (bytes_sum || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- d->enabled_bytes = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_bytes)) {
- d->st_bytes = rrdset_create_localhost(
- RRD_TYPE_TC,
- string2str(d->id),
- string2str(d->name ? d->name : d->id),
- string2str(d->family ? d->family : d->id),
- RRD_TYPE_TC ".qos",
- "Class Usage",
- "kilobits/s",
- PLUGIN_TC_NAME,
- NULL,
- NETDATA_CHART_PRIO_TC_QOS,
- localhost->rrd_update_every,
- d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED);
-
- rrdlabels_add(d->st_bytes->rrdlabels, "device", string2str(d->id), RRDLABEL_SRC_AUTO);
- rrdlabels_add(d->st_bytes->rrdlabels, "device_name", string2str(d->name?d->name:d->id), RRDLABEL_SRC_AUTO);
- rrdlabels_add(d->st_bytes->rrdlabels, "device_group", string2str(d->family?d->family:d->id), RRDLABEL_SRC_AUTO);
- }
- else {
- if(unlikely(d->name_updated))
- rrdset_reset_name(d->st_bytes, string2str(d->name));
-
- if(d->name && d->name_updated)
- rrdlabels_add(d->st_bytes->rrdlabels, "device_name", string2str(d->name), RRDLABEL_SRC_AUTO);
-
- if(d->family && d->family_updated)
- rrdlabels_add(d->st_bytes->rrdlabels, "device_group", string2str(d->family), RRDLABEL_SRC_AUTO);
-
- // TODO
- // update the family
- }
-
- dfe_start_read(d->classes, c) {
- if(unlikely(!c->render)) continue;
-
- if(unlikely(!c->rd_bytes))
- c->rd_bytes = rrddim_add(d->st_bytes, string2str(c->id), string2str(c->name?c->name:c->id), 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- else if(unlikely(c->name_updated))
- rrddim_reset_name(d->st_bytes, c->rd_bytes, string2str(c->name));
-
- rrddim_set_by_pointer(d->st_bytes, c->rd_bytes, c->bytes);
- }
- dfe_done(c);
-
- rrdset_done(d->st_bytes);
- }
-
- // --------------------------------------------------------------------
- // packets
-
- if(d->enabled_packets == CONFIG_BOOLEAN_YES || (d->enabled_packets == CONFIG_BOOLEAN_AUTO &&
- (packets_sum ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- d->enabled_packets = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_packets)) {
- char id[RRD_ID_LENGTH_MAX + 1];
- char name[RRD_ID_LENGTH_MAX + 1];
- snprintfz(id, RRD_ID_LENGTH_MAX, "%s_packets", string2str(d->id));
- snprintfz(name, RRD_ID_LENGTH_MAX, "%s_packets", string2str(d->name ? d->name : d->id));
-
- d->st_packets = rrdset_create_localhost(
- RRD_TYPE_TC,
- id,
- name,
- string2str(d->family ? d->family : d->id),
- RRD_TYPE_TC ".qos_packets",
- "Class Packets",
- "packets/s",
- PLUGIN_TC_NAME,
- NULL,
- NETDATA_CHART_PRIO_TC_QOS_PACKETS,
- localhost->rrd_update_every,
- d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED);
-
- rrdlabels_add(d->st_packets->rrdlabels, "device", string2str(d->id), RRDLABEL_SRC_AUTO);
- rrdlabels_add(d->st_packets->rrdlabels, "device_name", string2str(d->name?d->name:d->id), RRDLABEL_SRC_AUTO);
- rrdlabels_add(d->st_packets->rrdlabels, "device_group", string2str(d->family?d->family:d->id), RRDLABEL_SRC_AUTO);
- }
- else {
- if(unlikely(d->name_updated)) {
- char name[RRD_ID_LENGTH_MAX + 1];
- snprintfz(name, RRD_ID_LENGTH_MAX, "%s_packets", string2str(d->name?d->name:d->id));
- rrdset_reset_name(d->st_packets, name);
- }
-
- if(d->name && d->name_updated)
- rrdlabels_add(d->st_packets->rrdlabels, "device_name", string2str(d->name), RRDLABEL_SRC_AUTO);
-
- if(d->family && d->family_updated)
- rrdlabels_add(d->st_packets->rrdlabels, "device_group", string2str(d->family), RRDLABEL_SRC_AUTO);
-
- // TODO
- // update the family
- }
-
- dfe_start_read(d->classes, c) {
- if(unlikely(!c->render)) continue;
-
- if(unlikely(!c->rd_packets))
- c->rd_packets = rrddim_add(d->st_packets, string2str(c->id), string2str(c->name?c->name:c->id), 1, 1, RRD_ALGORITHM_INCREMENTAL);
- else if(unlikely(c->name_updated))
- rrddim_reset_name(d->st_packets, c->rd_packets, string2str(c->name));
-
- rrddim_set_by_pointer(d->st_packets, c->rd_packets, c->packets);
- }
- dfe_done(c);
-
- rrdset_done(d->st_packets);
- }
-
- // --------------------------------------------------------------------
- // dropped
-
- if(d->enabled_dropped == CONFIG_BOOLEAN_YES || (d->enabled_dropped == CONFIG_BOOLEAN_AUTO &&
- (dropped_sum ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- d->enabled_dropped = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_dropped)) {
- char id[RRD_ID_LENGTH_MAX + 1];
- char name[RRD_ID_LENGTH_MAX + 1];
- snprintfz(id, RRD_ID_LENGTH_MAX, "%s_dropped", string2str(d->id));
- snprintfz(name, RRD_ID_LENGTH_MAX, "%s_dropped", string2str(d->name ? d->name : d->id));
-
- d->st_dropped = rrdset_create_localhost(
- RRD_TYPE_TC,
- id,
- name,
- string2str(d->family ? d->family : d->id),
- RRD_TYPE_TC ".qos_dropped",
- "Class Dropped Packets",
- "packets/s",
- PLUGIN_TC_NAME,
- NULL,
- NETDATA_CHART_PRIO_TC_QOS_DROPPED,
- localhost->rrd_update_every,
- d->enabled_all_classes_qdiscs ? RRDSET_TYPE_LINE : RRDSET_TYPE_STACKED);
-
- rrdlabels_add(d->st_dropped->rrdlabels, "device", string2str(d->id), RRDLABEL_SRC_AUTO);
- rrdlabels_add(d->st_dropped->rrdlabels, "device_name", string2str(d->name?d->name:d->id), RRDLABEL_SRC_AUTO);
- rrdlabels_add(d->st_dropped->rrdlabels, "device_group", string2str(d->family?d->family:d->id), RRDLABEL_SRC_AUTO);
- }
- else {
- if(unlikely(d->name_updated)) {
- char name[RRD_ID_LENGTH_MAX + 1];
- snprintfz(name, RRD_ID_LENGTH_MAX, "%s_dropped", string2str(d->name?d->name:d->id));
- rrdset_reset_name(d->st_dropped, name);
- }
-
- if(d->name && d->name_updated)
- rrdlabels_add(d->st_dropped->rrdlabels, "device_name", string2str(d->name), RRDLABEL_SRC_AUTO);
-
- if(d->family && d->family_updated)
- rrdlabels_add(d->st_dropped->rrdlabels, "device_group", string2str(d->family), RRDLABEL_SRC_AUTO);
-
- // TODO
- // update the family
- }
-
- dfe_start_read(d->classes, c) {
- if(unlikely(!c->render)) continue;
-
- if(unlikely(!c->rd_dropped))
- c->rd_dropped = rrddim_add(d->st_dropped, string2str(c->id), string2str(c->name?c->name:c->id), 1, 1, RRD_ALGORITHM_INCREMENTAL);
- else if(unlikely(c->name_updated))
- rrddim_reset_name(d->st_dropped, c->rd_dropped, string2str(c->name));
-
- rrddim_set_by_pointer(d->st_dropped, c->rd_dropped, c->dropped);
- }
- dfe_done(c);
-
- rrdset_done(d->st_dropped);
- }
-
- // --------------------------------------------------------------------
- // tokens
-
- if(d->enabled_tokens == CONFIG_BOOLEAN_YES || (d->enabled_tokens == CONFIG_BOOLEAN_AUTO &&
- (tokens_sum ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- d->enabled_tokens = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_tokens)) {
- char id[RRD_ID_LENGTH_MAX + 1];
- char name[RRD_ID_LENGTH_MAX + 1];
- snprintfz(id, RRD_ID_LENGTH_MAX, "%s_tokens", string2str(d->id));
- snprintfz(name, RRD_ID_LENGTH_MAX, "%s_tokens", string2str(d->name ? d->name : d->id));
-
- d->st_tokens = rrdset_create_localhost(
- RRD_TYPE_TC,
- id,
- name,
- string2str(d->family ? d->family : d->id),
- RRD_TYPE_TC ".qos_tokens",
- "Class Tokens",
- "tokens",
- PLUGIN_TC_NAME,
- NULL,
- NETDATA_CHART_PRIO_TC_QOS_TOKENS,
- localhost->rrd_update_every,
- RRDSET_TYPE_LINE);
-
- rrdlabels_add(d->st_tokens->rrdlabels, "device", string2str(d->id), RRDLABEL_SRC_AUTO);
- rrdlabels_add(d->st_tokens->rrdlabels, "device_name", string2str(d->name?d->name:d->id), RRDLABEL_SRC_AUTO);
- rrdlabels_add(d->st_tokens->rrdlabels, "device_group", string2str(d->family?d->family:d->id), RRDLABEL_SRC_AUTO);
- }
- else {
- if(unlikely(d->name_updated)) {
- char name[RRD_ID_LENGTH_MAX + 1];
- snprintfz(name, RRD_ID_LENGTH_MAX, "%s_tokens", string2str(d->name?d->name:d->id));
- rrdset_reset_name(d->st_tokens, name);
- }
-
- if(d->name && d->name_updated)
- rrdlabels_add(d->st_tokens->rrdlabels, "device_name", string2str(d->name), RRDLABEL_SRC_AUTO);
-
- if(d->family && d->family_updated)
- rrdlabels_add(d->st_tokens->rrdlabels, "device_group", string2str(d->family), RRDLABEL_SRC_AUTO);
-
- // TODO
- // update the family
- }
-
- dfe_start_read(d->classes, c) {
- if(unlikely(!c->render)) continue;
-
- if(unlikely(!c->rd_tokens)) {
- c->rd_tokens = rrddim_add(d->st_tokens, string2str(c->id), string2str(c->name?c->name:c->id), 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
- else if(unlikely(c->name_updated))
- rrddim_reset_name(d->st_tokens, c->rd_tokens, string2str(c->name));
-
- rrddim_set_by_pointer(d->st_tokens, c->rd_tokens, c->tokens);
- }
- dfe_done(c);
-
- rrdset_done(d->st_tokens);
- }
-
- // --------------------------------------------------------------------
- // ctokens
-
- if(d->enabled_ctokens == CONFIG_BOOLEAN_YES || (d->enabled_ctokens == CONFIG_BOOLEAN_AUTO &&
- (ctokens_sum ||
- netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) {
- d->enabled_ctokens = CONFIG_BOOLEAN_YES;
-
- if(unlikely(!d->st_ctokens)) {
- char id[RRD_ID_LENGTH_MAX + 1];
- char name[RRD_ID_LENGTH_MAX + 1];
- snprintfz(id, RRD_ID_LENGTH_MAX, "%s_ctokens", string2str(d->id));
- snprintfz(name, RRD_ID_LENGTH_MAX, "%s_ctokens", string2str(d->name ? d->name : d->id));
-
- d->st_ctokens = rrdset_create_localhost(
- RRD_TYPE_TC,
- id,
- name,
- string2str(d->family ? d->family : d->id),
- RRD_TYPE_TC ".qos_ctokens",
- "Class cTokens",
- "ctokens",
- PLUGIN_TC_NAME,
- NULL,
- NETDATA_CHART_PRIO_TC_QOS_CTOKENS,
- localhost->rrd_update_every,
- RRDSET_TYPE_LINE);
-
- rrdlabels_add(d->st_ctokens->rrdlabels, "device", string2str(d->id), RRDLABEL_SRC_AUTO);
- rrdlabels_add(d->st_ctokens->rrdlabels, "device_name", string2str(d->name?d->name:d->id), RRDLABEL_SRC_AUTO);
- rrdlabels_add(d->st_ctokens->rrdlabels, "device_group", string2str(d->family?d->family:d->id), RRDLABEL_SRC_AUTO);
- }
- else {
- netdata_log_debug(D_TC_LOOP, "TC: Updating _ctokens chart for device '%s'", string2str(d->name?d->name:d->id));
-
- if(unlikely(d->name_updated)) {
- char name[RRD_ID_LENGTH_MAX + 1];
- snprintfz(name, RRD_ID_LENGTH_MAX, "%s_ctokens", string2str(d->name?d->name:d->id));
- rrdset_reset_name(d->st_ctokens, name);
- }
-
- if(d->name && d->name_updated)
- rrdlabels_add(d->st_ctokens->rrdlabels, "device_name", string2str(d->name), RRDLABEL_SRC_AUTO);
-
- if(d->family && d->family_updated)
- rrdlabels_add(d->st_ctokens->rrdlabels, "device_group", string2str(d->family), RRDLABEL_SRC_AUTO);
-
- // TODO
- // update the family
- }
-
- dfe_start_read(d->classes, c) {
- if(unlikely(!c->render)) continue;
-
- if(unlikely(!c->rd_ctokens))
- c->rd_ctokens = rrddim_add(d->st_ctokens, string2str(c->id), string2str(c->name?c->name:c->id), 1, 1, RRD_ALGORITHM_ABSOLUTE);
- else if(unlikely(c->name_updated))
- rrddim_reset_name(d->st_ctokens, c->rd_ctokens, string2str(c->name));
-
- rrddim_set_by_pointer(d->st_ctokens, c->rd_ctokens, c->ctokens);
- }
- dfe_done(c);
-
- rrdset_done(d->st_ctokens);
- }
-
- tc_device_classes_cleanup(d);
-}
-
-static inline void tc_device_set_class_name(struct tc_device *d, char *id, char *name) {
- if(unlikely(!name || !*name)) return;
-
- struct tc_class *c = tc_class_index_find(d, id);
- if(likely(c)) {
- if(likely(c->name)) {
- if(!strcmp(string2str(c->name), name)) return;
- string_freez(c->name);
- c->name = NULL;
- }
-
- if(likely(name && *name && strcmp(string2str(c->id), name) != 0)) {
- netdata_log_debug(D_TC_LOOP, "TC: Setting device '%s', class '%s' name to '%s'", string2str(d->id), id, name);
- c->name = string_strdupz(name);
- c->name_updated = true;
- }
- }
-}
-
-static inline void tc_device_set_device_name(struct tc_device *d, char *name) {
- if(unlikely(!name || !*name)) return;
-
- if(d->name) {
- if(!strcmp(string2str(d->name), name)) return;
- string_freez(d->name);
- d->name = NULL;
- }
-
- if(likely(name && *name && strcmp(string2str(d->id), name) != 0)) {
- netdata_log_debug(D_TC_LOOP, "TC: Setting device '%s' name to '%s'", string2str(d->id), name);
- d->name = string_strdupz(name);
- d->name_updated = true;
- }
-}
-
-static inline void tc_device_set_device_family(struct tc_device *d, char *family) {
- string_freez(d->family);
- d->family = NULL;
-
- if(likely(family && *family && strcmp(string2str(d->id), family) != 0)) {
- netdata_log_debug(D_TC_LOOP, "TC: Setting device '%s' family to '%s'", string2str(d->id), family);
- d->family = string_strdupz(family);
- d->family_updated = true;
- }
- // no need for null termination - it is already null
-}
-
-static inline struct tc_device *tc_device_create(char *id) {
- struct tc_device *d = tc_device_index_find(id);
-
- if(!d) {
- netdata_log_debug(D_TC_LOOP, "TC: Creating device '%s'", id);
-
- struct tc_device tmp = {
- .id = string_strdupz(id),
- .enabled = (char)-1,
- };
- d = tc_device_index_add(&tmp);
- }
-
- return(d);
-}
-
-static inline struct tc_class *tc_class_add(struct tc_device *n, char *id, bool qdisc, char *parentid, char *leafid) {
- struct tc_class *c = tc_class_index_find(n, id);
-
- if(!c) {
- netdata_log_debug(D_TC_LOOP, "TC: Creating in device '%s', class id '%s', parentid '%s', leafid '%s'",
- string2str(n->id), id, parentid?parentid:"", leafid?leafid:"");
-
- struct tc_class tmp = {
- .id = string_strdupz(id),
- .isqdisc = qdisc,
- .parentid = string_strdupz(parentid),
- .leafid = string_strdupz(leafid),
- };
-
- c = tc_class_index_add(n, &tmp);
- }
- return(c);
-}
-
-//static inline void tc_device_free(struct tc_device *d) {
-// tc_device_index_del(d);
-//}
-
-static inline int tc_space(char c) {
- switch(c) {
- case ' ':
- case '\t':
- case '\r':
- case '\n':
- return 1;
-
- default:
- return 0;
- }
-}
-
-static inline void tc_split_words(char *str, char **words, int max_words) {
- char *s = str;
- int i = 0;
-
- // skip all white space
- while(tc_space(*s)) s++;
-
- // store the first word
- words[i++] = s;
-
- // while we have something
- while(*s) {
- // if it is a space
- if(unlikely(tc_space(*s))) {
-
- // terminate the word
- *s++ = '\0';
-
- // skip all white space
- while(tc_space(*s)) s++;
-
- // if we reached the end, stop
- if(!*s) break;
-
- // store the next word
- if(i < max_words) words[i++] = s;
- else break;
- }
- else s++;
- }
-
- // terminate the words
- while(i < max_words) words[i++] = NULL;
-}
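-
-// Example (illustrative): given the input "class htb 1:10  parent 1:1",
-// words[] becomes { "class", "htb", "1:10", "parent", "1:1", NULL, ... }
-// (all remaining slots are set to NULL).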
-
-static pid_t tc_child_pid = 0;
-
-static void tc_main_cleanup(void *ptr) {
- worker_unregister();
-
- tc_device_index_destroy();
-
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- collector_info("cleaning up...");
-
- if(tc_child_pid) {
- collector_info("TC: killing with SIGTERM tc-qos-helper process %d", tc_child_pid);
- if(killpid(tc_child_pid) != -1) {
- siginfo_t info;
-
- collector_info("TC: waiting for tc plugin child process pid %d to exit...", tc_child_pid);
- netdata_waitid(P_PID, (id_t) tc_child_pid, &info, WEXITED);
- }
-
- tc_child_pid = 0;
- }
-
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-#define WORKER_TC_CLASS 0
-#define WORKER_TC_BEGIN 1
-#define WORKER_TC_END 2
-#define WORKER_TC_SENT 3
-#define WORKER_TC_LENDED 4
-#define WORKER_TC_TOKENS 5
-#define WORKER_TC_SETDEVICENAME 6
-#define WORKER_TC_SETDEVICEGROUP 7
-#define WORKER_TC_SETCLASSNAME 8
-#define WORKER_TC_WORKTIME 9
-#define WORKER_TC_PLUGIN_TIME 10
-#define WORKER_TC_DEVICES 11
-#define WORKER_TC_CLASSES 12
-
-#if WORKER_UTILIZATION_MAX_JOB_TYPES < 13
-#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 13
-#endif
-
-void *tc_main(void *ptr) {
- worker_register("TC");
- worker_register_job_name(WORKER_TC_CLASS, "class");
- worker_register_job_name(WORKER_TC_BEGIN, "begin");
- worker_register_job_name(WORKER_TC_END, "end");
- worker_register_job_name(WORKER_TC_SENT, "sent");
- worker_register_job_name(WORKER_TC_LENDED, "lended");
- worker_register_job_name(WORKER_TC_TOKENS, "tokens");
- worker_register_job_name(WORKER_TC_SETDEVICENAME, "devicename");
- worker_register_job_name(WORKER_TC_SETDEVICEGROUP, "devicegroup");
- worker_register_job_name(WORKER_TC_SETCLASSNAME, "classname");
- worker_register_job_name(WORKER_TC_WORKTIME, "worktime");
-
- worker_register_job_custom_metric(WORKER_TC_PLUGIN_TIME, "tc script execution time", "milliseconds/run", WORKER_METRIC_ABSOLUTE);
- worker_register_job_custom_metric(WORKER_TC_DEVICES, "number of devices", "devices", WORKER_METRIC_ABSOLUTE);
- worker_register_job_custom_metric(WORKER_TC_CLASSES, "number of classes", "classes", WORKER_METRIC_ABSOLUTE);
-
- tc_device_index_init();
- netdata_thread_cleanup_push(tc_main_cleanup, ptr);
-
- char command[FILENAME_MAX + 1];
- char *words[PLUGINSD_MAX_WORDS] = { NULL };
-
- uint32_t BEGIN_HASH = simple_hash("BEGIN");
- uint32_t END_HASH = simple_hash("END");
- uint32_t QDISC_HASH = simple_hash("qdisc");
- uint32_t CLASS_HASH = simple_hash("class");
- uint32_t SENT_HASH = simple_hash("Sent");
- uint32_t LENDED_HASH = simple_hash("lended:");
- uint32_t TOKENS_HASH = simple_hash("tokens:");
- uint32_t SETDEVICENAME_HASH = simple_hash("SETDEVICENAME");
- uint32_t SETDEVICEGROUP_HASH = simple_hash("SETDEVICEGROUP");
- uint32_t SETCLASSNAME_HASH = simple_hash("SETCLASSNAME");
- uint32_t WORKTIME_HASH = simple_hash("WORKTIME");
- uint32_t first_hash;
-
- snprintfz(command, TC_LINE_MAX, "%s/tc-qos-helper.sh", netdata_configured_primary_plugins_dir);
- char *tc_script = config_get("plugin:tc", "script to run to get tc values", command);
-
- while(service_running(SERVICE_COLLECTORS)) {
- FILE *fp_child_input, *fp_child_output;
- struct tc_device *device = NULL;
- struct tc_class *class = NULL;
-
- snprintfz(command, TC_LINE_MAX, "exec %s %d", tc_script, localhost->rrd_update_every);
- netdata_log_debug(D_TC_LOOP, "executing '%s'", command);
-
- fp_child_output = netdata_popen(command, (pid_t *)&tc_child_pid, &fp_child_input);
- if(unlikely(!fp_child_output)) {
- collector_error("TC: Cannot popen(\"%s\", \"r\").", command);
- goto cleanup;
- }
-
- char buffer[TC_LINE_MAX+1] = "";
- while(fgets(buffer, TC_LINE_MAX, fp_child_output) != NULL) {
- if(unlikely(!service_running(SERVICE_COLLECTORS))) break;
-
- buffer[TC_LINE_MAX] = '\0';
- // netdata_log_debug(D_TC_LOOP, "TC: read '%s'", buffer);
-
- tc_split_words(buffer, words, PLUGINSD_MAX_WORDS);
-
- if(unlikely(!words[0] || !*words[0])) {
- // netdata_log_debug(D_TC_LOOP, "empty line");
- worker_is_idle();
- continue;
- }
- // else netdata_log_debug(D_TC_LOOP, "First word is '%s'", words[0]);
-
- first_hash = simple_hash(words[0]);
-
- if(unlikely(device && ((first_hash == CLASS_HASH && strcmp(words[0], "class") == 0) || (first_hash == QDISC_HASH && strcmp(words[0], "qdisc") == 0)))) {
- worker_is_busy(WORKER_TC_CLASS);
-
- // netdata_log_debug(D_TC_LOOP, "CLASS line on class id='%s', parent='%s', parentid='%s', leaf='%s', leafid='%s'", words[2], words[3], words[4], words[5], words[6]);
-
- char *type = words[1]; // the class/qdisc type: htb, fq_codel, etc
- char *id = words[2]; // the class/qdisc major:minor
- char *parent = words[3]; // the word 'parent' or 'root'
- char *parentid = words[4]; // parentid
- char *leaf = words[5]; // the word 'leaf'
- char *leafid = words[6]; // leafid
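-
- // Example (illustrative) input line and the word mapping used above:
- //   class htb 1:10 parent 1:1 leaf 10: prio 0 rate 1Mbit ...
- //   words[]: [0]"class" [1]"htb" [2]"1:10" [3]"parent" [4]"1:1" [5]"leaf" [6]"10:"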
-
- int parent_is_root = 0;
- int parent_is_parent = 0;
- if(likely(parent)) {
- parent_is_parent = !strcmp(parent, "parent");
-
- if(!parent_is_parent)
- parent_is_root = !strcmp(parent, "root");
- }
-
- if(likely(type && id && (parent_is_root || parent_is_parent))) {
- bool qdisc = false;
-
- if(first_hash == QDISC_HASH) {
- qdisc = true;
-
- if(!strcmp(type, "ingress")) {
- // we don't want to get the ingress qdisc
- // there should be an IFB interface for this
-
- class = NULL;
- worker_is_idle();
- continue;
- }
-
- if(parent_is_parent && parentid) {
- // eliminate the minor number from parentid
- // why: parentid is the id of the parent class
- // but major: is also the id of the parent qdisc
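- // e.g. parentid "1:12" becomes "1:" below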
-
- char *s = parentid;
- while(*s && *s != ':') s++;
- if(*s == ':') s[1] = '\0';
- }
- }
-
- if(parent_is_root) {
- parentid = NULL;
- leafid = NULL;
- }
- else if(!leaf || strcmp(leaf, "leaf") != 0)
- leafid = NULL;
-
- char leafbuf[20 + 1] = "";
- if(leafid && leafid[strlen(leafid) - 1] == ':') {
- strncpyz(leafbuf, leafid, 20 - 1);
- strcat(leafbuf, "1");
- leafid = leafbuf;
- }
-
- class = tc_class_add(device, id, qdisc, parentid, leafid);
- }
- else {
- // clear the last class
- class = NULL;
- }
- }
- else if(unlikely(first_hash == END_HASH && strcmp(words[0], "END") == 0)) {
- worker_is_busy(WORKER_TC_END);
-
- // netdata_log_debug(D_TC_LOOP, "END line");
-
- if(likely(device)) {
- netdata_thread_disable_cancelability();
- tc_device_commit(device);
- // tc_device_free(device);
- netdata_thread_enable_cancelability();
- }
-
- device = NULL;
- class = NULL;
- }
- else if(unlikely(first_hash == BEGIN_HASH && strcmp(words[0], "BEGIN") == 0)) {
- worker_is_busy(WORKER_TC_BEGIN);
-
- // netdata_log_debug(D_TC_LOOP, "BEGIN line on device '%s'", words[1]);
-
- if(likely(words[1] && *words[1])) {
- device = tc_device_create(words[1]);
- }
- else {
- // tc_device_free(device);
- device = NULL;
- }
-
- class = NULL;
- }
- else if(unlikely(device && class && first_hash == SENT_HASH && strcmp(words[0], "Sent") == 0)) {
- worker_is_busy(WORKER_TC_SENT);
-
- // netdata_log_debug(D_TC_LOOP, "SENT line '%s'", words[1]);
- if(likely(words[1] && *words[1])) {
- class->bytes = str2ull(words[1], NULL);
- class->updated = true;
- }
- else {
- class->updated = false;
- }
-
- if(likely(words[3] && *words[3]))
- class->packets = str2ull(words[3], NULL);
-
- if(likely(words[6] && *words[6]))
- class->dropped = str2ull(words[6], NULL);
-
- //if(likely(words[8] && *words[8]))
- // class->overlimits = str2ull(words[8]);
-
- //if(likely(words[10] && *words[10]))
- // class->requeues = str2ull(words[8]);
- }
- else if(unlikely(device && class && class->updated && first_hash == LENDED_HASH && strcmp(words[0], "lended:") == 0)) {
- worker_is_busy(WORKER_TC_LENDED);
-
- // netdata_log_debug(D_TC_LOOP, "LENDED line '%s'", words[1]);
- //if(likely(words[1] && *words[1]))
- // class->lended = str2ull(words[1]);
-
- //if(likely(words[3] && *words[3]))
- // class->borrowed = str2ull(words[3]);
-
- //if(likely(words[5] && *words[5]))
- // class->giants = str2ull(words[5]);
- }
- else if(unlikely(device && class && class->updated && first_hash == TOKENS_HASH && strcmp(words[0], "tokens:") == 0)) {
- worker_is_busy(WORKER_TC_TOKENS);
-
- // netdata_log_debug(D_TC_LOOP, "TOKENS line '%s'", words[1]);
- if(likely(words[1] && *words[1]))
- class->tokens = str2ull(words[1], NULL);
-
- if(likely(words[3] && *words[3]))
- class->ctokens = str2ull(words[3], NULL);
- }
- else if(unlikely(device && first_hash == SETDEVICENAME_HASH && strcmp(words[0], "SETDEVICENAME") == 0)) {
- worker_is_busy(WORKER_TC_SETDEVICENAME);
-
- // netdata_log_debug(D_TC_LOOP, "SETDEVICENAME line '%s'", words[1]);
- if(likely(words[1] && *words[1]))
- tc_device_set_device_name(device, words[1]);
- }
- else if(unlikely(device && first_hash == SETDEVICEGROUP_HASH && strcmp(words[0], "SETDEVICEGROUP") == 0)) {
- worker_is_busy(WORKER_TC_SETDEVICEGROUP);
-
- // netdata_log_debug(D_TC_LOOP, "SETDEVICEGROUP line '%s'", words[1]);
- if(likely(words[1] && *words[1]))
- tc_device_set_device_family(device, words[1]);
- }
- else if(unlikely(device && first_hash == SETCLASSNAME_HASH && strcmp(words[0], "SETCLASSNAME") == 0)) {
- worker_is_busy(WORKER_TC_SETCLASSNAME);
-
- // netdata_log_debug(D_TC_LOOP, "SETCLASSNAME line '%s' '%s'", words[1], words[2]);
- char *id = words[1];
- char *path = words[2];
- if(likely(id && *id && path && *path))
- tc_device_set_class_name(device, id, path);
- }
- else if(unlikely(first_hash == WORKTIME_HASH && strcmp(words[0], "WORKTIME") == 0)) {
- worker_is_busy(WORKER_TC_WORKTIME);
- worker_set_metric(WORKER_TC_PLUGIN_TIME, str2ll(words[1], NULL));
-
- size_t number_of_devices = dictionary_entries(tc_device_root_index);
- size_t number_of_classes = 0;
-
- struct tc_device *d;
- dfe_start_read(tc_device_root_index, d) {
- number_of_classes += dictionary_entries(d->classes);
- }
- dfe_done(d);
-
- worker_set_metric(WORKER_TC_DEVICES, number_of_devices);
- worker_set_metric(WORKER_TC_CLASSES, number_of_classes);
- }
- //else {
- // netdata_log_debug(D_TC_LOOP, "IGNORED line");
- //}
-
- worker_is_idle();
- }
-
- // fgets() failed or loop broke
- int code = netdata_pclose(fp_child_input, fp_child_output, (pid_t)tc_child_pid);
- tc_child_pid = 0;
-
- if(unlikely(device)) {
- // tc_device_free(device);
- device = NULL;
- class = NULL;
- }
-
- if(unlikely(!service_running(SERVICE_COLLECTORS)))
- goto cleanup;
-
- if(code == 1 || code == 127) {
- // 1 = DISABLE
- // 127 = cannot even run it
- collector_error("TC: tc-qos-helper.sh exited with code %d. Disabling it.", code);
- goto cleanup;
- }
-
- sleep((unsigned int) localhost->rrd_update_every);
- }
-
-cleanup: ; // added semi-colon to prevent older gcc error: label at end of compound statement
- worker_unregister();
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/tc.plugin/tc-qos-helper.sh.in b/collectors/tc.plugin/tc-qos-helper.sh.in
deleted file mode 100755
index 3298c39a3..000000000
--- a/collectors/tc.plugin/tc-qos-helper.sh.in
+++ /dev/null
@@ -1,356 +0,0 @@
-#!/usr/bin/env bash
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2023 Netdata Inc.
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# This script is a helper to allow netdata collect tc data.
-# tc output parsing has been implemented in C, inside netdata
-# This script allows setting names to dimensions.
-
-export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin:@sbindir_POST@"
-export LC_ALL=C
-
-cmd_line="'${0}' $(printf "'%s' " "${@}")"
-
-# -----------------------------------------------------------------------------
-# logging
-
-PROGRAM_NAME="$(basename "${0}")"
-PROGRAM_NAME="${PROGRAM_NAME/.plugin/}"
-
-# these should be the same with syslog() priorities
-NDLP_EMERG=0 # system is unusable
-NDLP_ALERT=1 # action must be taken immediately
-NDLP_CRIT=2 # critical conditions
-NDLP_ERR=3 # error conditions
-NDLP_WARN=4 # warning conditions
-NDLP_NOTICE=5 # normal but significant condition
-NDLP_INFO=6 # informational
-NDLP_DEBUG=7 # debug-level messages
-
-# the max (numerically) log level we will log
-LOG_LEVEL=$NDLP_INFO
-
-set_log_min_priority() {
- case "${NETDATA_LOG_LEVEL,,}" in
- "emerg" | "emergency")
- LOG_LEVEL=$NDLP_EMERG
- ;;
-
- "alert")
- LOG_LEVEL=$NDLP_ALERT
- ;;
-
- "crit" | "critical")
- LOG_LEVEL=$NDLP_CRIT
- ;;
-
- "err" | "error")
- LOG_LEVEL=$NDLP_ERR
- ;;
-
- "warn" | "warning")
- LOG_LEVEL=$NDLP_WARN
- ;;
-
- "notice")
- LOG_LEVEL=$NDLP_NOTICE
- ;;
-
- "info")
- LOG_LEVEL=$NDLP_INFO
- ;;
-
- "debug")
- LOG_LEVEL=$NDLP_DEBUG
- ;;
- esac
-}
-
-set_log_min_priority
-
-log() {
- local level="${1}"
- shift 1
-
- [[ -n "$level" && -n "$LOG_LEVEL" && "$level" -gt "$LOG_LEVEL" ]] && return
-
- systemd-cat-native --log-as-netdata --newline="--NEWLINE--" <<EOFLOG
-INVOCATION_ID=${NETDATA_INVOCATION_ID}
-SYSLOG_IDENTIFIER=${PROGRAM_NAME}
-PRIORITY=${level}
-THREAD_TAG=tc-qos-helper
-ND_LOG_SOURCE=collector
-ND_REQUEST=${cmd_line}
-MESSAGE=${*//\\n/--NEWLINE--}
-
-EOFLOG
- # AN EMPTY LINE IS NEEDED ABOVE
-}
-
-info() {
- log "$NDLP_INFO" "${@}"
-}
-
-warning() {
- log "$NDLP_WARN" "${@}"
-}
-
-error() {
- log "$NDLP_ERR" "${@}"
-}
-
-fatal() {
- log "$NDLP_ALERT" "${@}"
- exit 1
-}
-
-debug() {
- log "$NDLP_DEBUG" "${@}"
-}
-
-# -----------------------------------------------------------------------------
-# find /var/run/fireqos
-
-# the default
-fireqos_run_dir="/var/run/fireqos"
-
-function realdir() {
- local r
- local t
- r="$1"
- t="$(readlink "$r")"
-
- while [ "$t" ]; do
- r=$(cd "$(dirname "$r")" && cd "$(dirname "$t")" && pwd -P)/$(basename "$t")
- t=$(readlink "$r")
- done
-
- dirname "$r"
-}
-
-if [ ! -d "${fireqos_run_dir}" ]; then
-
- # the fireqos executable - we will use it to find its config
- fireqos="$(command -v fireqos 2>/dev/null)"
-
- if [ -n "${fireqos}" ]; then
-
- fireqos_exec_dir="$(realdir "${fireqos}")"
-
- if [ -n "${fireqos_exec_dir}" ] && [ "${fireqos_exec_dir}" != "." ] && [ -f "${fireqos_exec_dir}/install.config" ]; then
- LOCALSTATEDIR=
- #shellcheck source=/dev/null
- source "${fireqos_exec_dir}/install.config"
-
- if [ -d "${LOCALSTATEDIR}/run/fireqos" ]; then
- fireqos_run_dir="${LOCALSTATEDIR}/run/fireqos"
- else
- warning "FireQOS is installed as '${fireqos}', its installation config at '${fireqos_exec_dir}/install.config' specifies local state data at '${LOCALSTATEDIR}/run/fireqos', but this directory is not found or is not readable (check the permissions of its parents)."
- fi
- else
- warning "Although FireQOS is installed on this system as '${fireqos}', I cannot find/read its installation configuration at '${fireqos_exec_dir}/install.config'."
- fi
- else
- warning "FireQOS is not installed on this system. Use FireQOS to apply traffic QoS and expose the class names to netdata. Check https://github.com/netdata/netdata/tree/master/collectors/tc.plugin#tcplugin"
- fi
-fi
-
-# -----------------------------------------------------------------------------
-
-[ -z "${NETDATA_PLUGINS_DIR}" ] && NETDATA_PLUGINS_DIR="$(dirname "${0}")"
-[ -z "${NETDATA_USER_CONFIG_DIR}" ] && NETDATA_USER_CONFIG_DIR="@configdir_POST@"
-[ -z "${NETDATA_STOCK_CONFIG_DIR}" ] && NETDATA_STOCK_CONFIG_DIR="@libconfigdir_POST@"
-
-plugins_dir="${NETDATA_PLUGINS_DIR}"
-tc="$(command -v tc 2>/dev/null)"
-
-# -----------------------------------------------------------------------------
-# user configuration
-
-# time in seconds to refresh QoS class/qdisc names
-qos_get_class_names_every=120
-
-# time in seconds to exit - netdata will restart the script
-qos_exit_every=3600
-
-# what to use? classes or qdiscs?
-tc_show="qdisc" # can also be "class"
-
-# -----------------------------------------------------------------------------
-# check if we have a valid number for interval
-
-t=${1}
-update_every=$((t))
-[ $((update_every)) -lt 1 ] && update_every=${NETDATA_UPDATE_EVERY}
-[ $((update_every)) -lt 1 ] && update_every=1
-
-# -----------------------------------------------------------------------------
-# allow the user to override our defaults
-
-for CONFIG in "${NETDATA_STOCK_CONFIG_DIR}/tc-qos-helper.conf" "${NETDATA_USER_CONFIG_DIR}/tc-qos-helper.conf"; do
- if [ -f "${CONFIG}" ]; then
- info "Loading config file '${CONFIG}'..."
- #shellcheck source=/dev/null
- source "${CONFIG}" || error "Failed to load config file '${CONFIG}'."
- else
- warning "Cannot find file '${CONFIG}'."
- fi
-done
-
-case "${tc_show}" in
-qdisc | class) ;;
-
-*)
- error "tc_show variable can be either 'qdisc' or 'class' but is set to '${tc_show}'. Assuming it is 'qdisc'."
- tc_show="qdisc"
- ;;
-esac
-
-# -----------------------------------------------------------------------------
-# default sleep function
-
-LOOPSLEEPMS_LASTWORK=0
-loopsleepms() {
- sleep "$1"
-}
-
-# if found and included, this file overwrites loopsleepms()
-# with a high resolution timer function for precise looping.
-#shellcheck source=/dev/null
-. "${plugins_dir}/loopsleepms.sh.inc"
-
-# -----------------------------------------------------------------------------
-# final checks we can run
-
-if [ -z "${tc}" ] || [ ! -x "${tc}" ]; then
- fatal "cannot find command 'tc' in this system."
-fi
-
-tc_devices=
-fix_names=
-
-# -----------------------------------------------------------------------------
-
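-# setclassname emits a SETCLASSNAME line that maps a class id to a human name.
-# It is called either with the fields of a FireQOS interface_classes_monitor
-# entry (split on '|' in show_fireqos_names), or from show_tc_cls as:
-#   setclassname "" name classid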
-setclassname() {
- if [ "${tc_show}" = "qdisc" ]; then
- echo "SETCLASSNAME $4 $2"
- else
- echo "SETCLASSNAME $3 $2"
- fi
-}
-
-show_tc_cls() {
- [ "${tc_show}" = "qdisc" ] && return 1
-
- local x="${1}"
-
- if [ -f /etc/iproute2/tc_cls ]; then
- local classid name rest
- while read -r classid name rest; do
- if [ -z "${classid}" ] ||
- [ -z "${name}" ] ||
- [ "${classid}" = "#" ] ||
- [ "${name}" = "#" ] ||
- [ "${classid:0:1}" = "#" ] ||
- [ "${name:0:1}" = "#" ]; then
- continue
- fi
- setclassname "" "${name}" "${classid}"
- done </etc/iproute2/tc_cls
- return 0
- fi
- return 1
-}
-
-show_fireqos_names() {
- local x="${1}" name n interface_dev interface_classes_monitor
-
- if [ -f "${fireqos_run_dir}/ifaces/${x}" ]; then
- name="$(<"${fireqos_run_dir}/ifaces/${x}")"
- echo "SETDEVICENAME ${name}" || exit
-
- #shellcheck source=/dev/null
- source "${fireqos_run_dir}/${name}.conf"
- for n in ${interface_classes_monitor}; do
- # shellcheck disable=SC2086
- setclassname ${n//|/ }
- done
- [ -n "${interface_dev}" ] && echo "SETDEVICEGROUP ${interface_dev}" || exit
-
- return 0
- fi
-
- return 1
-}
-
-show_tc() {
- local x="${1}"
-
- echo "BEGIN ${x}" || exit
-
- # netdata can parse the output of tc
- ${tc} -s ${tc_show} show dev "${x}"
-
- # check FireQOS names for classes
- if [ -n "${fix_names}" ]; then
- show_fireqos_names "${x}" || show_tc_cls "${x}"
- fi
-
- echo "END ${x}" || exit
-}
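-
-# For reference, each device produces a block like the following (illustrative
-# values; the SETDEVICENAME/SETCLASSNAME lines appear only while class names
-# are being refreshed):
-#
-#   BEGIN eth0
-#   <raw `tc -s qdisc show dev eth0` output>
-#   SETDEVICENAME world
-#   SETCLASSNAME 1:10 web
-#   END eth0
-#
-# and, after all devices, a single "WORKTIME <ms>" line is printed. The C side
-# of tc.plugin parses this stream line by line.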
-
-find_tc_devices() {
- local count=0 devs dev rest l
-
- # find all the devices in the system
- # without forking
- while IFS=":| " read -r dev rest; do
- count=$((count + 1))
- [ ${count} -le 2 ] && continue
- devs="${devs} ${dev}"
- done </proc/net/dev
-
- # from all the devices find the ones
- # that have QoS defined
- # unfortunately, one fork per device cannot be avoided
- tc_devices=
- for dev in ${devs}; do
- l="$(${tc} class show dev "${dev}" 2>/dev/null)"
- [ -n "${l}" ] && tc_devices="${tc_devices} ${dev}"
- done
-}
-
-# update devices and class names
-# once every 2 minutes
-names_every=$((qos_get_class_names_every / update_every))
-
-# exit this script every hour
-# it will be restarted automatically
-exit_after=$((qos_exit_every / update_every))
-
-c=0
-gc=0
-while true; do
- fix_names=
- c=$((c + 1))
- gc=$((gc + 1))
-
- if [ ${c} -le 1 ] || [ ${c} -ge ${names_every} ]; then
- c=1
- fix_names="YES"
- find_tc_devices
- fi
-
- for d in ${tc_devices}; do
- show_tc "${d}"
- done
-
- echo "WORKTIME ${LOOPSLEEPMS_LASTWORK}" || exit
-
- loopsleepms "${update_every}"
-
- [ ${gc} -gt ${exit_after} ] && exit 0
-done
diff --git a/collectors/timex.plugin/Makefile.am b/collectors/timex.plugin/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/collectors/timex.plugin/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/timex.plugin/README.md b/collectors/timex.plugin/README.md
deleted file mode 120000
index 89c1bd0d4..000000000
--- a/collectors/timex.plugin/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/timex.md \ No newline at end of file
diff --git a/collectors/timex.plugin/integrations/timex.md b/collectors/timex.plugin/integrations/timex.md
deleted file mode 100644
index 754b2368c..000000000
--- a/collectors/timex.plugin/integrations/timex.md
+++ /dev/null
@@ -1,143 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/timex.plugin/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/timex.plugin/metadata.yaml"
-sidebar_label: "Timex"
-learn_status: "Published"
-learn_rel_path: "Data Collection/System Clock and NTP"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Timex
-
-
-<img src="https://netdata.cloud/img/syslog.png" width="150"/>
-
-
-Plugin: timex.plugin
-Module: timex.plugin
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Examine Timex metrics to gain insights into system clock operations. Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping.
-
-It uses the `adjtimex` system call on Linux, and `ntp_adjtime` on FreeBSD and macOS, to monitor the kernel clock synchronization state.
-
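-For orientation, here is a minimal, self-contained sketch (not the plugin's own code) of how that state can be queried on Linux:
-
-```c
-// Minimal sketch: read-only query of the kernel clock sync state on Linux.
-#include <stdio.h>
-#include <sys/timex.h>
-
-int main(void) {
-    struct timex t = { 0 };                 // modes == 0: read, do not adjust
-    int state = adjtimex(&t);
-    if (state == -1)
-        perror("adjtimex");
-    else
-        printf("%s, offset=%lld\n",
-               state == TIME_ERROR ? "unsynchronized" : "synchronized",
-               (long long)t.offset);        // usec, or nsec when STA_NANO is set
-    return 0;
-}
-```
-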
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Timex instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| system.clock_sync_state | state | state |
-| system.clock_status | unsync, clockerr | status |
-| system.clock_sync_offset | offset | milliseconds |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ system_clock_sync_state ](https://github.com/netdata/netdata/blob/master/health/health.d/timex.conf) | system.clock_sync_state | when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:timex]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-At least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run.
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 1 | no |
-| clock synchronization state | Make chart showing system clock synchronization state. | yes | yes |
-| time offset | Make chart showing computed time offset between local system and reference clock. | yes | yes |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic configuration example.
-
-<details><summary>Config</summary>
-
-```yaml
-[plugin:timex]
- update every = 1
- clock synchronization state = yes
- time offset = yes
-
-```
-</details>
-
-
diff --git a/collectors/timex.plugin/metadata.yaml b/collectors/timex.plugin/metadata.yaml
deleted file mode 100644
index 2b43d8a24..000000000
--- a/collectors/timex.plugin/metadata.yaml
+++ /dev/null
@@ -1,112 +0,0 @@
-plugin_name: timex.plugin
-modules:
- - meta:
- plugin_name: timex.plugin
- module_name: timex.plugin
- monitored_instance:
- name: Timex
- link: ""
- categories:
- - data-collection.system-clock-and-ntp
- icon_filename: "syslog.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Examine Timex metrics to gain insights into system clock operations. Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping."
- method_description: "It uses the adjtimex system call on Linux, and ntp_adjtime on FreeBSD and macOS, to monitor the kernel clock synchronization state."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:timex]"
- description: "The netdata main configuration file."
- options:
- description: "At least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run."
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 1
- required: false
- - name: clock synchronization state
- description: Make chart showing system clock synchronization state.
- default_value: yes
- required: true
- - name: time offset
- description: Make chart showing computed time offset between local system and reference clock.
- default_value: yes
- required: true
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic configuration example.
- config: |
- [plugin:timex]
- update every = 1
- clock synchronization state = yes
- time offset = yes
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: system_clock_sync_state
- link: https://github.com/netdata/netdata/blob/master/health/health.d/timex.conf
- metric: system.clock_sync_state
- info: when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server
- os: "linux"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: system.clock_sync_state
- description: System Clock Synchronization State
- unit: "state"
- chart_type: line
- dimensions:
- - name: state
- - name: system.clock_status
- description: System Clock Status
- unit: "status"
- chart_type: line
- dimensions:
- - name: unsync
- - name: clockerr
- - name: system.clock_sync_offset
- description: Computed Time Offset Between Local System and Reference Clock
- unit: "milliseconds"
- chart_type: line
- dimensions:
- - name: offset
diff --git a/collectors/timex.plugin/plugin_timex.c b/collectors/timex.plugin/plugin_timex.c
deleted file mode 100644
index 025b699a1..000000000
--- a/collectors/timex.plugin/plugin_timex.c
+++ /dev/null
@@ -1,176 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "daemon/common.h"
-#include "libnetdata/os.h"
-
-#define PLUGIN_TIMEX_NAME "timex.plugin"
-
-#define CONFIG_SECTION_TIMEX "plugin:timex"
-
-struct status_codes {
- char *name;
- int code;
- RRDDIM *rd;
-} sta_codes[] = {
- // {"pll", STA_PLL, NULL},
- // {"ppsfreq", STA_PPSFREQ, NULL},
- // {"ppstime", STA_PPSTIME, NULL},
- // {"fll", STA_FLL, NULL},
- // {"ins", STA_INS, NULL},
- // {"del", STA_DEL, NULL},
- {"unsync", STA_UNSYNC, NULL},
- // {"freqhold", STA_FREQHOLD, NULL},
- // {"ppssignal", STA_PPSSIGNAL, NULL},
- // {"ppsjitter", STA_PPSJITTER, NULL},
- // {"ppswander", STA_PPSWANDER, NULL},
- // {"ppserror", STA_PPSERROR, NULL},
- {"clockerr", STA_CLOCKERR, NULL},
- // {"nano", STA_NANO, NULL},
- // {"clk", STA_CLK, NULL},
- {NULL, 0, NULL},
-};
-
-static void timex_main_cleanup(void *ptr)
-{
- worker_unregister();
-
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
-
- netdata_log_info("cleaning up...");
-
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-}
-
-void *timex_main(void *ptr)
-{
- worker_register("TIMEX");
- worker_register_job_name(0, "clock check");
-
- netdata_thread_cleanup_push(timex_main_cleanup, ptr);
-
- int update_every = (int)config_get_number(CONFIG_SECTION_TIMEX, "update every", 10);
- if (update_every < localhost->rrd_update_every)
- update_every = localhost->rrd_update_every;
-
- int do_sync = config_get_boolean(CONFIG_SECTION_TIMEX, "clock synchronization state", CONFIG_BOOLEAN_YES);
- int do_offset = config_get_boolean(CONFIG_SECTION_TIMEX, "time offset", CONFIG_BOOLEAN_YES);
-
- if (unlikely(do_sync == CONFIG_BOOLEAN_NO && do_offset == CONFIG_BOOLEAN_NO)) {
- netdata_log_info("No charts to show");
- goto exit;
- }
-
- usec_t step = update_every * USEC_PER_SEC;
- heartbeat_t hb;
- heartbeat_init(&hb);
- while (service_running(SERVICE_COLLECTORS)) {
- worker_is_idle();
- heartbeat_next(&hb, step);
- worker_is_busy(0);
-
- struct timex timex_buf = {};
- int sync_state = 0;
- static int prev_sync_state = 0;
-
- sync_state = ADJUST_TIMEX(&timex_buf);
-
- int non_seq_failure = (sync_state == -1 && prev_sync_state != -1);
- prev_sync_state = sync_state;
-
- if (non_seq_failure) {
- netdata_log_error("Cannot get clock synchronization state");
- continue;
- }
-
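- // adjtimex()/ntp_adjtime() report the offset in microseconds by default,
- // or in nanoseconds when STA_NANO is set; scale it to milliseconds below.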
- collected_number divisor = USEC_PER_MS;
- if (timex_buf.status & STA_NANO)
- divisor = NSEC_PER_MSEC;
-
- // ----------------------------------------------------------------
-
- if (do_sync) {
- static RRDSET *st_sync_state = NULL;
- static RRDDIM *rd_sync_state;
-
- if (unlikely(!st_sync_state)) {
- st_sync_state = rrdset_create_localhost(
- "system",
- "clock_sync_state",
- NULL,
- "clock synchronization",
- NULL,
- "System Clock Synchronization State",
- "state",
- PLUGIN_TIMEX_NAME,
- NULL,
- NETDATA_CHART_PRIO_CLOCK_SYNC_STATE,
- update_every,
- RRDSET_TYPE_LINE);
-
- rd_sync_state = rrddim_add(st_sync_state, "state", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_sync_state, rd_sync_state, sync_state != TIME_ERROR ? 1 : 0);
- rrdset_done(st_sync_state);
-
- static RRDSET *st_clock_status = NULL;
-
- if (unlikely(!st_clock_status)) {
- st_clock_status = rrdset_create_localhost(
- "system",
- "clock_status",
- NULL,
- "clock synchronization",
- NULL,
- "System Clock Status",
- "status",
- PLUGIN_TIMEX_NAME,
- NULL,
- NETDATA_CHART_PRIO_CLOCK_STATUS,
- update_every,
- RRDSET_TYPE_LINE);
-
- for (int i = 0; sta_codes[i].name != NULL; i++) {
- sta_codes[i].rd =
- rrddim_add(st_clock_status, sta_codes[i].name, NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
- }
- }
-
- for (int i = 0; sta_codes[i].name != NULL; i++)
- rrddim_set_by_pointer(st_clock_status, sta_codes[i].rd, timex_buf.status & sta_codes[i].code ? 1 : 0);
-
- rrdset_done(st_clock_status);
- }
-
- if (do_offset) {
- static RRDSET *st_offset = NULL;
- static RRDDIM *rd_offset;
-
- if (unlikely(!st_offset)) {
- st_offset = rrdset_create_localhost(
- "system",
- "clock_sync_offset",
- NULL,
- "clock synchronization",
- NULL,
- "Computed Time Offset Between Local System and Reference Clock",
- "milliseconds",
- PLUGIN_TIMEX_NAME,
- NULL,
- NETDATA_CHART_PRIO_CLOCK_SYNC_OFFSET,
- update_every,
- RRDSET_TYPE_LINE);
-
- rd_offset = rrddim_add(st_offset, "offset", NULL, 1, divisor, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st_offset, rd_offset, timex_buf.offset);
- rrdset_done(st_offset);
- }
- }
-
-exit:
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
diff --git a/collectors/xenstat.plugin/Makefile.am b/collectors/xenstat.plugin/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/collectors/xenstat.plugin/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/collectors/xenstat.plugin/README.md b/collectors/xenstat.plugin/README.md
deleted file mode 120000
index 32fe4d213..000000000
--- a/collectors/xenstat.plugin/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/xen_xcp-ng.md \ No newline at end of file
diff --git a/collectors/xenstat.plugin/integrations/xen_xcp-ng.md b/collectors/xenstat.plugin/integrations/xen_xcp-ng.md
deleted file mode 100644
index 17dc8d785..000000000
--- a/collectors/xenstat.plugin/integrations/xen_xcp-ng.md
+++ /dev/null
@@ -1,176 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/xenstat.plugin/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/xenstat.plugin/metadata.yaml"
-sidebar_label: "Xen XCP-ng"
-learn_status: "Published"
-learn_rel_path: "Data Collection/Containers and VMs"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Xen XCP-ng
-
-
-<img src="https://netdata.cloud/img/xen.png" width="150"/>
-
-
-Plugin: xenstat.plugin
-Module: xenstat.plugin
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors XenServer and XCP-ng host and domain statistics.
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-The plugin needs to run with setuid privileges.
-
-### Default Behavior
-
-#### Auto-Detection
-
-This plugin requires the `xen-dom0-libs-devel` and `yajl-devel` libraries to be installed.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Xen XCP-ng instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| xenstat.mem | free, used | MiB |
-| xenstat.domains | domains | domains |
-| xenstat.cpus | cpus | cpus |
-| xenstat.cpu_freq | frequency | MHz |
-
-### Per xendomain
-
-Metrics related to Xen domains. Each domain provides its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| xendomain.states | running, blocked, paused, shutdown, crashed, dying | boolean |
-| xendomain.cpu | used | percentage |
-| xendomain.mem | maximum, current | MiB |
-| xendomain.vcpu | a dimension per vcpu | percentage |
-
-### Per xendomain vbd
-
-Metrics related to Xen domain Virtual Block Device. Each VBD provides its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| xendomain.oo_req_vbd | requests | requests/s |
-| xendomain.requests_vbd | read, write | requests/s |
-| xendomain.sectors_vbd | read, write | sectors/s |
-
-### Per xendomain network
-
-Metrics related to Xen domain network interfaces. Each network interface provides its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| xendomain.bytes_network | received, sent | kilobits/s |
-| xendomain.packets_network | received, sent | packets/s |
-| xendomain.errors_network | received, sent | errors/s |
-| xendomain.drops_network | received, sent | drops/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Libraries
-
-1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.
-
- Note: On CentOS systems you will need the `centos-release-xen` repository, and the required package for Xen is `xen-devel`.
-
-2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `netdata.conf`.
-Configuration for this specific integration is located in the `[plugin:xenstat]` section within that file.
-
-The file format is a modified INI syntax. The general structure is:
-
-```ini
-[section1]
- option1 = some value
- option2 = some other value
-
-[section2]
- option3 = some third value
-```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config netdata.conf
-```
-#### Options
-
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update every | Data collection frequency. | 1 | no |
-
-</details>
-
-#### Examples
-A minimal illustrative example, using only the documented `update every` option, is shown below.
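-
-<details><summary>Config</summary>
-
-```ini
-[plugin:xenstat]
-  update every = 1
-```
-</details>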
-
-
diff --git a/collectors/xenstat.plugin/metadata.yaml b/collectors/xenstat.plugin/metadata.yaml
deleted file mode 100644
index e5527dbb1..000000000
--- a/collectors/xenstat.plugin/metadata.yaml
+++ /dev/null
@@ -1,195 +0,0 @@
-plugin_name: xenstat.plugin
-modules:
- - meta:
- plugin_name: xenstat.plugin
- module_name: xenstat.plugin
- monitored_instance:
- name: Xen XCP-ng
- link: "https://xenproject.org/"
- categories:
- - data-collection.containers-and-vms
- icon_filename: "xen.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors XenServer and XCP-ng host and domain statistics."
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid."
- default_behavior:
- auto_detection:
- description: "This plugin requires the `xen-dom0-libs-devel` and `yajl-devel` libraries to be installed."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Libraries
- description: |
- 1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.
-
- Note: On CentOS systems you will need the `centos-release-xen` repository, and the required package for Xen is `xen-devel`.
-
- 2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.
- configuration:
- file:
- name: "netdata.conf"
- section_name: "[plugin:xenstat]"
- description: "The netdata main configuration file."
- options:
- description: ""
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 1
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: xenstat.mem
- description: Memory Usage
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: used
- - name: xenstat.domains
- description: Number of Domains
- unit: "domains"
- chart_type: line
- dimensions:
- - name: domains
- - name: xenstat.cpus
- description: Number of CPUs
- unit: "cpus"
- chart_type: line
- dimensions:
- - name: cpus
- - name: xenstat.cpu_freq
- description: CPU Frequency
- unit: "MHz"
- chart_type: line
- dimensions:
- - name: frequency
- - name: xendomain
- description: "Metrics related to Xen domains. Each domain provides its own set of the following metrics."
- labels: []
- metrics:
- - name: xendomain.states
- description: Domain States
- unit: "boolean"
- chart_type: line
- dimensions:
- - name: running
- - name: blocked
- - name: paused
- - name: shutdown
- - name: crashed
- - name: dying
- - name: xendomain.cpu
- description: CPU Usage (100% = 1 core)
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: used
- - name: xendomain.mem
- description: Memory Reservation
- unit: "MiB"
- chart_type: line
- dimensions:
- - name: maximum
- - name: current
- - name: xendomain.vcpu
- description: CPU Usage per VCPU
- unit: "percentage"
- chart_type: line
- dimensions:
- - name: a dimension per vcpu
- - name: xendomain vbd
- description: "Metrics related to Xen domain Virtual Block Device. Each VBD provides its own set of the following metrics."
- labels: []
- metrics:
- - name: xendomain.oo_req_vbd
- description: VBD{%u} Out Of Requests
- unit: "requests/s"
- chart_type: line
- dimensions:
- - name: requests
- - name: xendomain.requests_vbd
- description: VBD{%u} Requests
- unit: "requests/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: xendomain.sectors_vbd
- description: VBD{%u} Read/Written Sectors
- unit: "sectors/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: xendomain network
- description: "Metrics related to Xen domain network interfaces. Each network interface provides its own set of the following metrics."
- labels: []
- metrics:
- - name: xendomain.bytes_network
- description: Network{%u} Received/Sent Bytes
- unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: xendomain.packets_network
- description: Network{%u} Received/Sent Packets
- unit: "packets/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: xendomain.errors_network
- description: Network{%u} Receive/Transmit Errors
- unit: "errors/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
- - name: xendomain.drops_network
- description: Network{%u} Receive/Transmit Drops
- unit: "drops/s"
- chart_type: line
- dimensions:
- - name: received
- - name: sent
diff --git a/collectors/xenstat.plugin/xenstat_plugin.c b/collectors/xenstat.plugin/xenstat_plugin.c
deleted file mode 100644
index 319396d43..000000000
--- a/collectors/xenstat.plugin/xenstat_plugin.c
+++ /dev/null
@@ -1,1066 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "libnetdata/libnetdata.h"
-#include "libnetdata/required_dummies.h"
-
-#include <xenstat.h>
-#include <libxl.h>
-
-#define PLUGIN_XENSTAT_NAME "xenstat.plugin"
-
-#define NETDATA_CHART_PRIO_XENSTAT_NODE_CPUS 30001
-#define NETDATA_CHART_PRIO_XENSTAT_NODE_CPU_FREQ 30002
-#define NETDATA_CHART_PRIO_XENSTAT_NODE_MEM 30003
-#define NETDATA_CHART_PRIO_XENSTAT_NODE_TMEM 30004
-#define NETDATA_CHART_PRIO_XENSTAT_NODE_DOMAINS 30005
-
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_STATES 30101
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_CPU 30102
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VCPU 30103
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_MEM 30104
-
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_TMEM_PAGES 30104
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_TMEM_OPERATIONS 30105
-
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VBD_OO_REQ 30200
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VBD_REQUESTS 30300
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VBD_SECTORS 30400
-
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_BYTES 30500
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_PACKETS 30600
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_ERRORS 30700
-#define NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_DROPS 30800
-
-#define TYPE_LENGTH_MAX 200
-
-#define CHART_IS_OBSOLETE 1
-#define CHART_IS_NOT_OBSOLETE 0
-
-// Variables
-static int debug = 0;
-static int netdata_update_every = 1;
-
-struct vcpu_metrics {
- unsigned int id;
-
- unsigned int online;
- unsigned long long ns;
-
- int chart_generated;
- int updated;
-
- struct vcpu_metrics *next;
-};
-
-struct vbd_metrics {
- unsigned int id;
-
- unsigned int error;
- unsigned long long oo_reqs;
- unsigned long long rd_reqs;
- unsigned long long wr_reqs;
- unsigned long long rd_sects;
- unsigned long long wr_sects;
-
- int oo_req_chart_generated;
- int requests_chart_generated;
- int sectors_chart_generated;
- int updated;
-
- struct vbd_metrics *next;
-};
-
-struct network_metrics {
- unsigned int id;
-
- unsigned long long rbytes;
- unsigned long long rpackets;
- unsigned long long rerrs;
- unsigned long long rdrops;
-
- unsigned long long tbytes;
- unsigned long long tpackets;
- unsigned long long terrs;
- unsigned long long tdrops;
-
- int bytes_chart_generated;
- int packets_chart_generated;
- int errors_chart_generated;
- int drops_chart_generated;
- int updated;
-
- struct network_metrics *next;
-};
-
-struct domain_metrics {
- char *uuid;
- uint32_t hash;
-
- unsigned int id;
- char *name;
-
- // states
- unsigned int running;
- unsigned int blocked;
- unsigned int paused;
- unsigned int shutdown;
- unsigned int crashed;
- unsigned int dying;
- unsigned int cur_vcpus;
-
- unsigned long long cpu_ns;
- unsigned long long cur_mem;
- unsigned long long max_mem;
-
- struct vcpu_metrics *vcpu_root;
- struct vbd_metrics *vbd_root;
- struct network_metrics *network_root;
-
- int states_chart_generated;
- int cpu_chart_generated;
- int vcpu_chart_generated;
- int num_vcpus_changed;
- int mem_chart_generated;
- int updated;
-
- struct domain_metrics *next;
-};
-
-struct node_metrics{
- unsigned long long tot_mem;
- unsigned long long free_mem;
- int num_domains;
- unsigned int num_cpus;
- unsigned long long node_cpu_hz;
-
- struct domain_metrics *domain_root;
-};
-
-static struct node_metrics node_metrics = {
- .domain_root = NULL
-};
-
-static inline struct domain_metrics *domain_metrics_get(const char *uuid, uint32_t hash) {
- struct domain_metrics *d = NULL, *last = NULL;
- for(d = node_metrics.domain_root; d ; last = d, d = d->next) {
- if(unlikely(d->hash == hash && !strcmp(d->uuid, uuid)))
- return d;
- }
-
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: allocating memory for domain with uuid %s\n", uuid);
-
- d = callocz(1, sizeof(struct domain_metrics));
- d->uuid = strdupz(uuid);
- d->hash = hash;
-
- if(unlikely(!last)) {
- d->next = node_metrics.domain_root;
- node_metrics.domain_root = d;
- }
- else {
- d->next = last->next;
- last->next = d;
- }
-
- return d;
-}
-
-static struct domain_metrics *domain_metrics_free(struct domain_metrics *d) {
- struct domain_metrics *cur = NULL, *last = NULL;
- struct vcpu_metrics *vcpu, *vcpu_f;
- struct vbd_metrics *vbd, *vbd_f;
- struct network_metrics *network, *network_f;
-
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: freeing memory for domain '%s' id %u, uuid %s\n", d->name, d->id, d->uuid);
-
- for(cur = node_metrics.domain_root; cur ; last = cur, cur = cur->next) {
- if(unlikely(cur->hash == d->hash && !strcmp(cur->uuid, d->uuid))) break;
- }
-
- if(unlikely(!cur)) {
- netdata_log_error("XENSTAT: failed to free domain metrics.");
- return NULL;
- }
-
- if(likely(last))
- last->next = cur->next;
- else
- node_metrics.domain_root = cur->next; // unlink the head without dropping the rest of the list
-
- freez(cur->uuid);
- freez(cur->name);
-
- vcpu = cur->vcpu_root;
- while(vcpu) {
- vcpu_f = vcpu;
- vcpu = vcpu->next;
- freez(vcpu_f);
- }
-
- vbd = cur->vbd_root;
- while(vbd) {
- vbd_f = vbd;
- vbd = vbd->next;
- freez(vbd_f);
- }
-
- network = cur->network_root;
- while(network) {
- network_f = network;
- network = network->next;
- freez(network_f);
- }
-
- freez(cur);
-
- return last ? last : NULL;
-}
-
-static int vcpu_metrics_collect(struct domain_metrics *d, xenstat_domain *domain) {
- unsigned int num_vcpus = 0;
- xenstat_vcpu *vcpu = NULL;
- struct vcpu_metrics *vcpu_m = NULL, *last_vcpu_m = NULL;
-
- num_vcpus = xenstat_domain_num_vcpus(domain);
-
- for(vcpu_m = d->vcpu_root; vcpu_m ; vcpu_m = vcpu_m->next)
- vcpu_m->updated = 0;
-
- vcpu_m = d->vcpu_root;
-
- unsigned int i, num_online_vcpus=0;
- for(i = 0; i < num_vcpus; i++) {
- if(unlikely(!vcpu_m)) {
- vcpu_m = callocz(1, sizeof(struct vcpu_metrics));
-
- if(unlikely(i == 0)) d->vcpu_root = vcpu_m;
- else last_vcpu_m->next = vcpu_m;
- }
-
- vcpu_m->id = i;
-
- vcpu = xenstat_domain_vcpu(domain, i);
-
- if(unlikely(!vcpu)) {
- netdata_log_error("XENSTAT: cannot get VCPU statistics.");
- return 1;
- }
-
- vcpu_m->online = xenstat_vcpu_online(vcpu);
- if(likely(vcpu_m->online)) { num_online_vcpus++; }
- vcpu_m->ns = xenstat_vcpu_ns(vcpu);
-
- vcpu_m->updated = 1;
-
- last_vcpu_m = vcpu_m;
- vcpu_m = vcpu_m->next;
- }
-
- if(unlikely(num_online_vcpus != d->cur_vcpus)) {
- d->num_vcpus_changed = 1;
- d->cur_vcpus = num_online_vcpus;
- }
-
- return 0;
-}
-
-static int vbd_metrics_collect(struct domain_metrics *d, xenstat_domain *domain) {
- unsigned int num_vbds = xenstat_domain_num_vbds(domain);
- xenstat_vbd *vbd = NULL;
- struct vbd_metrics *vbd_m = NULL, *last_vbd_m = NULL;
-
- for(vbd_m = d->vbd_root; vbd_m ; vbd_m = vbd_m->next)
- vbd_m->updated = 0;
-
- vbd_m = d->vbd_root;
-
- unsigned int i;
- for(i = 0; i < num_vbds; i++) {
- if(unlikely(!vbd_m)) {
- vbd_m = callocz(1, sizeof(struct vbd_metrics));
-
- if(unlikely(i == 0)) d->vbd_root = vbd_m;
- else last_vbd_m->next = vbd_m;
- }
-
- vbd_m->id = i;
-
- vbd = xenstat_domain_vbd(domain, i);
-
- if(unlikely(!vbd)) {
- netdata_log_error("XENSTAT: cannot get VBD statistics.");
- return 1;
- }
-
-#ifdef HAVE_XENSTAT_VBD_ERROR
- vbd_m->error = xenstat_vbd_error(vbd);
-#else
- vbd_m->error = 0;
-#endif
- vbd_m->oo_reqs = xenstat_vbd_oo_reqs(vbd);
- vbd_m->rd_reqs = xenstat_vbd_rd_reqs(vbd);
- vbd_m->wr_reqs = xenstat_vbd_wr_reqs(vbd);
- vbd_m->rd_sects = xenstat_vbd_rd_sects(vbd);
- vbd_m->wr_sects = xenstat_vbd_wr_sects(vbd);
-
- vbd_m->updated = 1;
-
- last_vbd_m = vbd_m;
- vbd_m = vbd_m->next;
- }
-
- return 0;
-}
-
-static int network_metrics_collect(struct domain_metrics *d, xenstat_domain *domain) {
- unsigned int num_networks = xenstat_domain_num_networks(domain);
- xenstat_network *network = NULL;
- struct network_metrics *network_m = NULL, *last_network_m = NULL;
-
- for(network_m = d->network_root; network_m ; network_m = network_m->next)
- network_m->updated = 0;
-
- network_m = d->network_root;
-
- unsigned int i;
- for(i = 0; i < num_networks; i++) {
- if(unlikely(!network_m)) {
- network_m = callocz(1, sizeof(struct network_metrics));
-
- if(unlikely(i == 0)) d->network_root = network_m;
- else last_network_m->next = network_m;
- }
-
- network_m->id = i;
-
- network = xenstat_domain_network(domain, i);
-
- if(unlikely(!network)) {
- netdata_log_error("XENSTAT: cannot get network statistics.");
- return 1;
- }
-
- network_m->rbytes = xenstat_network_rbytes(network);
- network_m->rpackets = xenstat_network_rpackets(network);
- network_m->rerrs = xenstat_network_rerrs(network);
- network_m->rdrops = xenstat_network_rdrop(network);
-
- network_m->tbytes = xenstat_network_tbytes(network);
- network_m->tpackets = xenstat_network_tpackets(network);
- network_m->terrs = xenstat_network_terrs(network);
- network_m->tdrops = xenstat_network_tdrop(network);
-
- network_m->updated = 1;
-
- last_network_m = network_m;
- network_m = network_m->next;
- }
-
- return 0;
-}
-
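-// xenstat_collect() drives one refresh cycle: clear every cached domain's updated
-// flag, take a fresh snapshot from libxenstat, upsert one domain_metrics entry per
-// domain (keyed by the UUID's simple_hash), and leave updated=0 on domains that
-// disappeared, so xenstat_send_domain_metrics() can obsolete their charts and free them.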
-static int xenstat_collect(xenstat_handle *xhandle, libxl_ctx *ctx, libxl_dominfo *info) {
-
- // mark all old metrics as not-updated
- struct domain_metrics *d;
- for(d = node_metrics.domain_root; d ; d = d->next)
- d->updated = 0;
-
- xenstat_node *node = xenstat_get_node(xhandle, XENSTAT_ALL);
- if (unlikely(!node)) {
- netdata_log_error("XENSTAT: failed to retrieve statistics from libxenstat.");
- return 1;
- }
-
- node_metrics.tot_mem = xenstat_node_tot_mem(node);
- node_metrics.free_mem = xenstat_node_free_mem(node);
- node_metrics.num_domains = xenstat_node_num_domains(node);
- node_metrics.num_cpus = xenstat_node_num_cpus(node);
- node_metrics.node_cpu_hz = xenstat_node_cpu_hz(node);
-
- int i;
- for(i = 0; i < node_metrics.num_domains; i++) {
- xenstat_domain *domain = NULL;
- char uuid[LIBXL_UUID_FMTLEN + 1];
-
- domain = xenstat_node_domain_by_index(node, i);
-
- // get the domain UUID; skip the domain entirely if libxl cannot resolve it,
- // so the uninitialized uuid buffer is never hashed or used as a cache key
- unsigned int id = xenstat_domain_id(domain);
- if(unlikely(libxl_domain_info(ctx, info, id))) {
- netdata_log_error("XENSTAT: cannot get domain info.");
- continue;
- }
- snprintfz(uuid, LIBXL_UUID_FMTLEN, LIBXL_UUID_FMT, LIBXL_UUID_BYTES(info->uuid));
-
- uint32_t hash = simple_hash(uuid);
- d = domain_metrics_get(uuid, hash);
-
- d->id = id;
- if(unlikely(!d->name)) {
- d->name = strdupz(xenstat_domain_name(domain));
- netdata_fix_chart_id(d->name);
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: domain id %u, uuid %s has name '%s'\n", d->id, d->uuid, d->name);
- }
-
- d->running = xenstat_domain_running(domain);
- d->blocked = xenstat_domain_blocked(domain);
- d->paused = xenstat_domain_paused(domain);
- d->shutdown = xenstat_domain_shutdown(domain);
- d->crashed = xenstat_domain_crashed(domain);
- d->dying = xenstat_domain_dying(domain);
-
- d->cpu_ns = xenstat_domain_cpu_ns(domain);
- d->cur_mem = xenstat_domain_cur_mem(domain);
- d->max_mem = xenstat_domain_max_mem(domain);
-
- if(unlikely(vcpu_metrics_collect(d, domain) || vbd_metrics_collect(d, domain) || network_metrics_collect(d, domain))) {
- xenstat_free_node(node);
- return 1;
- }
-
- d->updated = 1;
- }
-
- xenstat_free_node(node);
-
- return 0;
-}
-
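-// For reference, a sketch of what one cycle emits on stdout for the node memory
-// chart, in netdata's external plugin protocol (priority, plugin name and values
-// are illustrative, assuming update_every = 1 so the divisor is 1048576 bytes/MiB):
-//
-//   CHART xenstat.mem '' 'Memory Usage' 'MiB' 'memory' '' stacked 30001 1 '' xenstat
-//   DIMENSION free '' absolute 1 1048576
-//   DIMENSION used '' absolute 1 1048576
-//   BEGIN xenstat.mem
-//   SET free = 2147483648
-//   SET used = 6442450944
-//   END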
-static void xenstat_send_node_metrics(void) {
- static int mem_chart_generated = 0, domains_chart_generated = 0, cpus_chart_generated = 0, cpu_freq_chart_generated = 0;
-
- // ----------------------------------------------------------------
-
- if(unlikely(!mem_chart_generated)) {
- printf("CHART xenstat.mem '' 'Memory Usage' 'MiB' 'memory' '' stacked %d %d '' %s\n"
- , NETDATA_CHART_PRIO_XENSTAT_NODE_MEM
- , netdata_update_every
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION %s '' absolute 1 %d\n", "free", netdata_update_every * 1024 * 1024);
- printf("DIMENSION %s '' absolute 1 %d\n", "used", netdata_update_every * 1024 * 1024);
- mem_chart_generated = 1;
- }
-
- printf(
- "BEGIN xenstat.mem\n"
- "SET free = %lld\n"
- "SET used = %lld\n"
- "END\n"
- , (collected_number) node_metrics.free_mem
- , (collected_number) (node_metrics.tot_mem - node_metrics.free_mem)
- );
-
- // ----------------------------------------------------------------
-
- if(unlikely(!domains_chart_generated)) {
- printf("CHART xenstat.domains '' 'Number of Domains' 'domains' 'domains' '' line %d %d '' %s\n"
- , NETDATA_CHART_PRIO_XENSTAT_NODE_DOMAINS
- , netdata_update_every
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION %s '' absolute 1 %d\n", "domains", netdata_update_every);
- domains_chart_generated = 1;
- }
-
- printf(
- "BEGIN xenstat.domains\n"
- "SET domains = %lld\n"
- "END\n"
- , (collected_number) node_metrics.num_domains
- );
-
- // ----------------------------------------------------------------
-
- if(unlikely(!cpus_chart_generated)) {
- printf("CHART xenstat.cpus '' 'Number of CPUs' 'cpus' 'cpu' '' line %d %d '' %s\n"
- , NETDATA_CHART_PRIO_XENSTAT_NODE_CPUS
- , netdata_update_every
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION %s '' absolute 1 %d\n", "cpus", netdata_update_every);
- cpus_chart_generated = 1;
- }
-
- printf(
- "BEGIN xenstat.cpus\n"
- "SET cpus = %lld\n"
- "END\n"
- , (collected_number) node_metrics.num_cpus
- );
-
- // ----------------------------------------------------------------
-
- if(unlikely(!cpu_freq_chart_generated)) {
- printf("CHART xenstat.cpu_freq '' 'CPU Frequency' 'MHz' 'cpu' '' line %d %d '' %s\n"
- , NETDATA_CHART_PRIO_XENSTAT_NODE_CPU_FREQ
- , netdata_update_every
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION %s '' absolute 1 %d\n", "frequency", netdata_update_every * 1024 * 1024);
- cpu_freq_chart_generated = 1;
- }
-
- printf(
- "BEGIN xenstat.cpu_freq\n"
- "SET frequency = %lld\n"
- "END\n"
- , (collected_number) node_metrics.node_cpu_hz
- );
-}
-
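-// Each print_domain_*_chart_definition() below emits a single CHART line whose
-// positional fields are: type.id, name, title, units, family, context, chart type,
-// priority, update_every, options (the 'obsolete' option retires a chart) and the
-// plugin name, followed by one DIMENSION line per dimension.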
-static void print_domain_states_chart_definition(char *type, int obsolete_flag) {
- printf("CHART %s.states '' 'Domain States' 'boolean' 'states' 'xendomain.states' line %d %d %s %s\n"
- , type
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_STATES
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION running '' absolute 1 %d\n", netdata_update_every);
- printf("DIMENSION blocked '' absolute 1 %d\n", netdata_update_every);
- printf("DIMENSION paused '' absolute 1 %d\n", netdata_update_every);
- printf("DIMENSION shutdown '' absolute 1 %d\n", netdata_update_every);
- printf("DIMENSION crashed '' absolute 1 %d\n", netdata_update_every);
- printf("DIMENSION dying '' absolute 1 %d\n", netdata_update_every);
-}
-
-static void print_domain_cpu_chart_definition(char *type, int obsolete_flag) {
- printf("CHART %s.cpu '' 'CPU Usage (100%% = 1 core)' 'percentage' 'cpu' 'xendomain.cpu' line %d %d %s %s\n"
- , type
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_CPU
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION used '' incremental 100 %d\n", netdata_update_every * 1000000000);
-}
-
-static void print_domain_mem_chart_definition(char *type, int obsolete_flag) {
- printf("CHART %s.mem '' 'Memory Reservation' 'MiB' 'memory' 'xendomain.mem' line %d %d %s %s\n"
- , type
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_MEM
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION maximum '' absolute 1 %d\n", netdata_update_every * 1024 * 1024);
- printf("DIMENSION current '' absolute 1 %d\n", netdata_update_every * 1024 * 1024);
-}
-
-static void print_domain_vcpu_chart_definition(char *type, struct domain_metrics *d, int obsolete_flag) {
- struct vcpu_metrics *vcpu_m;
-
- printf("CHART %s.vcpu '' 'CPU Usage per VCPU' 'percentage' 'cpu' 'xendomain.vcpu' line %d %d %s %s\n"
- , type
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VCPU
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
-
- for(vcpu_m = d->vcpu_root; vcpu_m; vcpu_m = vcpu_m->next) {
- if(likely(vcpu_m->updated && vcpu_m->online)) {
- printf("DIMENSION vcpu%u '' incremental 100 %d\n", vcpu_m->id, netdata_update_every * 1000000000);
- }
- }
-}
-
-static void print_domain_vbd_oo_chart_definition(char *type, unsigned int vbd, int obsolete_flag) {
- printf("CHART %s.oo_req_vbd%u '' 'VBD%u \"Out Of\" Requests' 'requests/s' 'vbd' 'xendomain.oo_req_vbd' line %u %d %s %s\n"
- , type
- , vbd
- , vbd
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VBD_OO_REQ + vbd
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION requests '' incremental 1 %d\n", netdata_update_every);
-}
-
-static void print_domain_vbd_requests_chart_definition(char *type, unsigned int vbd, int obsolete_flag) {
- printf("CHART %s.requests_vbd%u '' 'VBD%u Requests' 'requests/s' 'vbd' 'xendomain.requests_vbd' line %u %d %s %s\n"
- , type
- , vbd
- , vbd
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VBD_REQUESTS + vbd
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION read '' incremental 1 %d\n", netdata_update_every);
- printf("DIMENSION write '' incremental -1 %d\n", netdata_update_every);
-}
-
-static void print_domain_vbd_sectors_chart_definition(char *type, unsigned int vbd, int obsolete_flag) {
- printf("CHART %s.sectors_vbd%u '' 'VBD%u Read/Written Sectors' 'sectors/s' 'vbd' 'xendomain.sectors_vbd' line %u %d %s %s\n"
- , type
- , vbd
- , vbd
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_VBD_SECTORS + vbd
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION read '' incremental 1 %d\n", netdata_update_every);
- printf("DIMENSION write '' incremental -1 %d\n", netdata_update_every);
-}
-
-static void print_domain_network_bytes_chart_definition(char *type, unsigned int network, int obsolete_flag) {
- printf("CHART %s.bytes_network%u '' 'Network%u Received/Sent Bytes' 'kilobits/s' 'network' 'xendomain.bytes_network' line %u %d %s %s\n"
- , type
- , network
- , network
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_BYTES + network
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION received '' incremental 8 %d\n", netdata_update_every * 1000);
- printf("DIMENSION sent '' incremental -8 %d\n", netdata_update_every * 1000);
-}
-
-static void print_domain_network_packets_chart_definition(char *type, unsigned int network, int obsolete_flag) {
- printf("CHART %s.packets_network%u '' 'Network%u Received/Sent Packets' 'packets/s' 'network' 'xendomain.packets_network' line %u %d %s %s\n"
- , type
- , network
- , network
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_PACKETS + network
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION received '' incremental 1 %d\n", netdata_update_every);
- printf("DIMENSION sent '' incremental -1 %d\n", netdata_update_every);
-}
-
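-// note: the errors and drops printers below reuse NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_PACKETS
-// as their base priority (an apparent copy-paste), so for a given interface these
-// charts share a priority with its packets chart; priorities only affect menu
-// ordering, so the charts still render, just in an unspecified relative order.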
-static void print_domain_network_errors_chart_definition(char *type, unsigned int network, int obsolete_flag) {
- printf("CHART %s.errors_network%u '' 'Network%u Receive/Transmit Errors' 'errors/s' 'network' 'xendomain.errors_network' line %u %d %s %s\n"
- , type
- , network
- , network
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_PACKETS + network
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION received '' incremental 1 %d\n", netdata_update_every);
- printf("DIMENSION sent '' incremental -1 %d\n", netdata_update_every);
-}
-
-static void print_domain_network_drops_chart_definition(char *type, unsigned int network, int obsolete_flag) {
- printf("CHART %s.drops_network%u '' 'Network%u Receive/Transmit Drops' 'drops/s' 'network' 'xendomain.drops_network' line %u %d %s %s\n"
- , type
- , network
- , network
- , NETDATA_CHART_PRIO_XENSTAT_DOMAIN_NET_PACKETS + network
- , netdata_update_every
- , obsolete_flag ? "obsolete": "''"
- , PLUGIN_XENSTAT_NAME
- );
- printf("DIMENSION received '' incremental 1 %d\n", netdata_update_every);
- printf("DIMENSION sent '' incremental -1 %d\n", netdata_update_every);
-}
-
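-// xenstat_send_domain_metrics() emits the per-domain charts. Every chart id is
-// namespaced under a per-domain type of the form "xendomain_<name>_<uuid>"; domains
-// left with updated=0 by the collector get their charts marked obsolete here and
-// their cached state freed.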
-static void xenstat_send_domain_metrics(void) {
-
- if(unlikely(!node_metrics.domain_root)) return;
- struct domain_metrics *d;
-
- for(d = node_metrics.domain_root; d; d = d->next) {
- char type[TYPE_LENGTH_MAX + 1];
- snprintfz(type, TYPE_LENGTH_MAX, "xendomain_%s_%s", d->name, d->uuid);
-
- if(likely(d->updated)) {
-
- // ----------------------------------------------------------------
-
- if(unlikely(!d->states_chart_generated)) {
- print_domain_states_chart_definition(type, CHART_IS_NOT_OBSOLETE);
- d->states_chart_generated = 1;
- }
- printf(
- "BEGIN %s.states\n"
- "SET running = %lld\n"
- "SET blocked = %lld\n"
- "SET paused = %lld\n"
- "SET shutdown = %lld\n"
- "SET crashed = %lld\n"
- "SET dying = %lld\n"
- "END\n"
- , type
- , (collected_number)d->running
- , (collected_number)d->blocked
- , (collected_number)d->paused
- , (collected_number)d->shutdown
- , (collected_number)d->crashed
- , (collected_number)d->dying
- );
-
- // ----------------------------------------------------------------
-
- if(unlikely(!d->cpu_chart_generated)) {
- print_domain_cpu_chart_definition(type, CHART_IS_NOT_OBSOLETE);
- d->cpu_chart_generated = 1;
- }
- printf(
- "BEGIN %s.cpu\n"
- "SET used = %lld\n"
- "END\n"
- , type
- , (collected_number)d->cpu_ns
- );
-
- // ----------------------------------------------------------------
-
- struct vcpu_metrics *vcpu_m;
-
- if(unlikely(!d->vcpu_chart_generated || d->num_vcpus_changed)) {
- print_domain_vcpu_chart_definition(type, d, CHART_IS_NOT_OBSOLETE);
- d->num_vcpus_changed = 0;
- d->vcpu_chart_generated = 1;
- }
-
- printf("BEGIN %s.vcpu\n", type);
- for(vcpu_m = d->vcpu_root; vcpu_m; vcpu_m = vcpu_m->next) {
- if(likely(vcpu_m->updated && vcpu_m->online)) {
- printf(
- "SET vcpu%u = %lld\n"
- , vcpu_m->id
- , (collected_number)vcpu_m->ns
- );
- }
- }
- printf("END\n");
-
- // ----------------------------------------------------------------
-
- if(unlikely(!d->mem_chart_generated)) {
- print_domain_mem_chart_definition(type, CHART_IS_NOT_OBSOLETE);
- d->mem_chart_generated = 1;
- }
- printf(
- "BEGIN %s.mem\n"
- "SET maximum = %lld\n"
- "SET current = %lld\n"
- "END\n"
- , type
- , (collected_number)d->max_mem
- , (collected_number)d->cur_mem
- );
-
- // ----------------------------------------------------------------
-
- struct vbd_metrics *vbd_m;
- for(vbd_m = d->vbd_root; vbd_m; vbd_m = vbd_m->next) {
- if(likely(vbd_m->updated && !vbd_m->error)) {
- if(unlikely(!vbd_m->oo_req_chart_generated)) {
- print_domain_vbd_oo_chart_definition(type, vbd_m->id, CHART_IS_NOT_OBSOLETE);
- vbd_m->oo_req_chart_generated = 1;
- }
- printf(
- "BEGIN %s.oo_req_vbd%u\n"
- "SET requests = %lld\n"
- "END\n"
- , type
- , vbd_m->id
- , (collected_number)vbd_m->oo_reqs
- );
-
- // ----------------------------------------------------------------
-
- if(unlikely(!vbd_m->requests_chart_generated)) {
- print_domain_vbd_requests_chart_definition(type, vbd_m->id, CHART_IS_NOT_OBSOLETE);
- vbd_m->requests_chart_generated = 1;
- }
- printf(
- "BEGIN %s.requests_vbd%u\n"
- "SET read = %lld\n"
- "SET write = %lld\n"
- "END\n"
- , type
- , vbd_m->id
- , (collected_number)vbd_m->rd_reqs
- , (collected_number)vbd_m->wr_reqs
- );
-
- // ----------------------------------------------------------------
-
- if(unlikely(!vbd_m->sectors_chart_generated)) {
- print_domain_vbd_sectors_chart_definition(type, vbd_m->id, CHART_IS_NOT_OBSOLETE);
- vbd_m->sectors_chart_generated = 1;
- }
- printf(
- "BEGIN %s.sectors_vbd%u\n"
- "SET read = %lld\n"
- "SET write = %lld\n"
- "END\n"
- , type
- , vbd_m->id
- , (collected_number)vbd_m->rd_sects
- , (collected_number)vbd_m->wr_sects
- );
- }
- else {
- if(unlikely(vbd_m->oo_req_chart_generated
- || vbd_m->requests_chart_generated
- || vbd_m->sectors_chart_generated)) {
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: mark charts as obsolete for vbd %u, domain '%s', id %u, uuid %s\n", vbd_m->id, d->name, d->id, d->uuid);
- print_domain_vbd_oo_chart_definition(type, vbd_m->id, CHART_IS_OBSOLETE);
- print_domain_vbd_requests_chart_definition(type, vbd_m->id, CHART_IS_OBSOLETE);
- print_domain_vbd_sectors_chart_definition(type, vbd_m->id, CHART_IS_OBSOLETE);
- vbd_m->oo_req_chart_generated = 0;
- vbd_m->requests_chart_generated = 0;
- vbd_m->sectors_chart_generated = 0;
- }
- }
- }
-
- // ----------------------------------------------------------------
-
- struct network_metrics *network_m;
- for(network_m = d->network_root; network_m; network_m = network_m->next) {
- if(likely(network_m->updated)) {
- if(unlikely(!network_m->bytes_chart_generated)) {
- print_domain_network_bytes_chart_definition(type, network_m->id, CHART_IS_NOT_OBSOLETE);
- network_m->bytes_chart_generated = 1;
- }
- printf(
- "BEGIN %s.bytes_network%u\n"
- "SET received = %lld\n"
- "SET sent = %lld\n"
- "END\n"
- , type
- , network_m->id
- , (collected_number)network_m->rbytes
- , (collected_number)network_m->tbytes
- );
-
- // ----------------------------------------------------------------
-
- if(unlikely(!network_m->packets_chart_generated)) {
- print_domain_network_packets_chart_definition(type, network_m->id, CHART_IS_NOT_OBSOLETE);
- network_m->packets_chart_generated = 1;
- }
- printf(
- "BEGIN %s.packets_network%u\n"
- "SET received = %lld\n"
- "SET sent = %lld\n"
- "END\n"
- , type
- , network_m->id
- , (collected_number)network_m->rpackets
- , (collected_number)network_m->tpackets
- );
-
- // ----------------------------------------------------------------
-
- if(unlikely(!network_m->errors_chart_generated)) {
- print_domain_network_errors_chart_definition(type, network_m->id, CHART_IS_NOT_OBSOLETE);
- network_m->errors_chart_generated = 1;
- }
- printf(
- "BEGIN %s.errors_network%u\n"
- "SET received = %lld\n"
- "SET sent = %lld\n"
- "END\n"
- , type
- , network_m->id
- , (collected_number)network_m->rerrs
- , (collected_number)network_m->terrs
- );
-
- // ----------------------------------------------------------------
-
- if(unlikely(!network_m->drops_chart_generated)) {
- print_domain_network_drops_chart_definition(type, network_m->id, CHART_IS_NOT_OBSOLETE);
- network_m->drops_chart_generated = 1;
- }
- printf(
- "BEGIN %s.drops_network%u\n"
- "SET received = %lld\n"
- "SET sent = %lld\n"
- "END\n"
- , type
- , network_m->id
- , (collected_number)network_m->rdrops
- , (collected_number)network_m->tdrops
- );
- }
- else {
- // guard the whole block (as the VBD branch above does); otherwise the obsolete
- // chart definitions and flag resets run on every cycle for every idle interface,
- // even when no charts were ever generated
- if(unlikely(network_m->bytes_chart_generated
- || network_m->packets_chart_generated
- || network_m->errors_chart_generated
- || network_m->drops_chart_generated)) {
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: mark charts as obsolete for network %u, domain '%s', id %u, uuid %s\n", network_m->id, d->name, d->id, d->uuid);
- print_domain_network_bytes_chart_definition(type, network_m->id, CHART_IS_OBSOLETE);
- print_domain_network_packets_chart_definition(type, network_m->id, CHART_IS_OBSOLETE);
- print_domain_network_errors_chart_definition(type, network_m->id, CHART_IS_OBSOLETE);
- print_domain_network_drops_chart_definition(type, network_m->id, CHART_IS_OBSOLETE);
- network_m->bytes_chart_generated = 0;
- network_m->packets_chart_generated = 0;
- network_m->errors_chart_generated = 0;
- network_m->drops_chart_generated = 0;
- }
- }
- }
- }
- else {
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: mark charts as obsolete for domain '%s', id %u, uuid %s\n", d->name, d->id, d->uuid);
- print_domain_states_chart_definition(type, CHART_IS_OBSOLETE);
- print_domain_cpu_chart_definition(type, CHART_IS_OBSOLETE);
- print_domain_vcpu_chart_definition(type, d, CHART_IS_OBSOLETE);
- print_domain_mem_chart_definition(type, CHART_IS_OBSOLETE);
-
- d = domain_metrics_free(d);
- // domain_metrics_free() returns the node *before* the freed one; when the list
- // head was freed there is none, and the loop increment would dereference NULL,
- // so stop this cycle and let the next one walk the remaining domains.
- if(unlikely(!d)) break;
- }
- }
-}
-
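-// main(): the usual netdata external-plugin lifecycle: parse the collection
-// frequency and flags from argv, initialize the libxenstat and libxl handles,
-// then collect and print on a heartbeat until netdata signals exit or 14400
-// seconds pass (the plugin then exits, presumably so the agent respawns it fresh).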
-int main(int argc, char **argv) {
- clocks_init();
-
- // ------------------------------------------------------------------------
- // initialization of netdata plugin
-
- program_name = PLUGIN_XENSTAT_NAME;
-
- nd_log_initialize_for_external_plugins(PLUGIN_XENSTAT_NAME);
-
- // ------------------------------------------------------------------------
- // parse command line parameters
-
- int i, freq = 0;
- for(i = 1; i < argc ; i++) {
- if(isdigit(*argv[i]) && !freq) {
- int n = str2i(argv[i]);
- if(n > 0 && n < 86400) {
- freq = n;
- continue;
- }
- }
- else if(strcmp("version", argv[i]) == 0 || strcmp("-version", argv[i]) == 0 || strcmp("--version", argv[i]) == 0 || strcmp("-v", argv[i]) == 0 || strcmp("-V", argv[i]) == 0) {
- printf("xenstat.plugin %s\n", VERSION);
- exit(0);
- }
- else if(strcmp("debug", argv[i]) == 0) {
- debug = 1;
- continue;
- }
- else if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) {
- fprintf(stderr,
- "\n"
- " netdata xenstat.plugin %s\n"
- " Copyright (C) 2019 Netdata Inc.\n"
- " Released under GNU General Public License v3 or later.\n"
- " All rights reserved.\n"
- "\n"
- " This program is a data collector plugin for netdata.\n"
- "\n"
- " Available command line options:\n"
- "\n"
- " COLLECTION_FREQUENCY data collection frequency in seconds\n"
- " minimum: %d\n"
- "\n"
- " debug enable verbose output\n"
- " default: disabled\n"
- "\n"
- " -v\n"
- " -V\n"
- " --version print version and exit\n"
- "\n"
- " -h\n"
- " --help print this message and exit\n"
- "\n"
- " For more information:\n"
- " https://github.com/netdata/netdata/tree/master/collectors/xenstat.plugin\n"
- "\n"
- , VERSION
- , netdata_update_every
- );
- exit(1);
- }
-
- netdata_log_error("xenstat.plugin: ignoring parameter '%s'", argv[i]);
- }
-
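-// A minimal sketch of how the plugin is invoked; netdata does this automatically,
-// and the path below assumes a default install layout:
-//
-//   /usr/libexec/netdata/plugins.d/xenstat.plugin 1 debug
-//
-// where "1" is COLLECTION_FREQUENCY in seconds and "debug" is optional.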
- errno = 0;
-
- if(freq >= netdata_update_every)
- netdata_update_every = freq;
- else if(freq)
- netdata_log_error("update frequency %d seconds is too small for XENSTAT. Using %d.", freq, netdata_update_every);
-
- // ------------------------------------------------------------------------
- // initialize xen API handles
- xenstat_handle *xhandle = NULL;
- libxl_ctx *ctx = NULL;
- libxl_dominfo info;
-
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: calling xenstat_init()\n");
- xhandle = xenstat_init();
- if (xhandle == NULL) {
- netdata_log_error("XENSTAT: failed to initialize xenstat library.");
- return 1;
- }
-
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: calling libxl_ctx_alloc()\n");
- if (libxl_ctx_alloc(&ctx, LIBXL_VERSION, 0, NULL)) {
- netdata_log_error("XENSTAT: failed to initialize xl context.");
- xenstat_uninit(xhandle);
- return 1;
- }
- libxl_dominfo_init(&info);
-
- // ------------------------------------------------------------------------
- // the main loop
-
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: starting data collection\n");
-
- time_t started_t = now_monotonic_sec();
-
- size_t iteration;
- usec_t step = netdata_update_every * USEC_PER_SEC;
-
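-// heartbeat_next() sleeps until the next tick of 'step' and returns the
-// microseconds that actually elapsed, keeping collection cycles aligned to the
-// configured update interval even when an iteration overruns.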
- heartbeat_t hb;
- heartbeat_init(&hb);
- for(iteration = 0; 1; iteration++) {
- usec_t dt = heartbeat_next(&hb, step);
-
- if(unlikely(netdata_exit)) break;
-
- if(unlikely(debug && iteration))
- fprintf(stderr, "xenstat.plugin: iteration %zu, dt %llu usec\n"
- , iteration
- , dt
- );
-
- if(likely(xhandle)) {
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: calling xenstat_collect()\n");
- int ret = xenstat_collect(xhandle, ctx, &info);
-
- if(likely(!ret)) {
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: calling xenstat_send_node_metrics()\n");
- xenstat_send_node_metrics();
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: calling xenstat_send_domain_metrics()\n");
- xenstat_send_domain_metrics();
- }
- else {
- if(unlikely(debug)) fprintf(stderr, "xenstat.plugin: can't collect data\n");
- }
- }
-
- fflush(stdout);
-
- // restart check (14400 seconds)
- if(unlikely(now_monotonic_sec() - started_t > 14400)) break;
- }
-
- libxl_ctx_free(ctx);
- xenstat_uninit(xhandle);
- netdata_log_info("XENSTAT process exiting");
-
- return 0;
-}